1package vault
2
3import (
4	"bytes"
5	"context"
6	"crypto/ecdsa"
7	"crypto/elliptic"
8	"crypto/rand"
9	"crypto/sha256"
10	"crypto/tls"
11	"crypto/x509"
12	"crypto/x509/pkix"
13	"encoding/base64"
14	"encoding/pem"
15	"errors"
16	"fmt"
17	"io"
18	"io/ioutil"
19	"math/big"
20	mathrand "math/rand"
21	"net"
22	"net/http"
23	"os"
24	"path/filepath"
25	"sync"
26	"sync/atomic"
27	"time"
28
29	"github.com/armon/go-metrics"
30	"github.com/hashicorp/go-cleanhttp"
31	log "github.com/hashicorp/go-hclog"
32	raftlib "github.com/hashicorp/raft"
33	"github.com/hashicorp/vault/api"
34	"github.com/hashicorp/vault/audit"
35	"github.com/hashicorp/vault/command/server"
36	"github.com/hashicorp/vault/helper/metricsutil"
37	"github.com/hashicorp/vault/helper/namespace"
38	"github.com/hashicorp/vault/internalshared/configutil"
39	"github.com/hashicorp/vault/internalshared/reloadutil"
40	dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
41	dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql"
42	"github.com/hashicorp/vault/sdk/framework"
43	"github.com/hashicorp/vault/sdk/helper/consts"
44	"github.com/hashicorp/vault/sdk/helper/logging"
45	"github.com/hashicorp/vault/sdk/helper/salt"
46	"github.com/hashicorp/vault/sdk/logical"
47	"github.com/hashicorp/vault/sdk/physical"
48	physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
49	"github.com/hashicorp/vault/vault/cluster"
50	"github.com/hashicorp/vault/vault/seal"
51	"github.com/mitchellh/copystructure"
52	"github.com/mitchellh/go-testing-interface"
53	"golang.org/x/crypto/ed25519"
54	"golang.org/x/net/http2"
55)
56
57// This file contains a number of methods that are useful for unit
58// tests within other packages.
59
// Test-only SSH keypair shared by unit tests in other packages. These are
// deliberately published throwaway credentials that protect nothing; never
// use them outside of tests.
const (
	testSharedPublicKey = `
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
`
	testSharedPrivateKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
+aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
+B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
-----END RSA PRIVATE KEY-----
`
)
94
// TestCore returns a pure in-memory, uninitialized core for testing.
// It is equivalent to TestCoreWithSeal with a nil seal and raw endpoints
// disabled.
func TestCore(t testing.T) *Core {
	return TestCoreWithSeal(t, nil, false)
}
99
// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw
// storage endpoints are enabled with this core.
func TestCoreRaw(t testing.T) *Core {
	return TestCoreWithSeal(t, nil, true)
}
105
106// TestCoreNewSeal returns a pure in-memory, uninitialized core with
107// the new seal configuration.
108func TestCoreNewSeal(t testing.T) *Core {
109	seal := NewTestSeal(t, nil)
110	return TestCoreWithSeal(t, seal, false)
111}
112
// TestCoreWithConfig returns a pure in-memory, uninitialized core with the
// specified core configurations overridden for testing.
func TestCoreWithConfig(t testing.T, conf *CoreConfig) *Core {
	return TestCoreWithSealAndUI(t, conf)
}
118
119// TestCoreWithSeal returns a pure in-memory, uninitialized core with the
120// specified seal for testing.
121func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
122	conf := &CoreConfig{
123		Seal:            testSeal,
124		EnableUI:        false,
125		EnableRaw:       enableRaw,
126		BuiltinRegistry: NewMockBuiltinRegistry(),
127	}
128	return TestCoreWithSealAndUI(t, conf)
129}
130
131func TestCoreUI(t testing.T, enableUI bool) *Core {
132	conf := &CoreConfig{
133		EnableUI:        enableUI,
134		EnableRaw:       true,
135		BuiltinRegistry: NewMockBuiltinRegistry(),
136	}
137	return TestCoreWithSealAndUI(t, conf)
138}
139
// TestCoreWithSealAndUI returns an uninitialized core backed by in-memory
// storage (wrapped in an error injector so tests can inject backend
// failures), with selected fields from opts layered over the base test
// config. A t.Cleanup hook shuts the core down when the test ends.
func TestCoreWithSealAndUI(t testing.T, opts *CoreConfig) *Core {
	logger := logging.NewVaultLogger(log.Trace)
	physicalBackend, err := physInmem.NewInmem(nil, logger)
	if err != nil {
		t.Fatal(err)
	}

	// Error injector starts with zero error probability; tests can dial it up.
	errInjector := physical.NewErrorInjector(physicalBackend, 0, logger)

	// Start off with base test core config
	conf := testCoreConfig(t, errInjector, logger)

	// Override config values with ones that gets passed in
	conf.EnableUI = opts.EnableUI
	conf.EnableRaw = opts.EnableRaw
	conf.Seal = opts.Seal
	conf.LicensingConfig = opts.LicensingConfig
	conf.DisableKeyEncodingChecks = opts.DisableKeyEncodingChecks
	conf.MetricsHelper = opts.MetricsHelper
	conf.MetricSink = opts.MetricSink
	conf.NumExpirationWorkers = numExpirationWorkersTest
	conf.RawConfig = opts.RawConfig
	conf.EnableResponseHeaderHostname = opts.EnableResponseHeaderHostname

	// Only replace the base logger when the caller supplied one.
	if opts.Logger != nil {
		conf.Logger = opts.Logger
	}

	// Caller-supplied backends override the base config's entries of the
	// same name.
	for k, v := range opts.LogicalBackends {
		conf.LogicalBackends[k] = v
	}
	for k, v := range opts.CredentialBackends {
		conf.CredentialBackends[k] = v
	}

	for k, v := range opts.AuditBackends {
		conf.AuditBackends[k] = v
	}

	c, err := NewCore(conf)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	t.Cleanup(func() {
		// Shutdown may panic if the core is in a bad state at test end;
		// recover so cleanup failures don't mask the test's own result.
		defer func() {
			if r := recover(); r != nil {
				t.Log("panic closing core during cleanup", "panic", r)
			}
		}()
		c.Shutdown()
	})

	return c
}
195
// testCoreConfig builds the base CoreConfig used by the test helpers: a
// "noop" audit backend that records formatted entries in memory, "noop"
// and "http" logical/credential backends, a leased-passthrough "kv"
// backend, and any backends registered via AddTest*Backend (which take
// precedence over the built-in noop entries).
func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
	t.Helper()
	noopAudits := map[string]audit.Factory{
		"noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
			// Pre-seed a fixed salt so audit hashes are stable in tests.
			view := &logical.InmemStorage{}
			view.Put(context.Background(), &logical.StorageEntry{
				Key:   "salt",
				Value: []byte("foo"),
			})
			config.SaltConfig = &salt.Config{
				HMAC:     sha256.New,
				HMACType: "hmac-sha256",
			}
			config.SaltView = view

			n := &noopAudit{
				Config: config,
			}
			n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
				SaltFunc: n.Salt,
			}
			return n, nil
		},
	}

	noopBackends := make(map[string]logical.Factory)
	noopBackends["noop"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		b := new(framework.Backend)
		b.Setup(ctx, config)
		b.BackendType = logical.TypeCredential
		return b, nil
	}
	noopBackends["http"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		return new(rawHTTP), nil
	}

	// Merge order matters: test-registered backends override the noop ones.
	credentialBackends := make(map[string]logical.Factory)
	for backendName, backendFactory := range noopBackends {
		credentialBackends[backendName] = backendFactory
	}
	for backendName, backendFactory := range testCredentialBackends {
		credentialBackends[backendName] = backendFactory
	}

	logicalBackends := make(map[string]logical.Factory)
	for backendName, backendFactory := range noopBackends {
		logicalBackends[backendName] = backendFactory
	}

	logicalBackends["kv"] = LeasedPassthroughBackendFactory
	for backendName, backendFactory := range testLogicalBackends {
		logicalBackends[backendName] = backendFactory
	}

	conf := &CoreConfig{
		Physical:           physicalBackend,
		AuditBackends:      noopAudits,
		LogicalBackends:    logicalBackends,
		CredentialBackends: credentialBackends,
		DisableMlock:       true,
		Logger:             logger,
		BuiltinRegistry:    NewMockBuiltinRegistry(),
	}

	return conf
}
262
// TestCoreInit initializes the core and returns the barrier secret shares
// that must be used to unseal it along with the root token. Recovery
// shares (if any) are discarded; use TestCoreInitClusterWrapperSetup to
// get them.
func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
	t.Helper()
	secretShares, _, root := TestCoreInitClusterWrapperSetup(t, core, nil)
	return secretShares, root
}
270
// TestCoreInitClusterWrapperSetup initializes the core with a 3-of-3
// barrier and recovery config, installing handler as the cluster handler.
// It returns the barrier secret shares, the recovery shares, and the root
// token.
func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, handler http.Handler) ([][]byte, [][]byte, string) {
	t.Helper()
	core.SetClusterHandler(handler)

	barrierConfig := &SealConfig{
		SecretShares:    3,
		SecretThreshold: 3,
	}

	// Seals that can store keys keep one share stored; legacy shamir
	// seals store none.
	switch core.seal.StoredKeysSupported() {
	case seal.StoredKeysNotSupported:
		barrierConfig.StoredShares = 0
	default:
		barrierConfig.StoredShares = 1
	}

	recoveryConfig := &SealConfig{
		SecretShares:    3,
		SecretThreshold: 3,
	}

	initParams := &InitParams{
		BarrierConfig:  barrierConfig,
		RecoveryConfig: recoveryConfig,
	}
	if core.seal.StoredKeysSupported() == seal.StoredKeysNotSupported {
		initParams.LegacyShamirSeal = true
	}
	result, err := core.Initialize(context.Background(), initParams)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	return result.SecretShares, result.RecoveryShares, result.RootToken
}
305
// TestCoreUnseal feeds a single unseal key share to the core, returning
// whether the core is now unsealed.
func TestCoreUnseal(core *Core, key []byte) (bool, error) {
	return core.Unseal(key)
}
309
310// TestCoreUnsealed returns a pure in-memory core that is already
311// initialized and unsealed.
312func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
313	t.Helper()
314	core := TestCore(t)
315	return testCoreUnsealed(t, core)
316}
317
318func TestCoreUnsealedWithMetrics(t testing.T) (*Core, [][]byte, string, *metrics.InmemSink) {
319	t.Helper()
320	inmemSink := metrics.NewInmemSink(1000000*time.Hour, 2000000*time.Hour)
321	conf := &CoreConfig{
322		BuiltinRegistry: NewMockBuiltinRegistry(),
323		MetricSink:      metricsutil.NewClusterMetricSink("test-cluster", inmemSink),
324		MetricsHelper:   metricsutil.NewMetricsHelper(inmemSink, false),
325	}
326	core, keys, root := testCoreUnsealed(t, TestCoreWithSealAndUI(t, conf))
327	return core, keys, root, inmemSink
328}
329
330// TestCoreUnsealedRaw returns a pure in-memory core that is already
331// initialized, unsealed, and with raw endpoints enabled.
332func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
333	t.Helper()
334	core := TestCoreRaw(t)
335	return testCoreUnsealed(t, core)
336}
337
338// TestCoreUnsealedWithConfig returns a pure in-memory core that is already
339// initialized, unsealed, with the any provided core config values overridden.
340func TestCoreUnsealedWithConfig(t testing.T, conf *CoreConfig) (*Core, [][]byte, string) {
341	t.Helper()
342	core := TestCoreWithConfig(t, conf)
343	return testCoreUnsealed(t, core)
344}
345
// testCoreUnsealed initializes core, unseals it with every returned key
// share, mounts the default KV v1 engine at secret/, and registers a
// shutdown cleanup. Returns the core, unseal keys, and root token.
func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
	t.Helper()
	keys, token := TestCoreInit(t, core)
	for _, key := range keys {
		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
			t.Fatalf("unseal err: %s", err)
		}
	}

	if core.Sealed() {
		t.Fatal("should not be sealed")
	}

	testCoreAddSecretMount(t, core, token)

	// NOTE(review): cores built by TestCoreWithSealAndUI already register
	// a Shutdown cleanup, so Shutdown may run twice — presumably it is
	// idempotent; confirm before relying on single-shot semantics.
	t.Cleanup(func() {
		core.Shutdown()
	})
	return core, keys, token
}
366
367func testCoreAddSecretMount(t testing.T, core *Core, token string) {
368	kvReq := &logical.Request{
369		Operation:   logical.UpdateOperation,
370		ClientToken: token,
371		Path:        "sys/mounts/secret",
372		Data: map[string]interface{}{
373			"type":        "kv",
374			"path":        "secret/",
375			"description": "key/value secret storage",
376			"options": map[string]string{
377				"version": "1",
378			},
379		},
380	}
381	resp, err := core.HandleRequest(namespace.RootContext(nil), kvReq)
382	if err != nil {
383		t.Fatal(err)
384	}
385	if resp.IsError() {
386		t.Fatal(err)
387	}
388}
389
// TestCoreUnsealedBackend returns an initialized, unsealed core backed by
// the caller-supplied physical backend, using a test seal. Returns the
// core, unseal keys, and root token; a cleanup hook shuts the core down.
func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
	t.Helper()
	logger := logging.NewVaultLogger(log.Trace)
	conf := testCoreConfig(t, backend, logger)
	conf.Seal = NewTestSeal(t, nil)
	conf.NumExpirationWorkers = numExpirationWorkersTest

	core, err := NewCore(conf)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	keys, token := TestCoreInit(t, core)
	for _, key := range keys {
		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
			t.Fatalf("unseal err: %s", err)
		}
	}

	// Also try stored keys in case the test seal stores a share.
	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
		t.Fatal(err)
	}

	if core.Sealed() {
		t.Fatal("should not be sealed")
	}

	t.Cleanup(func() {
		// Recover so a panicking Shutdown doesn't mask the test result.
		defer func() {
			if r := recover(); r != nil {
				t.Log("panic closing core during cleanup", "panic", r)
			}
		}()
		core.Shutdown()
	})

	return core, keys, token
}
428
// TestKeyCopy returns an independent copy of key so the caller can hand it
// to Unseal without the original being affected by any mutation. The
// result is always non-nil, even for a nil input.
func TestKeyCopy(key []byte) []byte {
	dup := make([]byte, len(key))
	copy(dup, key)
	return dup
}
436
437func TestDynamicSystemView(c *Core) *dynamicSystemView {
438	me := &MountEntry{
439		Config: MountConfig{
440			DefaultLeaseTTL: 24 * time.Hour,
441			MaxLeaseTTL:     2 * 24 * time.Hour,
442		},
443	}
444
445	return &dynamicSystemView{c, me}
446}
447
// TestAddTestPlugin registers the testFunc as part of the plugin command to the
// plugin catalog. If provided, uses tmpDir as the plugin directory. The
// running test binary itself (os.Args[0]) is used as the plugin
// executable, invoked with --test.run=<testFunc>.
func TestAddTestPlugin(t testing.T, c *Core, name string, pluginType consts.PluginType, testFunc string, env []string, tempDir string) {
	file, err := os.Open(os.Args[0])
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()

	dirPath := filepath.Dir(os.Args[0])
	fileName := filepath.Base(os.Args[0])

	if tempDir != "" {
		fi, err := file.Stat()
		if err != nil {
			t.Fatal(err)
		}

		// Copy over the file to the temp dir
		dst := filepath.Join(tempDir, fileName)
		// Preserve the test binary's mode bits so it stays executable.
		out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
		if err != nil {
			t.Fatal(err)
		}
		defer out.Close()

		if _, err = io.Copy(out, file); err != nil {
			t.Fatal(err)
		}
		err = out.Sync()
		if err != nil {
			t.Fatal(err)
		}

		dirPath = tempDir
	}

	// Determine plugin directory full path, evaluating potential symlink path
	fullPath, err := filepath.EvalSymlinks(dirPath)
	if err != nil {
		t.Fatal(err)
	}

	reader, err := os.Open(filepath.Join(fullPath, fileName))
	if err != nil {
		t.Fatal(err)
	}
	defer reader.Close()

	// Find out the sha256 of the plugin binary; the catalog verifies it
	// before execution.
	hash := sha256.New()

	_, err = io.Copy(hash, reader)
	if err != nil {
		t.Fatal(err)
	}

	sum := hash.Sum(nil)

	// Set core's plugin directory and plugin catalog directory
	c.pluginDirectory = fullPath
	c.pluginCatalog.directory = fullPath

	args := []string{fmt.Sprintf("--test.run=%s", testFunc)}
	err = c.pluginCatalog.Set(context.Background(), name, pluginType, fileName, args, env, sum)
	if err != nil {
		t.Fatal(err)
	}
}
517
// Registries of extra backends to wire into test cores; populated via
// AddTestLogicalBackend / AddTestCredentialBackend before core creation.
var (
	testLogicalBackends    = map[string]logical.Factory{}
	testCredentialBackends = map[string]logical.Factory{}
)
522
523// This adds a credential backend for the test core. This needs to be
524// invoked before the test core is created.
525func AddTestCredentialBackend(name string, factory logical.Factory) error {
526	if name == "" {
527		return fmt.Errorf("missing backend name")
528	}
529	if factory == nil {
530		return fmt.Errorf("missing backend factory function")
531	}
532	testCredentialBackends[name] = factory
533	return nil
534}
535
536// This adds a logical backend for the test core. This needs to be
537// invoked before the test core is created.
538func AddTestLogicalBackend(name string, factory logical.Factory) error {
539	if name == "" {
540		return fmt.Errorf("missing backend name")
541	}
542	if factory == nil {
543		return fmt.Errorf("missing backend factory function")
544	}
545	testLogicalBackends[name] = factory
546	return nil
547}
548
// noopAudit is an audit.Backend that formats entries normally but only
// appends them to an in-memory slice instead of persisting them.
type noopAudit struct {
	Config    *audit.BackendConfig
	salt      *salt.Salt // lazily built; guarded by saltMutex
	saltMutex sync.RWMutex
	formatter audit.AuditFormatter
	records   [][]byte // formatted entries, guarded by l
	l         sync.RWMutex
}
557
558func (n *noopAudit) GetHash(ctx context.Context, data string) (string, error) {
559	salt, err := n.Salt(ctx)
560	if err != nil {
561		return "", err
562	}
563	return salt.GetIdentifiedHMAC(data), nil
564}
565
566func (n *noopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
567	n.l.Lock()
568	defer n.l.Unlock()
569	var w bytes.Buffer
570	err := n.formatter.FormatRequest(ctx, &w, audit.FormatterConfig{}, in)
571	if err != nil {
572		return err
573	}
574	n.records = append(n.records, w.Bytes())
575	return nil
576}
577
578func (n *noopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
579	n.l.Lock()
580	defer n.l.Unlock()
581	var w bytes.Buffer
582	err := n.formatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in)
583	if err != nil {
584		return err
585	}
586	n.records = append(n.records, w.Bytes())
587	return nil
588}
589
590func (n *noopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error {
591	n.l.Lock()
592	defer n.l.Unlock()
593	var w bytes.Buffer
594	tempFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"])
595	err := tempFormatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in)
596	if err != nil {
597		return err
598	}
599	n.records = append(n.records, w.Bytes())
600	return nil
601}
602
// Reload is a no-op; there is no underlying file or sink to reopen.
func (n *noopAudit) Reload(_ context.Context) error {
	return nil
}
606
// Invalidate drops the cached salt so the next Salt call rebuilds it.
func (n *noopAudit) Invalidate(_ context.Context) {
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	n.salt = nil
}
612
// Salt lazily builds and caches the backend's salt using double-checked
// locking: a fast path under the read lock, then a re-check under the
// write lock before constructing.
func (n *noopAudit) Salt(ctx context.Context) (*salt.Salt, error) {
	n.saltMutex.RLock()
	if n.salt != nil {
		defer n.saltMutex.RUnlock()
		return n.salt, nil
	}
	n.saltMutex.RUnlock()
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	// Another goroutine may have built the salt while we upgraded locks.
	if n.salt != nil {
		return n.salt, nil
	}
	salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig)
	if err != nil {
		return nil, err
	}
	n.salt = salt
	return salt, nil
}
632
// AddNoopAudit installs a "noop" audit backend factory into conf. If
// records is non-nil, it is pointed at the backend's in-memory record
// slice once the factory runs, letting the test inspect logged entries.
func AddNoopAudit(conf *CoreConfig, records **[][]byte) {
	conf.AuditBackends = map[string]audit.Factory{
		"noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
			// Pre-seed a fixed salt entry for deterministic hashing.
			view := &logical.InmemStorage{}
			view.Put(context.Background(), &logical.StorageEntry{
				Key:   "salt",
				Value: []byte("foo"),
			})
			n := &noopAudit{
				Config: config,
			}
			n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
				SaltFunc: n.Salt,
			}
			if records != nil {
				*records = &n.records
			}
			return n, nil
		},
	}
}
654
// rawHTTP is a stub logical.Backend that answers every request with a
// fixed raw HTTP response; used to exercise raw-response handling.
type rawHTTP struct{}
656
// HandleRequest always responds 200 with the raw body "hello world".
// NOTE(review): the content type "plain/text" looks like a typo for
// "text/plain", but it is preserved here because callers may assert on
// the exact string — confirm before changing.
func (n *rawHTTP) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
	return &logical.Response{
		Data: map[string]interface{}{
			logical.HTTPStatusCode:  200,
			logical.HTTPContentType: "plain/text",
			logical.HTTPRawBody:     []byte("hello world"),
		},
	}, nil
}
666
// HandleExistenceCheck reports that existence checks are unsupported.
func (n *rawHTTP) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
	return false, false, nil
}
670
// SpecialPaths marks every path as unauthenticated.
func (n *rawHTTP) SpecialPaths() *logical.Paths {
	return &logical.Paths{Unauthenticated: []string{"*"}}
}
674
675func (n *rawHTTP) System() logical.SystemView {
676	return logical.StaticSystemView{
677		DefaultLeaseTTLVal: time.Hour * 24,
678		MaxLeaseTTLVal:     time.Hour * 24 * 32,
679	}
680}
681
// Logger returns a fresh trace-level vault logger on every call.
func (n *rawHTTP) Logger() log.Logger {
	return logging.NewVaultLogger(log.Trace)
}
685
// Cleanup is a no-op; the stub holds no resources.
func (n *rawHTTP) Cleanup(ctx context.Context) {
	// noop
}
689
// Initialize is a no-op; the stub needs no setup.
func (n *rawHTTP) Initialize(ctx context.Context, req *logical.InitializationRequest) error {
	return nil
}
693
// InvalidateKey is a no-op; the stub caches nothing keyed by storage path.
func (n *rawHTTP) InvalidateKey(context.Context, string) {
	// noop
}
697
// Setup is a no-op; the stub ignores its backend config.
func (n *rawHTTP) Setup(ctx context.Context, config *logical.BackendConfig) error {
	// noop
	return nil
}
702
// Type identifies the stub as a logical (secrets) backend.
func (n *rawHTTP) Type() logical.BackendType {
	return logical.TypeLogical
}
706
// GenerateRandBytes returns length cryptographically secure random bytes.
// A negative length is an error; a zero length returns an empty (non-nil)
// slice.
func GenerateRandBytes(length int) ([]byte, error) {
	if length < 0 {
		return nil, fmt.Errorf("length must be >= 0")
	}

	buf := make([]byte, length)
	if length == 0 {
		return buf, nil
	}

	// io.ReadFull either fills buf entirely or returns an error, so no
	// separate short-read check is required (crypto/rand.Read has the
	// same guarantee).
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		return nil, err
	}

	return buf, nil
}
727
// TestWaitActive waits for the core to leave standby, failing the test if
// it does not become active within the polling window.
func TestWaitActive(t testing.T, core *Core) {
	t.Helper()
	if err := TestWaitActiveWithError(core); err != nil {
		t.Fatal(err)
	}
}
734
// TestWaitActiveForwardingReady waits for the core to become active and
// then for its request-forwarding ALPN handler to be registered on the
// cluster listener, polling for up to two seconds.
func TestWaitActiveForwardingReady(t testing.T, core *Core) {
	TestWaitActive(t, core)

	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		if _, ok := core.getClusterListener().Handler(consts.RequestForwardingALPN); ok {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatal("timed out waiting for request forwarding handler to be registered")
}
747
748func TestWaitActiveWithError(core *Core) error {
749	start := time.Now()
750	var standby bool
751	var err error
752	for time.Now().Sub(start) < 30*time.Second {
753		standby, err = core.Standby()
754		if err != nil {
755			return err
756		}
757		if !standby {
758			break
759		}
760	}
761	if standby {
762		return errors.New("should not be in standby mode")
763	}
764	return nil
765}
766
// TestCluster is a multi-core Vault cluster assembled for tests, holding
// the shared unseal material, CA material used for intra-cluster TLS, and
// per-core handles.
type TestCluster struct {
	BarrierKeys        [][]byte // shamir key shares for the barrier
	RecoveryKeys       [][]byte // recovery key shares, when the seal supports them
	CACert             *x509.Certificate
	CACertBytes        []byte
	CACertPEM          []byte
	CACertPEMFile      string
	CAKey              *ecdsa.PrivateKey
	CAKeyPEM           []byte
	Cores              []*TestClusterCore
	ID                 string
	RootToken          string
	RootCAs            *x509.CertPool
	TempDir            string // removed by Cleanup when non-empty
	ClientAuthRequired bool
	Logger             log.Logger
	CleanupFunc        func() // invoked at the end of Cleanup
	SetupFunc          func() // invoked at the end of Start

	cleanupFuncs      []func()
	base              *CoreConfig
	LicensePublicKey  ed25519.PublicKey
	LicensePrivateKey ed25519.PrivateKey
}
791
// Start begins serving HTTP on every listener of every core that has a
// server configured, then runs SetupFunc if one was provided.
func (c *TestCluster) Start() {
	for i, core := range c.Cores {
		if core.Server != nil {
			for _, ln := range core.Listeners {
				c.Logger.Info("starting listener for test core", "core", i, "port", ln.Address.Port)
				go core.Server.Serve(ln)
			}
		}
	}
	if c.SetupFunc != nil {
		c.SetupFunc()
	}
}
805
// UnsealCores uses the cluster barrier keys to unseal the test cluster cores
// and fails the test on any error.
func (c *TestCluster) UnsealCores(t testing.T) {
	t.Helper()
	if err := c.UnsealCoresWithError(false); err != nil {
		t.Fatal(err)
	}
}
813
// UnsealCoresWithError unseals every core in the cluster, either with the
// cluster barrier keys or (when useStoredKeys is true) with seal-stored
// keys. The first core must become active; the remaining cores are then
// unsealed and verified to come up as standbys, not leaders.
func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error {
	unseal := func(core *Core) error {
		for _, key := range c.BarrierKeys {
			if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
				return err
			}
		}
		return nil
	}
	if useStoredKeys {
		unseal = func(core *Core) error {
			return core.UnsealWithStoredKeys(context.Background())
		}
	}

	// Unseal first core
	if err := unseal(c.Cores[0].Core); err != nil {
		return fmt.Errorf("unseal core %d err: %s", 0, err)
	}

	// Verify unsealed
	if c.Cores[0].Sealed() {
		return fmt.Errorf("should not be sealed")
	}

	// Wait for the first core to take leadership before unsealing the rest.
	if err := TestWaitActiveWithError(c.Cores[0].Core); err != nil {
		return err
	}

	// Unseal other cores
	for i := 1; i < len(c.Cores); i++ {
		if err := unseal(c.Cores[i].Core); err != nil {
			return fmt.Errorf("unseal core %d err: %s", i, err)
		}
	}

	// Let them come fully up to standby
	time.Sleep(2 * time.Second)

	// Ensure cluster connection info is populated.
	// Other cores should not come up as leaders.
	for i := 1; i < len(c.Cores); i++ {
		isLeader, _, _, err := c.Cores[i].Leader()
		if err != nil {
			return err
		}
		if isLeader {
			return fmt.Errorf("core[%d] should not be leader", i)
		}
	}

	return nil
}
867
868func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
869	err := c.AttemptUnsealCore(core)
870	if err != nil {
871		t.Fatal(err)
872	}
873}
874
875func (c *TestCluster) AttemptUnsealCore(core *TestClusterCore) error {
876	var keys [][]byte
877	if core.seal.RecoveryKeySupported() {
878		keys = c.RecoveryKeys
879	} else {
880		keys = c.BarrierKeys
881	}
882	for _, key := range keys {
883		if _, err := core.Core.Unseal(TestKeyCopy(key)); err != nil {
884			return fmt.Errorf("unseal err: %w", err)
885		}
886	}
887	return nil
888}
889
// UnsealCoreWithStoredKeys unseals core using keys stored by the seal,
// failing the test on error.
func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
	t.Helper()
	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
		t.Fatal(err)
	}
}
896
// EnsureCoresSealed shuts down every core and waits for each to seal,
// failing the test on error or timeout.
func (c *TestCluster) EnsureCoresSealed(t testing.T) {
	t.Helper()
	if err := c.ensureCoresSealed(); err != nil {
		t.Fatal(err)
	}
}
903
// Seal seals this core via the internal seal path, failing the test on
// error.
func (c *TestClusterCore) Seal(t testing.T) {
	t.Helper()
	if err := c.Core.sealInternal(); err != nil {
		t.Fatal(err)
	}
}
910
// stop shuts the core down in order: close listeners, stop licensing,
// call Shutdown, then wait (up to 60s) for the core to report sealed.
func (c *TestClusterCore) stop() error {
	c.Logger().Info("stopping vault test core")

	if c.Listeners != nil {
		for _, ln := range c.Listeners {
			ln.Close()
		}
		c.Logger().Info("listeners successfully shut down")
	}
	if c.licensingStopCh != nil {
		close(c.licensingStopCh)
		c.licensingStopCh = nil
	}

	if err := c.Shutdown(); err != nil {
		return err
	}
	timeout := time.Now().Add(60 * time.Second)
	for {
		if time.Now().After(timeout) {
			return errors.New("timeout waiting for core to seal")
		}
		if c.Sealed() {
			break
		}
		time.Sleep(250 * time.Millisecond)
	}

	c.Logger().Info("vault test core stopped")
	return nil
}
942
// Cleanup stops all cores in parallel, removes the cluster's temp dir,
// and runs CleanupFunc. Logging is stopped first when a TestLogger is in
// use, so late errors may not appear in the test output.
func (c *TestCluster) Cleanup() {
	c.Logger.Info("cleaning up vault cluster")
	if tl, ok := c.Logger.(*TestLogger); ok {
		tl.StopLogging()
	}

	wg := &sync.WaitGroup{}
	for _, core := range c.Cores {
		wg.Add(1)
		lc := core // capture per-iteration value for the goroutine

		go func() {
			defer wg.Done()
			if err := lc.stop(); err != nil {
				// Note that this log won't be seen if using TestLogger, due to
				// the above call to StopLogging.
				lc.Logger().Error("error during cleanup", "error", err)
			}
		}()
	}

	wg.Wait()

	// Remove any temp dir that exists
	if c.TempDir != "" {
		os.RemoveAll(c.TempDir)
	}

	// Give time to actually shut down/clean up before the next test
	time.Sleep(time.Second)
	if c.CleanupFunc != nil {
		c.CleanupFunc()
	}
}
977
// ensureCoresSealed shuts down each core in turn and waits up to 60
// seconds per core for it to report sealed.
func (c *TestCluster) ensureCoresSealed() error {
	for _, core := range c.Cores {
		if err := core.Shutdown(); err != nil {
			return err
		}
		timeout := time.Now().Add(60 * time.Second)
		for {
			if time.Now().After(timeout) {
				return fmt.Errorf("timeout waiting for core to seal")
			}
			if core.Sealed() {
				break
			}
			time.Sleep(250 * time.Millisecond)
		}
	}
	return nil
}
996
// SetReplicationFailureMode atomically sets the core's replication failure
// mode flag for tests.
func SetReplicationFailureMode(core *TestClusterCore, mode uint32) {
	atomic.StoreUint32(core.Core.replicationFailure, mode)
}
1000
// TestListener pairs a net.Listener with its resolved TCP address so
// tests can read the bound port directly.
type TestListener struct {
	net.Listener
	Address *net.TCPAddr
}
1005
// TestClusterCore is a single member of a TestCluster: the embedded Core
// plus the HTTP server, TLS material, listeners, client, and underlying
// storage handles the tests need to drive it.
type TestClusterCore struct {
	*Core
	CoreConfig           *CoreConfig
	Client               *api.Client
	Handler              http.Handler
	Address              *net.TCPAddr
	Listeners            []*TestListener
	ReloadFuncs          *map[string][]reloadutil.ReloadFunc
	ReloadFuncsLock      *sync.RWMutex
	Server               *http.Server
	ServerCert           *x509.Certificate
	ServerCertBytes      []byte
	ServerCertPEM        []byte
	ServerKey            *ecdsa.PrivateKey
	ServerKeyPEM         []byte
	TLSConfig            *tls.Config
	UnderlyingStorage    physical.Backend
	UnderlyingRawStorage physical.Backend
	UnderlyingHAStorage  physical.HABackend
	Barrier              SecurityBarrier
	NodeID               string
}
1028
// PhysicalBackendBundle groups a physical backend, its optional HA
// counterpart, and a cleanup function to release backend resources.
type PhysicalBackendBundle struct {
	Backend   physical.Backend
	HABackend physical.HABackend
	Cleanup   func()
}
1034
// TestClusterOptions configures NewTestCluster-style helpers: core count,
// seals, networking, storage factories, and miscellaneous hooks.
type TestClusterOptions struct {
	// KeepStandbysSealed leaves all cores but the first sealed.
	KeepStandbysSealed       bool
	// SkipInit skips initializing/unsealing the cluster entirely.
	SkipInit                 bool
	// HandlerFunc builds the HTTP handler for each core.
	HandlerFunc              func(*HandlerProperties) http.Handler
	DefaultHandlerProperties HandlerProperties

	// BaseListenAddress is used to explicitly assign ports in sequence to the
	// listener of each core.  It should be a string of the form
	// "127.0.0.1:20000"
	//
	// WARNING: Using an explicitly assigned port above 30000 may clash with
	// ephemeral ports that have been assigned by the OS in other tests.  The
	// use of explicitly assigned ports below 30000 is strongly recommended.
	// In addition, you should be careful to use explicitly assigned ports that
	// do not clash with any other explicitly assigned ports in other tests.
	BaseListenAddress string

	// BaseClusterListenPort is used to explicitly assign ports in sequence to
	// the cluster listener of each core.  If BaseClusterListenPort is
	// specified, then BaseListenAddress must also be specified.  Each cluster
	// listener will use the same host as the one specified in
	// BaseListenAddress.
	//
	// WARNING: Using an explicitly assigned port above 30000 may clash with
	// ephemeral ports that have been assigned by the OS in other tests.  The
	// use of explicitly assigned ports below 30000 is strongly recommended.
	// In addition, you should be careful to use explicitly assigned ports that
	// do not clash with any other explicitly assigned ports in other tests.
	BaseClusterListenPort int

	// NumCores overrides DefaultNumCores when non-zero.
	NumCores       int
	// SealFunc builds a fresh seal per core.
	SealFunc       func() Seal
	UnwrapSealFunc func() Seal
	Logger         log.Logger
	TempDir        string
	// CACert/CAKey, when set, supply the cluster CA instead of generating one.
	CACert         []byte
	CAKey          *ecdsa.PrivateKey
	// PhysicalFactory is used to create backends.
	// The int argument is the index of the core within the cluster, i.e. first
	// core in cluster will have 0, second 1, etc.
	// If the backend is shared across the cluster (i.e. is not Raft) then it
	// should return nil when coreIdx != 0.
	PhysicalFactory func(t testing.T, coreIdx int, logger log.Logger, conf map[string]interface{}) *PhysicalBackendBundle
	// FirstCoreNumber is used to assign a unique number to each core within
	// a multi-cluster setup.
	FirstCoreNumber   int
	RequireClientAuth bool
	// SetupFunc is called after the cluster is started.
	SetupFunc      func(t testing.T, c *TestCluster)
	PR1103Disabled bool

	// ClusterLayers are used to override the default cluster connection layer
	ClusterLayers cluster.NetworkLayerSet
	// InmemClusterLayers is a shorthand way of asking for ClusterLayers to be
	// built using the inmem implementation.
	InmemClusterLayers bool

	// RaftAddressProvider is used to set the raft ServerAddressProvider on
	// each core.
	//
	// If SkipInit is true, then RaftAddressProvider has no effect.
	// RaftAddressProvider should only be specified if the underlying physical
	// storage is Raft.
	RaftAddressProvider raftlib.ServerAddressProvider

	CoreMetricSinkProvider func(clusterName string) (*metricsutil.ClusterMetricSink, *metricsutil.MetricsHelper)

	// PhysicalFactoryConfig is passed through to PhysicalFactory.
	PhysicalFactoryConfig map[string]interface{}
	LicensePublicKey      ed25519.PublicKey
	LicensePrivateKey     ed25519.PrivateKey
}
1106
// DefaultNumCores is the number of cores NewTestCluster creates when
// TestClusterOptions.NumCores is zero.
var DefaultNumCores = 3
1108
// certInfo bundles a generated server certificate in every representation the
// cluster setup needs (parsed, DER, and PEM) together with its private key.
type certInfo struct {
	// cert is the parsed certificate.
	cert *x509.Certificate
	// certPEM is the PEM encoding of certBytes.
	certPEM []byte
	// certBytes is the DER-encoded certificate.
	certBytes []byte
	// key is the certificate's ECDSA private key.
	key *ecdsa.PrivateKey
	// keyPEM is the PEM encoding of key.
	keyPEM []byte
}
1116
// TestLogger is a log.Logger whose output is routed through a deregisterable
// sink, optionally writing to a per-test file (see NewTestLogger).
type TestLogger struct {
	log.Logger
	// Path is the log file path; empty when logging only to stderr.
	Path string
	// File is the open log file; nil when logging only to stderr.
	File *os.File
	// sink is the registered sink; StopLogging deregisters it.
	sink log.SinkAdapter
}
1123
1124func NewTestLogger(t testing.T) *TestLogger {
1125	var logFile *os.File
1126	var logPath string
1127	output := os.Stderr
1128
1129	logDir := os.Getenv("VAULT_TEST_LOG_DIR")
1130	if logDir != "" {
1131		logPath = filepath.Join(logDir, t.Name()+".log")
1132		// t.Name may include slashes.
1133		dir, _ := filepath.Split(logPath)
1134		err := os.MkdirAll(dir, 0o755)
1135		if err != nil {
1136			t.Fatal(err)
1137		}
1138		logFile, err = os.Create(logPath)
1139		if err != nil {
1140			t.Fatal(err)
1141		}
1142		output = logFile
1143	}
1144
1145	// We send nothing on the regular logger, that way we can later deregister
1146	// the sink to stop logging during cluster cleanup.
1147	logger := log.NewInterceptLogger(&log.LoggerOptions{
1148		Output: ioutil.Discard,
1149	})
1150	sink := log.NewSinkAdapter(&log.LoggerOptions{
1151		Output: output,
1152		Level:  log.Trace,
1153	})
1154	logger.RegisterSink(sink)
1155	return &TestLogger{
1156		Path:   logPath,
1157		File:   logFile,
1158		Logger: logger,
1159		sink:   sink,
1160	}
1161}
1162
1163func (tl *TestLogger) StopLogging() {
1164	tl.Logger.(log.InterceptLogger).DeregisterSink(tl.sink)
1165}
1166
1167// NewTestCluster creates a new test cluster based on the provided core config
1168// and test cluster options.
1169//
1170// N.B. Even though a single base CoreConfig is provided, NewTestCluster will instantiate a
1171// core config for each core it creates. If separate seal per core is desired, opts.SealFunc
1172// can be provided to generate a seal for each one. Otherwise, the provided base.Seal will be
1173// shared among cores. NewCore's default behavior is to generate a new DefaultSeal if the
1174// provided Seal in coreConfig (i.e. base.Seal) is nil.
1175//
1176// If opts.Logger is provided, it takes precedence and will be used as the cluster
1177// logger and will be the basis for each core's logger.  If no opts.Logger is
1178// given, one will be generated based on t.Name() for the cluster logger, and if
1179// no base.Logger is given will also be used as the basis for each core's logger.
1180func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
1181	var err error
1182
1183	var numCores int
1184	if opts == nil || opts.NumCores == 0 {
1185		numCores = DefaultNumCores
1186	} else {
1187		numCores = opts.NumCores
1188	}
1189
1190	certIPs := []net.IP{
1191		net.IPv6loopback,
1192		net.ParseIP("127.0.0.1"),
1193	}
1194	var baseAddr *net.TCPAddr
1195	if opts != nil && opts.BaseListenAddress != "" {
1196		baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress)
1197		if err != nil {
1198			t.Fatal("could not parse given base IP")
1199		}
1200		certIPs = append(certIPs, baseAddr.IP)
1201	} else {
1202		baseAddr = &net.TCPAddr{
1203			IP:   net.ParseIP("127.0.0.1"),
1204			Port: 0,
1205		}
1206	}
1207
1208	var testCluster TestCluster
1209	testCluster.base = base
1210
1211	switch {
1212	case opts != nil && opts.Logger != nil:
1213		testCluster.Logger = opts.Logger
1214	default:
1215		testCluster.Logger = NewTestLogger(t)
1216	}
1217
1218	if opts != nil && opts.TempDir != "" {
1219		if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) {
1220			if err := os.MkdirAll(opts.TempDir, 0o700); err != nil {
1221				t.Fatal(err)
1222			}
1223		}
1224		testCluster.TempDir = opts.TempDir
1225	} else {
1226		tempDir, err := ioutil.TempDir("", "vault-test-cluster-")
1227		if err != nil {
1228			t.Fatal(err)
1229		}
1230		testCluster.TempDir = tempDir
1231	}
1232
1233	var caKey *ecdsa.PrivateKey
1234	if opts != nil && opts.CAKey != nil {
1235		caKey = opts.CAKey
1236	} else {
1237		caKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
1238		if err != nil {
1239			t.Fatal(err)
1240		}
1241	}
1242	testCluster.CAKey = caKey
1243	var caBytes []byte
1244	if opts != nil && len(opts.CACert) > 0 {
1245		caBytes = opts.CACert
1246	} else {
1247		caCertTemplate := &x509.Certificate{
1248			Subject: pkix.Name{
1249				CommonName: "localhost",
1250			},
1251			DNSNames:              []string{"localhost"},
1252			IPAddresses:           certIPs,
1253			KeyUsage:              x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
1254			SerialNumber:          big.NewInt(mathrand.Int63()),
1255			NotBefore:             time.Now().Add(-30 * time.Second),
1256			NotAfter:              time.Now().Add(262980 * time.Hour),
1257			BasicConstraintsValid: true,
1258			IsCA:                  true,
1259		}
1260		caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey)
1261		if err != nil {
1262			t.Fatal(err)
1263		}
1264	}
1265	caCert, err := x509.ParseCertificate(caBytes)
1266	if err != nil {
1267		t.Fatal(err)
1268	}
1269	testCluster.CACert = caCert
1270	testCluster.CACertBytes = caBytes
1271	testCluster.RootCAs = x509.NewCertPool()
1272	testCluster.RootCAs.AddCert(caCert)
1273	caCertPEMBlock := &pem.Block{
1274		Type:  "CERTIFICATE",
1275		Bytes: caBytes,
1276	}
1277	testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock)
1278	testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem")
1279	err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0o755)
1280	if err != nil {
1281		t.Fatal(err)
1282	}
1283	marshaledCAKey, err := x509.MarshalECPrivateKey(caKey)
1284	if err != nil {
1285		t.Fatal(err)
1286	}
1287	caKeyPEMBlock := &pem.Block{
1288		Type:  "EC PRIVATE KEY",
1289		Bytes: marshaledCAKey,
1290	}
1291	testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock)
1292	err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0o755)
1293	if err != nil {
1294		t.Fatal(err)
1295	}
1296
1297	var certInfoSlice []*certInfo
1298
1299	//
1300	// Certs generation
1301	//
1302	for i := 0; i < numCores; i++ {
1303		key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
1304		if err != nil {
1305			t.Fatal(err)
1306		}
1307		certTemplate := &x509.Certificate{
1308			Subject: pkix.Name{
1309				CommonName: "localhost",
1310			},
1311			// Include host.docker.internal for the sake of benchmark-vault running on MacOS/Windows.
1312			// This allows Prometheus running in docker to scrape the cluster for metrics.
1313			DNSNames:    []string{"localhost", "host.docker.internal"},
1314			IPAddresses: certIPs,
1315			ExtKeyUsage: []x509.ExtKeyUsage{
1316				x509.ExtKeyUsageServerAuth,
1317				x509.ExtKeyUsageClientAuth,
1318			},
1319			KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
1320			SerialNumber: big.NewInt(mathrand.Int63()),
1321			NotBefore:    time.Now().Add(-30 * time.Second),
1322			NotAfter:     time.Now().Add(262980 * time.Hour),
1323		}
1324		certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, key.Public(), caKey)
1325		if err != nil {
1326			t.Fatal(err)
1327		}
1328		cert, err := x509.ParseCertificate(certBytes)
1329		if err != nil {
1330			t.Fatal(err)
1331		}
1332		certPEMBlock := &pem.Block{
1333			Type:  "CERTIFICATE",
1334			Bytes: certBytes,
1335		}
1336		certPEM := pem.EncodeToMemory(certPEMBlock)
1337		marshaledKey, err := x509.MarshalECPrivateKey(key)
1338		if err != nil {
1339			t.Fatal(err)
1340		}
1341		keyPEMBlock := &pem.Block{
1342			Type:  "EC PRIVATE KEY",
1343			Bytes: marshaledKey,
1344		}
1345		keyPEM := pem.EncodeToMemory(keyPEMBlock)
1346
1347		certInfoSlice = append(certInfoSlice, &certInfo{
1348			cert:      cert,
1349			certPEM:   certPEM,
1350			certBytes: certBytes,
1351			key:       key,
1352			keyPEM:    keyPEM,
1353		})
1354	}
1355
1356	//
1357	// Listener setup
1358	//
1359	addresses := []*net.TCPAddr{}
1360	listeners := [][]*TestListener{}
1361	servers := []*http.Server{}
1362	handlers := []http.Handler{}
1363	tlsConfigs := []*tls.Config{}
1364	certGetters := []*reloadutil.CertificateGetter{}
1365	for i := 0; i < numCores; i++ {
1366
1367		addr := &net.TCPAddr{
1368			IP:   baseAddr.IP,
1369			Port: 0,
1370		}
1371		if baseAddr.Port != 0 {
1372			addr.Port = baseAddr.Port + i
1373		}
1374
1375		ln, err := net.ListenTCP("tcp", addr)
1376		if err != nil {
1377			t.Fatal(err)
1378		}
1379		addresses = append(addresses, addr)
1380
1381		certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
1382		keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
1383		err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0o755)
1384		if err != nil {
1385			t.Fatal(err)
1386		}
1387		err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0o755)
1388		if err != nil {
1389			t.Fatal(err)
1390		}
1391		tlsCert, err := tls.X509KeyPair(certInfoSlice[i].certPEM, certInfoSlice[i].keyPEM)
1392		if err != nil {
1393			t.Fatal(err)
1394		}
1395		certGetter := reloadutil.NewCertificateGetter(certFile, keyFile, "")
1396		certGetters = append(certGetters, certGetter)
1397		certGetter.Reload()
1398		tlsConfig := &tls.Config{
1399			Certificates:   []tls.Certificate{tlsCert},
1400			RootCAs:        testCluster.RootCAs,
1401			ClientCAs:      testCluster.RootCAs,
1402			ClientAuth:     tls.RequestClientCert,
1403			NextProtos:     []string{"h2", "http/1.1"},
1404			GetCertificate: certGetter.GetCertificate,
1405		}
1406		if opts != nil && opts.RequireClientAuth {
1407			tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
1408			testCluster.ClientAuthRequired = true
1409		}
1410		tlsConfig.BuildNameToCertificate()
1411		tlsConfigs = append(tlsConfigs, tlsConfig)
1412		lns := []*TestListener{
1413			{
1414				Listener: tls.NewListener(ln, tlsConfig),
1415				Address:  ln.Addr().(*net.TCPAddr),
1416			},
1417		}
1418		listeners = append(listeners, lns)
1419		var handler http.Handler = http.NewServeMux()
1420		handlers = append(handlers, handler)
1421		server := &http.Server{
1422			Handler:  handler,
1423			ErrorLog: testCluster.Logger.StandardLogger(nil),
1424		}
1425		servers = append(servers, server)
1426	}
1427
1428	// Create three cores with the same physical and different redirect/cluster
1429	// addrs.
1430	// N.B.: On OSX, instead of random ports, it assigns new ports to new
1431	// listeners sequentially. Aside from being a bad idea in a security sense,
1432	// it also broke tests that assumed it was OK to just use the port above
1433	// the redirect addr. This has now been changed to 105 ports above, but if
1434	// we ever do more than three nodes in a cluster it may need to be bumped.
1435	// Note: it's 105 so that we don't conflict with a running Consul by
1436	// default.
1437	coreConfig := &CoreConfig{
1438		LogicalBackends:    make(map[string]logical.Factory),
1439		CredentialBackends: make(map[string]logical.Factory),
1440		AuditBackends:      make(map[string]audit.Factory),
1441		RedirectAddr:       fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port),
1442		ClusterAddr:        "https://127.0.0.1:0",
1443		DisableMlock:       true,
1444		EnableUI:           true,
1445		EnableRaw:          true,
1446		BuiltinRegistry:    NewMockBuiltinRegistry(),
1447	}
1448
1449	if base != nil {
1450		coreConfig.RawConfig = base.RawConfig
1451		coreConfig.DisableCache = base.DisableCache
1452		coreConfig.EnableUI = base.EnableUI
1453		coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL
1454		coreConfig.MaxLeaseTTL = base.MaxLeaseTTL
1455		coreConfig.CacheSize = base.CacheSize
1456		coreConfig.PluginDirectory = base.PluginDirectory
1457		coreConfig.Seal = base.Seal
1458		coreConfig.UnwrapSeal = base.UnwrapSeal
1459		coreConfig.DevToken = base.DevToken
1460		coreConfig.EnableRaw = base.EnableRaw
1461		coreConfig.DisableSealWrap = base.DisableSealWrap
1462		coreConfig.DisableCache = base.DisableCache
1463		coreConfig.LicensingConfig = base.LicensingConfig
1464		coreConfig.License = base.License
1465		coreConfig.LicensePath = base.LicensePath
1466		coreConfig.DisablePerformanceStandby = base.DisablePerformanceStandby
1467		coreConfig.MetricsHelper = base.MetricsHelper
1468		coreConfig.MetricSink = base.MetricSink
1469		coreConfig.SecureRandomReader = base.SecureRandomReader
1470		coreConfig.DisableSentinelTrace = base.DisableSentinelTrace
1471		coreConfig.ClusterName = base.ClusterName
1472		coreConfig.DisableAutopilot = base.DisableAutopilot
1473
1474		if base.BuiltinRegistry != nil {
1475			coreConfig.BuiltinRegistry = base.BuiltinRegistry
1476		}
1477
1478		if !coreConfig.DisableMlock {
1479			base.DisableMlock = false
1480		}
1481
1482		if base.Physical != nil {
1483			coreConfig.Physical = base.Physical
1484		}
1485
1486		if base.HAPhysical != nil {
1487			coreConfig.HAPhysical = base.HAPhysical
1488		}
1489
1490		// Used to set something non-working to test fallback
1491		switch base.ClusterAddr {
1492		case "empty":
1493			coreConfig.ClusterAddr = ""
1494		case "":
1495		default:
1496			coreConfig.ClusterAddr = base.ClusterAddr
1497		}
1498
1499		if base.LogicalBackends != nil {
1500			for k, v := range base.LogicalBackends {
1501				coreConfig.LogicalBackends[k] = v
1502			}
1503		}
1504		if base.CredentialBackends != nil {
1505			for k, v := range base.CredentialBackends {
1506				coreConfig.CredentialBackends[k] = v
1507			}
1508		}
1509		if base.AuditBackends != nil {
1510			for k, v := range base.AuditBackends {
1511				coreConfig.AuditBackends[k] = v
1512			}
1513		}
1514		if base.Logger != nil {
1515			coreConfig.Logger = base.Logger
1516		}
1517
1518		coreConfig.ClusterCipherSuites = base.ClusterCipherSuites
1519
1520		coreConfig.DisableCache = base.DisableCache
1521
1522		coreConfig.DevToken = base.DevToken
1523		coreConfig.CounterSyncInterval = base.CounterSyncInterval
1524		coreConfig.RecoveryMode = base.RecoveryMode
1525
1526		coreConfig.ActivityLogConfig = base.ActivityLogConfig
1527		coreConfig.EnableResponseHeaderHostname = base.EnableResponseHeaderHostname
1528		coreConfig.EnableResponseHeaderRaftNodeID = base.EnableResponseHeaderRaftNodeID
1529
1530		testApplyEntBaseConfig(coreConfig, base)
1531	}
1532	if coreConfig.ClusterName == "" {
1533		coreConfig.ClusterName = t.Name()
1534	}
1535
1536	if coreConfig.ClusterName == "" {
1537		coreConfig.ClusterName = t.Name()
1538	}
1539
1540	if coreConfig.ClusterHeartbeatInterval == 0 {
1541		// Set this lower so that state populates quickly to standby nodes
1542		coreConfig.ClusterHeartbeatInterval = 2 * time.Second
1543	}
1544
1545	if coreConfig.RawConfig == nil {
1546		c := new(server.Config)
1547		c.SharedConfig = &configutil.SharedConfig{LogFormat: logging.UnspecifiedFormat.String()}
1548		coreConfig.RawConfig = c
1549	}
1550
1551	addAuditBackend := len(coreConfig.AuditBackends) == 0
1552	if addAuditBackend {
1553		AddNoopAudit(coreConfig, nil)
1554	}
1555
1556	if coreConfig.Physical == nil && (opts == nil || opts.PhysicalFactory == nil) {
1557		coreConfig.Physical, err = physInmem.NewInmem(nil, testCluster.Logger)
1558		if err != nil {
1559			t.Fatal(err)
1560		}
1561	}
1562	if coreConfig.HAPhysical == nil && (opts == nil || opts.PhysicalFactory == nil) {
1563		haPhys, err := physInmem.NewInmemHA(nil, testCluster.Logger)
1564		if err != nil {
1565			t.Fatal(err)
1566		}
1567		coreConfig.HAPhysical = haPhys.(physical.HABackend)
1568	}
1569
1570	if testCluster.LicensePublicKey == nil {
1571		pubKey, priKey, err := GenerateTestLicenseKeys()
1572		if err != nil {
1573			t.Fatalf("err: %v", err)
1574		}
1575		testCluster.LicensePublicKey = pubKey
1576		testCluster.LicensePrivateKey = priKey
1577	}
1578
1579	if opts != nil && opts.InmemClusterLayers {
1580		if opts.ClusterLayers != nil {
1581			t.Fatalf("cannot specify ClusterLayers when InmemClusterLayers is true")
1582		}
1583		inmemCluster, err := cluster.NewInmemLayerCluster("inmem-cluster", numCores, testCluster.Logger.Named("inmem-cluster"))
1584		if err != nil {
1585			t.Fatal(err)
1586		}
1587		opts.ClusterLayers = inmemCluster
1588	}
1589
1590	// Create cores
1591	testCluster.cleanupFuncs = []func(){}
1592	cores := []*Core{}
1593	coreConfigs := []*CoreConfig{}
1594
1595	for i := 0; i < numCores; i++ {
1596		cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], testCluster.LicensePublicKey)
1597
1598		testCluster.cleanupFuncs = append(testCluster.cleanupFuncs, cleanup)
1599		cores = append(cores, c)
1600		coreConfigs = append(coreConfigs, &localConfig)
1601
1602		if handler != nil {
1603			handlers[i] = handler
1604			servers[i].Handler = handlers[i]
1605		}
1606	}
1607
1608	// Clustering setup
1609	for i := 0; i < numCores; i++ {
1610		testCluster.setupClusterListener(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i])
1611	}
1612
1613	// Create TestClusterCores
1614	var ret []*TestClusterCore
1615	for i := 0; i < numCores; i++ {
1616		tcc := &TestClusterCore{
1617			Core:                 cores[i],
1618			CoreConfig:           coreConfigs[i],
1619			ServerKey:            certInfoSlice[i].key,
1620			ServerKeyPEM:         certInfoSlice[i].keyPEM,
1621			ServerCert:           certInfoSlice[i].cert,
1622			ServerCertBytes:      certInfoSlice[i].certBytes,
1623			ServerCertPEM:        certInfoSlice[i].certPEM,
1624			Address:              addresses[i],
1625			Listeners:            listeners[i],
1626			Handler:              handlers[i],
1627			Server:               servers[i],
1628			TLSConfig:            tlsConfigs[i],
1629			Barrier:              cores[i].barrier,
1630			NodeID:               fmt.Sprintf("core-%d", i),
1631			UnderlyingRawStorage: coreConfigs[i].Physical,
1632			UnderlyingHAStorage:  coreConfigs[i].HAPhysical,
1633		}
1634		tcc.ReloadFuncs = &cores[i].reloadFuncs
1635		tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock
1636		tcc.ReloadFuncsLock.Lock()
1637		(*tcc.ReloadFuncs)["listener|tcp"] = []reloadutil.ReloadFunc{certGetters[i].Reload}
1638		tcc.ReloadFuncsLock.Unlock()
1639
1640		testAdjustUnderlyingStorage(tcc)
1641
1642		ret = append(ret, tcc)
1643	}
1644	testCluster.Cores = ret
1645
1646	// Initialize cores
1647	if opts == nil || !opts.SkipInit {
1648		testCluster.initCores(t, opts, addAuditBackend)
1649	}
1650
1651	// Assign clients
1652	for i := 0; i < numCores; i++ {
1653		testCluster.Cores[i].Client =
1654			testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i])
1655	}
1656
1657	// Extra Setup
1658	for _, tcc := range testCluster.Cores {
1659		testExtraTestCoreSetup(t, testCluster.LicensePrivateKey, tcc)
1660	}
1661
1662	// Cleanup
1663	testCluster.CleanupFunc = func() {
1664		for _, c := range testCluster.cleanupFuncs {
1665			c()
1666		}
1667		if l, ok := testCluster.Logger.(*TestLogger); ok {
1668			if t.Failed() {
1669				_ = l.File.Close()
1670			} else {
1671				_ = os.Remove(l.Path)
1672			}
1673		}
1674	}
1675
1676	// Setup
1677	if opts != nil {
1678		if opts.SetupFunc != nil {
1679			testCluster.SetupFunc = func() {
1680				opts.SetupFunc(t, &testCluster)
1681			}
1682		}
1683	}
1684
1685	return &testCluster
1686}
1687
1688// StopCore performs an orderly shutdown of a core.
1689func (cluster *TestCluster) StopCore(t testing.T, idx int) {
1690	t.Helper()
1691
1692	if idx < 0 || idx > len(cluster.Cores) {
1693		t.Fatalf("invalid core index %d", idx)
1694	}
1695	tcc := cluster.Cores[idx]
1696	tcc.Logger().Info("stopping core", "core", idx)
1697
1698	// Stop listeners and call Finalize()
1699	if err := tcc.stop(); err != nil {
1700		t.Fatal(err)
1701	}
1702
1703	// Run cleanup
1704	cluster.cleanupFuncs[idx]()
1705}
1706
1707// Restart a TestClusterCore that was stopped, by replacing the
1708// underlying Core.
1709func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOptions) {
1710	t.Helper()
1711
1712	if idx < 0 || idx > len(cluster.Cores) {
1713		t.Fatalf("invalid core index %d", idx)
1714	}
1715	tcc := cluster.Cores[idx]
1716	tcc.Logger().Info("restarting core", "core", idx)
1717
1718	// Set up listeners
1719	ln, err := net.ListenTCP("tcp", tcc.Address)
1720	if err != nil {
1721		t.Fatal(err)
1722	}
1723	tcc.Listeners = []*TestListener{
1724		{
1725			Listener: tls.NewListener(ln, tcc.TLSConfig),
1726			Address:  ln.Addr().(*net.TCPAddr),
1727		},
1728	}
1729
1730	tcc.Handler = http.NewServeMux()
1731	tcc.Server = &http.Server{
1732		Handler:  tcc.Handler,
1733		ErrorLog: cluster.Logger.StandardLogger(nil),
1734	}
1735
1736	// Create a new Core
1737	cleanup, newCore, localConfig, coreHandler := cluster.newCore(t, idx, tcc.CoreConfig, opts, tcc.Listeners, cluster.LicensePublicKey)
1738	if coreHandler != nil {
1739		tcc.Handler = coreHandler
1740		tcc.Server.Handler = coreHandler
1741	}
1742
1743	cluster.cleanupFuncs[idx] = cleanup
1744	tcc.Core = newCore
1745	tcc.CoreConfig = &localConfig
1746	tcc.UnderlyingRawStorage = localConfig.Physical
1747
1748	cluster.setupClusterListener(
1749		t, idx, newCore, tcc.CoreConfig,
1750		opts, tcc.Listeners, tcc.Handler)
1751
1752	tcc.Client = cluster.getAPIClient(t, opts, tcc.Listeners[0].Address.Port, tcc.TLSConfig)
1753
1754	testAdjustUnderlyingStorage(tcc)
1755	testExtraTestCoreSetup(t, cluster.LicensePrivateKey, tcc)
1756
1757	// Start listeners
1758	for _, ln := range tcc.Listeners {
1759		tcc.Logger().Info("starting listener for core", "port", ln.Address.Port)
1760		go tcc.Server.Serve(ln)
1761	}
1762
1763	tcc.Logger().Info("restarted test core", "core", idx)
1764}
1765
1766func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey ed25519.PublicKey) (func(), *Core, CoreConfig, http.Handler) {
1767	localConfig := *coreConfig
1768	cleanupFunc := func() {}
1769	var handler http.Handler
1770
1771	var disablePR1103 bool
1772	if opts != nil && opts.PR1103Disabled {
1773		disablePR1103 = true
1774	}
1775
1776	var firstCoreNumber int
1777	if opts != nil {
1778		firstCoreNumber = opts.FirstCoreNumber
1779	}
1780
1781	localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[0].Address.Port)
1782
1783	// if opts.SealFunc is provided, use that to generate a seal for the config instead
1784	if opts != nil && opts.SealFunc != nil {
1785		localConfig.Seal = opts.SealFunc()
1786	}
1787	if opts != nil && opts.UnwrapSealFunc != nil {
1788		localConfig.UnwrapSeal = opts.UnwrapSealFunc()
1789	}
1790
1791	if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) {
1792		localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", idx))
1793	}
1794	if opts != nil && opts.PhysicalFactory != nil {
1795		physBundle := opts.PhysicalFactory(t, idx, localConfig.Logger, opts.PhysicalFactoryConfig)
1796		switch {
1797		case physBundle == nil && coreConfig.Physical != nil:
1798		case physBundle == nil && coreConfig.Physical == nil:
1799			t.Fatal("PhysicalFactory produced no physical and none in CoreConfig")
1800		case physBundle != nil:
1801			// Storage backend setup
1802			if physBundle.Backend != nil {
1803				testCluster.Logger.Info("created physical backend", "instance", idx)
1804				coreConfig.Physical = physBundle.Backend
1805				localConfig.Physical = physBundle.Backend
1806			}
1807
1808			// HA Backend setup
1809			haBackend := physBundle.HABackend
1810			if haBackend == nil {
1811				if ha, ok := physBundle.Backend.(physical.HABackend); ok {
1812					haBackend = ha
1813				}
1814			}
1815			coreConfig.HAPhysical = haBackend
1816			localConfig.HAPhysical = haBackend
1817
1818			// Cleanup setup
1819			if physBundle.Cleanup != nil {
1820				cleanupFunc = physBundle.Cleanup
1821			}
1822		}
1823	}
1824
1825	if opts != nil && opts.ClusterLayers != nil {
1826		localConfig.ClusterNetworkLayer = opts.ClusterLayers.Layers()[idx]
1827		localConfig.ClusterAddr = "https://" + localConfig.ClusterNetworkLayer.Listeners()[0].Addr().String()
1828	}
1829
1830	switch {
1831	case localConfig.LicensingConfig != nil:
1832		if pubKey != nil {
1833			localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey)
1834		}
1835	default:
1836		localConfig.LicensingConfig = testGetLicensingConfig(pubKey)
1837	}
1838
1839	if localConfig.MetricsHelper == nil {
1840		inm := metrics.NewInmemSink(10*time.Second, time.Minute)
1841		metrics.DefaultInmemSignal(inm)
1842		localConfig.MetricsHelper = metricsutil.NewMetricsHelper(inm, false)
1843	}
1844	if opts != nil && opts.CoreMetricSinkProvider != nil {
1845		localConfig.MetricSink, localConfig.MetricsHelper = opts.CoreMetricSinkProvider(localConfig.ClusterName)
1846	}
1847
1848	if opts != nil && opts.CoreMetricSinkProvider != nil {
1849		localConfig.MetricSink, localConfig.MetricsHelper = opts.CoreMetricSinkProvider(localConfig.ClusterName)
1850	}
1851
1852	localConfig.NumExpirationWorkers = numExpirationWorkersTest
1853
1854	c, err := NewCore(&localConfig)
1855	if err != nil {
1856		t.Fatalf("err: %v", err)
1857	}
1858	c.coreNumber = firstCoreNumber + idx
1859	c.PR1103disabled = disablePR1103
1860	if opts != nil && opts.HandlerFunc != nil {
1861		props := opts.DefaultHandlerProperties
1862		props.Core = c
1863		if props.ListenerConfig != nil && props.ListenerConfig.MaxRequestDuration == 0 {
1864			props.ListenerConfig.MaxRequestDuration = DefaultMaxRequestDuration
1865		}
1866		handler = opts.HandlerFunc(&props)
1867	}
1868
1869	// Set this in case the Seal was manually set before the core was
1870	// created
1871	if localConfig.Seal != nil {
1872		localConfig.Seal.SetCore(c)
1873	}
1874
1875	return cleanupFunc, c, localConfig, handler
1876}
1877
1878func (testCluster *TestCluster) setupClusterListener(
1879	t testing.T, idx int, core *Core, coreConfig *CoreConfig,
1880	opts *TestClusterOptions, listeners []*TestListener, handler http.Handler) {
1881	if coreConfig.ClusterAddr == "" {
1882		return
1883	}
1884
1885	clusterAddrGen := func(lns []*TestListener, port int) []*net.TCPAddr {
1886		ret := make([]*net.TCPAddr, len(lns))
1887		for i, ln := range lns {
1888			ret[i] = &net.TCPAddr{
1889				IP:   ln.Address.IP,
1890				Port: port,
1891			}
1892		}
1893		return ret
1894	}
1895
1896	baseClusterListenPort := 0
1897	if opts != nil && opts.BaseClusterListenPort != 0 {
1898		if opts.BaseListenAddress == "" {
1899			t.Fatal("BaseListenAddress is not specified")
1900		}
1901		baseClusterListenPort = opts.BaseClusterListenPort
1902	}
1903
1904	port := 0
1905	if baseClusterListenPort != 0 {
1906		port = baseClusterListenPort + idx
1907	}
1908	core.Logger().Info("assigning cluster listener for test core", "core", idx, "port", port)
1909	core.SetClusterListenerAddrs(clusterAddrGen(listeners, port))
1910	core.SetClusterHandler(handler)
1911}
1912
1913func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAuditBackend bool) {
1914	leader := tc.Cores[0]
1915
1916	bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, leader.Core, leader.Handler)
1917	barrierKeys, _ := copystructure.Copy(bKeys)
1918	tc.BarrierKeys = barrierKeys.([][]byte)
1919	recoveryKeys, _ := copystructure.Copy(rKeys)
1920	tc.RecoveryKeys = recoveryKeys.([][]byte)
1921	tc.RootToken = root
1922
1923	// Write root token and barrier keys
1924	err := ioutil.WriteFile(filepath.Join(tc.TempDir, "root_token"), []byte(root), 0o755)
1925	if err != nil {
1926		t.Fatal(err)
1927	}
1928	var buf bytes.Buffer
1929	for i, key := range tc.BarrierKeys {
1930		buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
1931		if i < len(tc.BarrierKeys)-1 {
1932			buf.WriteRune('\n')
1933		}
1934	}
1935	err = ioutil.WriteFile(filepath.Join(tc.TempDir, "barrier_keys"), buf.Bytes(), 0o755)
1936	if err != nil {
1937		t.Fatal(err)
1938	}
1939	for i, key := range tc.RecoveryKeys {
1940		buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
1941		if i < len(tc.RecoveryKeys)-1 {
1942			buf.WriteRune('\n')
1943		}
1944	}
1945	err = ioutil.WriteFile(filepath.Join(tc.TempDir, "recovery_keys"), buf.Bytes(), 0o755)
1946	if err != nil {
1947		t.Fatal(err)
1948	}
1949
1950	// Unseal first core
1951	for _, key := range bKeys {
1952		if _, err := leader.Core.Unseal(TestKeyCopy(key)); err != nil {
1953			t.Fatalf("unseal err: %s", err)
1954		}
1955	}
1956
1957	ctx := context.Background()
1958
1959	// If stored keys is supported, the above will no no-op, so trigger auto-unseal
1960	// using stored keys to try to unseal
1961	if err := leader.Core.UnsealWithStoredKeys(ctx); err != nil {
1962		t.Fatal(err)
1963	}
1964
1965	// Verify unsealed
1966	if leader.Core.Sealed() {
1967		t.Fatal("should not be sealed")
1968	}
1969
1970	TestWaitActive(t, leader.Core)
1971
1972	// Existing tests rely on this; we can make a toggle to disable it
1973	// later if we want
1974	kvReq := &logical.Request{
1975		Operation:   logical.UpdateOperation,
1976		ClientToken: tc.RootToken,
1977		Path:        "sys/mounts/secret",
1978		Data: map[string]interface{}{
1979			"type":        "kv",
1980			"path":        "secret/",
1981			"description": "key/value secret storage",
1982			"options": map[string]string{
1983				"version": "1",
1984			},
1985		},
1986	}
1987	resp, err := leader.Core.HandleRequest(namespace.RootContext(ctx), kvReq)
1988	if err != nil {
1989		t.Fatal(err)
1990	}
1991	if resp.IsError() {
1992		t.Fatal(err)
1993	}
1994
1995	cfg, err := leader.Core.seal.BarrierConfig(ctx)
1996	if err != nil {
1997		t.Fatal(err)
1998	}
1999
2000	// Unseal other cores unless otherwise specified
2001	numCores := len(tc.Cores)
2002	if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 {
2003		for i := 1; i < numCores; i++ {
2004			tc.Cores[i].Core.seal.SetCachedBarrierConfig(cfg)
2005			for _, key := range bKeys {
2006				if _, err := tc.Cores[i].Core.Unseal(TestKeyCopy(key)); err != nil {
2007					t.Fatalf("unseal err: %s", err)
2008				}
2009			}
2010
2011			// If stored keys is supported, the above will no no-op, so trigger auto-unseal
2012			// using stored keys
2013			if err := tc.Cores[i].Core.UnsealWithStoredKeys(ctx); err != nil {
2014				t.Fatal(err)
2015			}
2016		}
2017
2018		// Let them come fully up to standby
2019		time.Sleep(2 * time.Second)
2020
2021		// Ensure cluster connection info is populated.
2022		// Other cores should not come up as leaders.
2023		for i := 1; i < numCores; i++ {
2024			isLeader, _, _, err := tc.Cores[i].Core.Leader()
2025			if err != nil {
2026				t.Fatal(err)
2027			}
2028			if isLeader {
2029				t.Fatalf("core[%d] should not be leader", i)
2030			}
2031		}
2032	}
2033
2034	//
2035	// Set test cluster core(s) and test cluster
2036	//
2037	cluster, err := leader.Core.Cluster(context.Background())
2038	if err != nil {
2039		t.Fatal(err)
2040	}
2041	tc.ID = cluster.ID
2042
2043	if addAuditBackend {
2044		// Enable auditing.
2045		auditReq := &logical.Request{
2046			Operation:   logical.UpdateOperation,
2047			ClientToken: tc.RootToken,
2048			Path:        "sys/audit/noop",
2049			Data: map[string]interface{}{
2050				"type": "noop",
2051			},
2052		}
2053		resp, err = leader.Core.HandleRequest(namespace.RootContext(ctx), auditReq)
2054		if err != nil {
2055			t.Fatal(err)
2056		}
2057
2058		if resp.IsError() {
2059			t.Fatal(err)
2060		}
2061	}
2062}
2063
2064func (testCluster *TestCluster) getAPIClient(
2065	t testing.T, opts *TestClusterOptions,
2066	port int, tlsConfig *tls.Config) *api.Client {
2067	transport := cleanhttp.DefaultPooledTransport()
2068	transport.TLSClientConfig = tlsConfig.Clone()
2069	if err := http2.ConfigureTransport(transport); err != nil {
2070		t.Fatal(err)
2071	}
2072	client := &http.Client{
2073		Transport: transport,
2074		CheckRedirect: func(*http.Request, []*http.Request) error {
2075			// This can of course be overridden per-test by using its own client
2076			return fmt.Errorf("redirects not allowed in these tests")
2077		},
2078	}
2079	config := api.DefaultConfig()
2080	if config.Error != nil {
2081		t.Fatal(config.Error)
2082	}
2083	config.Address = fmt.Sprintf("https://127.0.0.1:%d", port)
2084	config.HttpClient = client
2085	config.MaxRetries = 0
2086	apiClient, err := api.NewClient(config)
2087	if err != nil {
2088		t.Fatal(err)
2089	}
2090	if opts == nil || !opts.SkipInit {
2091		apiClient.SetToken(testCluster.RootToken)
2092	}
2093	return apiClient
2094}
2095
2096func NewMockBuiltinRegistry() *mockBuiltinRegistry {
2097	return &mockBuiltinRegistry{
2098		forTesting: map[string]consts.PluginType{
2099			"mysql-database-plugin":      consts.PluginTypeDatabase,
2100			"postgresql-database-plugin": consts.PluginTypeDatabase,
2101		},
2102	}
2103}
2104
// mockBuiltinRegistry is a test-only stand-in for the builtin plugin
// registry, backed by a fixed name -> plugin-type map.
type mockBuiltinRegistry struct {
	// forTesting maps plugin names to their plugin types; only entries in
	// this map are resolvable via Get.
	forTesting map[string]consts.PluginType
}
2108
2109func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) {
2110	testPluginType, ok := m.forTesting[name]
2111	if !ok {
2112		return nil, false
2113	}
2114	if pluginType != testPluginType {
2115		return nil, false
2116	}
2117	if name == "postgresql-database-plugin" {
2118		return dbPostgres.New, true
2119	}
2120	return dbMysql.New(dbMysql.DefaultUserNameTemplate), true
2121}
2122
2123// Keys only supports getting a realistic list of the keys for database plugins.
2124func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
2125	if pluginType != consts.PluginTypeDatabase {
2126		return []string{}
2127	}
2128	/*
2129		This is a hard-coded reproduction of the db plugin keys in helper/builtinplugins/registry.go.
2130		The registry isn't directly used because it causes import cycles.
2131	*/
2132	return []string{
2133		"mysql-database-plugin",
2134		"mysql-aurora-database-plugin",
2135		"mysql-rds-database-plugin",
2136		"mysql-legacy-database-plugin",
2137
2138		"cassandra-database-plugin",
2139		"couchbase-database-plugin",
2140		"elasticsearch-database-plugin",
2141		"hana-database-plugin",
2142		"influxdb-database-plugin",
2143		"mongodb-database-plugin",
2144		"mongodbatlas-database-plugin",
2145		"mssql-database-plugin",
2146		"postgresql-database-plugin",
2147		"redshift-database-plugin",
2148		"snowflake-database-plugin",
2149	}
2150}
2151
// Contains always reports false; the mock registry does not support
// membership checks.
func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool {
	return false
}
2155
// NoopAudit is an audit backend for tests that records every logged
// request/response in memory instead of writing it anywhere. The ReqErr and
// RespErr fields let a test force the backend to return an error.
type NoopAudit struct {
	Config         *audit.BackendConfig
	ReqErr         error                 // returned by LogRequest
	ReqAuth        []*logical.Auth       // Auth from each logged request
	Req            []*logical.Request    // each logged request
	ReqHeaders     []map[string][]string // headers from each logged request
	ReqNonHMACKeys []string              // most recent request non-HMAC keys
	ReqErrs        []error               // OuterErr from each logged request

	RespErr            error               // returned by LogResponse
	RespAuth           []*logical.Auth     // Auth from each logged response
	RespReq            []*logical.Request  // Request from each logged response
	Resp               []*logical.Response // each logged response
	RespNonHMACKeys    []string            // most recent response non-HMAC keys
	RespReqNonHMACKeys []string            // most recent response's request non-HMAC keys
	RespErrs           []error             // OuterErr from each logged response

	// salt is lazily initialized by Salt(); saltMutex guards it.
	salt      *salt.Salt
	saltMutex sync.RWMutex
}
2176
2177func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
2178	n.ReqAuth = append(n.ReqAuth, in.Auth)
2179	n.Req = append(n.Req, in.Request)
2180	n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers)
2181	n.ReqNonHMACKeys = in.NonHMACReqDataKeys
2182	n.ReqErrs = append(n.ReqErrs, in.OuterErr)
2183	return n.ReqErr
2184}
2185
2186func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
2187	n.RespAuth = append(n.RespAuth, in.Auth)
2188	n.RespReq = append(n.RespReq, in.Request)
2189	n.Resp = append(n.Resp, in.Response)
2190	n.RespErrs = append(n.RespErrs, in.OuterErr)
2191
2192	if in.Response != nil {
2193		n.RespNonHMACKeys = in.NonHMACRespDataKeys
2194		n.RespReqNonHMACKeys = in.NonHMACReqDataKeys
2195	}
2196
2197	return n.RespErr
2198}
2199
// LogTestMessage is a no-op; the noop audit backend discards test messages.
func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, options map[string]string) error {
	return nil
}
2203
// Salt lazily creates and caches the backend's salt using double-checked
// locking: a fast path under the read lock, then a re-check under the write
// lock before constructing a new salt from the backend config.
func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) {
	n.saltMutex.RLock()
	if n.salt != nil {
		defer n.saltMutex.RUnlock()
		return n.salt, nil
	}
	// Drop the read lock before taking the write lock (RWMutex is not
	// upgradable).
	n.saltMutex.RUnlock()
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	// Another goroutine may have initialized the salt between the unlock
	// and the lock above.
	if n.salt != nil {
		return n.salt, nil
	}
	salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig)
	if err != nil {
		return nil, err
	}
	n.salt = salt
	return salt, nil
}
2223
2224func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) {
2225	salt, err := n.Salt(ctx)
2226	if err != nil {
2227		return "", err
2228	}
2229	return salt.GetIdentifiedHMAC(data), nil
2230}
2231
// Reload is a no-op; the noop audit backend has nothing to reload.
func (n *NoopAudit) Reload(ctx context.Context) error {
	return nil
}
2235
// Invalidate discards the cached salt so the next Salt call rebuilds it.
func (n *NoopAudit) Invalidate(ctx context.Context) {
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	n.salt = nil
}
2241