1package sealmigration
2
3import (
4	"context"
5	"encoding/base64"
6	"fmt"
7	"testing"
8	"time"
9
10	"github.com/go-test/deep"
11	"github.com/hashicorp/go-hclog"
12	wrapping "github.com/hashicorp/go-kms-wrapping"
13	"github.com/hashicorp/vault/api"
14	"github.com/hashicorp/vault/helper/namespace"
15	"github.com/hashicorp/vault/helper/testhelpers"
16	sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
17	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
18	"github.com/hashicorp/vault/http"
19	"github.com/hashicorp/vault/physical/raft"
20	"github.com/hashicorp/vault/vault"
21)
22
const (
	// Every migration test runs a three-node cluster using 3-of-3 shamir
	// key shares.
	numTestCores = 3
	keyShares    = 3
	keyThreshold = 3

	// Base listen ports, one disjoint range per migration scenario, so the
	// test clusters never collide on ports when run in the same process.
	BasePort_ShamirToTransit_Pre14  = 20000
	BasePort_TransitToShamir_Pre14  = 21000
	BasePort_ShamirToTransit_Post14 = 22000
	BasePort_TransitToShamir_Post14 = 23000
	BasePort_TransitToTransit       = 24000
)
34
// ParamTestSealMigrationTransitToShamir_Pre14 exercises transit-to-shamir
// seal migration using the pre-1.4 mechanics: initialize the backend with a
// transit autoseal, migrate to shamir across a full cluster restart, and then
// verify the cluster runs on shamir alone.  After migration the old recovery
// keys serve as the barrier keys.
func ParamTestSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
	// Create the transit server.
	tss := sealhelper.NewTransitSealServer(t, 0)
	defer func() {
		// tss is set to nil below once migration succeeds; only clean up
		// here if we bailed out before that point.
		if tss != nil {
			tss.Cleanup()
		}
	}()
	sealKeyName := "transit-seal-key"
	tss.MakeKey(t, sealKeyName)

	// Initialize the backend with transit.
	cluster, opts := InitializeTransit(t, logger, storage, basePort, tss, sealKeyName)
	rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys
	cluster.EnsureCoresSealed(t)
	cluster.Cleanup()
	storage.Cleanup(t, cluster)

	// Migrate the backend from transit to shamir
	migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, opts.SealFunc, rootToken, recoveryKeys)

	// Now that migration is done, we can nuke the transit server, since we
	// can unseal without it.
	tss.Cleanup()
	tss = nil

	// Run the backend with shamir.  Note that the recovery keys are now the
	// barrier keys.
	runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
}
65
// ParamTestSealMigrationShamirToTransit_Pre14 exercises shamir-to-transit
// seal migration using the pre-1.4 mechanics: initialize the backend with
// shamir, migrate to a transit autoseal across a full cluster restart, and
// then run the cluster with transit.  After migration the old barrier keys
// serve as the recovery keys.
func ParamTestSealMigrationShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
	// Initialize the backend using shamir
	cluster, _ := initializeShamir(t, logger, storage, basePort)
	rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys
	cluster.Cleanup()
	storage.Cleanup(t, cluster)

	// Create the transit server.  Unlike the transit-to-shamir tests, the
	// transit server must stay alive for the remainder of the test, since
	// the final cluster unseals through it.
	tss := sealhelper.NewTransitSealServer(t, 0)
	defer func() {
		tss.EnsureCoresSealed(t)
		tss.Cleanup()
	}()
	tss.MakeKey(t, "transit-seal-key-1")

	// Migrate the backend from shamir to transit.  Note that the barrier keys
	// are now the recovery keys.
	sealFunc := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)

	// Run the backend with transit.
	runAutoseal(t, logger, storage, basePort, rootToken, sealFunc)
}
88
// ParamTestSealMigrationTransitToShamir_Post14 exercises transit-to-shamir
// seal migration using the post-1.4 rolling-restart mechanics (see
// migratePost14).  The transit seal becomes the unwrap seal, a nil SealFunc
// signals migration to shamir, and the old recovery keys become the barrier
// keys.
func ParamTestSealMigrationTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
	// Create the transit server.
	tss := sealhelper.NewTransitSealServer(t, 0)
	defer func() {
		// tss is set to nil below once migration succeeds; only clean up
		// here if we bailed out before that point.
		if tss != nil {
			tss.Cleanup()
		}
	}()
	sealKeyName := "transit-seal-key-1"
	tss.MakeKey(t, sealKeyName)

	// Initialize the backend with transit.
	cluster, opts := InitializeTransit(t, logger, storage, basePort, tss, sealKeyName)
	rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys

	// Migrate the backend from transit to shamir
	opts.UnwrapSealFunc = opts.SealFunc
	opts.SealFunc = func() vault.Seal { return nil }
	leaderIdx := migratePost14(t, storage, cluster, opts, cluster.RecoveryKeys)
	validateMigration(t, storage, cluster, leaderIdx, verifySealConfigShamir)

	cluster.Cleanup()
	storage.Cleanup(t, cluster)

	// Now that migration is done, we can nuke the transit server, since we
	// can unseal without it.
	tss.Cleanup()
	tss = nil

	// Run the backend with shamir.  Note that the recovery keys are now the
	// barrier keys.
	runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
}
122
// ParamTestSealMigrationShamirToTransit_Post14 exercises shamir-to-transit
// seal migration using the post-1.4 rolling-restart mechanics (see
// migratePost14).  The barrier keys are used as recovery keys during the
// migration, and the cluster finishes running on the transit autoseal.
func ParamTestSealMigrationShamirToTransit_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
	// Initialize the backend using shamir
	cluster, opts := initializeShamir(t, logger, storage, basePort)

	// Create the transit server.  It must outlive the test body since the
	// final cluster unseals through it.
	tss := sealhelper.NewTransitSealServer(t, 0)
	defer tss.Cleanup()
	sealKeyName := "transit-seal-key-1"
	tss.MakeKey(t, sealKeyName)

	// Migrate the backend from shamir to transit.
	opts.SealFunc = func() vault.Seal {
		return tss.MakeSeal(t, sealKeyName)
	}

	// Restart each follower with the new config, and migrate to Transit.
	// Note that the barrier keys are being used as recovery keys.
	leaderIdx := migratePost14(t, storage, cluster, opts, cluster.BarrierKeys)
	validateMigration(t, storage, cluster, leaderIdx, verifySealConfigTransit)
	cluster.Cleanup()
	storage.Cleanup(t, cluster)

	// Run the backend with transit.
	runAutoseal(t, logger, storage, basePort, cluster.RootToken, opts.SealFunc)
}
148
// ParamTestSealMigration_TransitToTransit exercises migration between two
// distinct transit autoseals using the post-1.4 rolling-restart mechanics:
// the first transit seal becomes the unwrap seal, the second becomes the
// active seal, and the first server can be torn down once migration is done.
func ParamTestSealMigration_TransitToTransit(t *testing.T, logger hclog.Logger,
	storage teststorage.ReusableStorage, basePort int) {

	// Create the first transit server (the seal being migrated away from).
	tss1 := sealhelper.NewTransitSealServer(t, 0)
	defer func() {
		// tss1 is set to nil below once migration succeeds; only clean up
		// here if we bailed out before that point.
		if tss1 != nil {
			tss1.Cleanup()
		}
	}()
	sealKeyName := "transit-seal-key-1"
	tss1.MakeKey(t, sealKeyName)

	// Initialize the backend with transit.
	cluster, opts := InitializeTransit(t, logger, storage, basePort, tss1, sealKeyName)
	rootToken := cluster.RootToken

	// Create the second transit server (the migration target).
	tss2 := sealhelper.NewTransitSealServer(t, 1)
	defer func() {
		tss2.Cleanup()
	}()
	tss2.MakeKey(t, "transit-seal-key-2")

	// Migrate the backend from transit to transit.
	opts.UnwrapSealFunc = opts.SealFunc
	opts.SealFunc = func() vault.Seal {
		return tss2.MakeSeal(t, "transit-seal-key-2")
	}
	leaderIdx := migratePost14(t, storage, cluster, opts, cluster.RecoveryKeys)
	validateMigration(t, storage, cluster, leaderIdx, verifySealConfigTransit)
	cluster.Cleanup()
	storage.Cleanup(t, cluster)

	// Now that migration is done, we can nuke the transit server, since we
	// can unseal without it.
	tss1.Cleanup()
	tss1 = nil

	// Run the backend with transit.
	runAutoseal(t, logger, storage, basePort, rootToken, opts.SealFunc)
}
191
192func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int,
193	tss *sealhelper.TransitSealServer, sealFunc func() vault.Seal, rootToken string, recoveryKeys [][]byte) {
194
195	baseClusterPort := basePort + 10
196
197	var conf vault.CoreConfig
198	opts := vault.TestClusterOptions{
199		Logger:                logger.Named("migrateFromTransitToShamir"),
200		HandlerFunc:           http.Handler,
201		NumCores:              numTestCores,
202		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
203		BaseClusterListenPort: baseClusterPort,
204		SkipInit:              true,
205		UnwrapSealFunc:        sealFunc,
206	}
207	storage.Setup(&conf, &opts)
208	conf.DisableAutopilot = true
209	cluster := vault.NewTestCluster(t, &conf, &opts)
210	cluster.Start()
211	defer func() {
212		cluster.Cleanup()
213		storage.Cleanup(t, cluster)
214	}()
215
216	leader := cluster.Cores[0]
217	client := leader.Client
218	client.SetToken(rootToken)
219
220	// Attempt to unseal while the transit server is unreachable.  Although
221	// we're unsealing using the recovery keys, this is still an
222	// autounseal, so it should fail.
223	tss.EnsureCoresSealed(t)
224	unsealMigrate(t, client, recoveryKeys, false)
225	tss.UnsealCores(t)
226	testhelpers.WaitForActiveNode(t, tss.TestCluster)
227
228	// Unseal and migrate to Shamir. Although we're unsealing using the
229	// recovery keys, this is still an autounseal.
230	unsealMigrate(t, client, recoveryKeys, true)
231	testhelpers.WaitForActiveNode(t, cluster)
232
233	// Wait for migration to finish.  Sadly there is no callback, and the
234	// test will fail later on if we don't do this.
235	time.Sleep(10 * time.Second)
236
237	// Read the secret
238	secret, err := client.Logical().Read("kv-wrapped/foo")
239	if err != nil {
240		t.Fatal(err)
241	}
242	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
243		t.Fatal(diff)
244	}
245
246	// Write a new secret
247	_, err = leader.Client.Logical().Write("kv-wrapped/test", map[string]interface{}{
248		"zork": "quux",
249	})
250	if err != nil {
251		t.Fatal(err)
252	}
253
254	// Make sure the seal configs were updated correctly.
255	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
256	if err != nil {
257		t.Fatal(err)
258	}
259	verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
260	if r != nil {
261		t.Fatalf("expected nil recovery config, got: %#v", r)
262	}
263
264	cluster.EnsureCoresSealed(t)
265}
266
// migrateFromShamirToTransit_Pre14 restarts the cluster in pre-1.4 seal
// migration mode (providing a transit SealFunc while the stored config is
// shamir), unseals with the old barrier keys acting as recovery keys, waits
// for migration to complete, and verifies the seal config and secret data.
// It returns the SealFunc so the caller can bring the cluster back up with
// the same transit seal.
func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte) func() vault.Seal {
	baseClusterPort := basePort + 10

	conf := vault.CoreConfig{
		DisableAutopilot: true,
	}
	opts := vault.TestClusterOptions{
		Logger:                logger.Named("migrateFromShamirToTransit"),
		HandlerFunc:           http.Handler,
		NumCores:              numTestCores,
		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
		BaseClusterListenPort: baseClusterPort,
		SkipInit:              true,
		// N.B. Providing a transit seal puts us in migration mode.
		// NOTE(review): the key name here differs from the
		// "transit-seal-key-1" created by the caller; presumably the transit
		// seal creates the key on first use -- TODO confirm.
		SealFunc: func() vault.Seal {
			return tss.MakeSeal(t, "transit-seal-key")
		},
	}
	storage.Setup(&conf, &opts)
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer func() {
		cluster.Cleanup()
		storage.Cleanup(t, cluster)
	}()

	leader := cluster.Cores[0]
	leader.Client.SetToken(rootToken)

	// Unseal and migrate to Transit.
	unsealMigrate(t, leader.Client, recoveryKeys, true)

	// Wait for migration to finish.
	awaitMigration(t, leader.Client)

	verifySealConfigTransit(t, leader)

	// Read the secrets
	secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
	if err != nil {
		t.Fatal(err)
	}
	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
		t.Fatal(diff)
	}

	// Write a new secret
	_, err = leader.Client.Logical().Write("kv-wrapped/test", map[string]interface{}{
		"zork": "quux",
	})
	if err != nil {
		t.Fatal(err)
	}

	return opts.SealFunc
}
323
324func validateMigration(t *testing.T, storage teststorage.ReusableStorage,
325	cluster *vault.TestCluster, leaderIdx int, f func(t *testing.T, core *vault.TestClusterCore)) {
326	t.Helper()
327
328	leader := cluster.Cores[leaderIdx]
329
330	secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
331	if err != nil {
332		t.Fatal(err)
333	}
334	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
335		t.Fatal(diff)
336	}
337
338	var appliedIndex uint64
339	if storage.IsRaft {
340		appliedIndex = testhelpers.RaftAppliedIndex(leader)
341	}
342
343	for _, core := range cluster.Cores {
344		if storage.IsRaft {
345			testhelpers.WaitForRaftApply(t, core, appliedIndex)
346		}
347
348		f(t, core)
349	}
350}
351
// migratePost14 performs a seal migration using the post-1.4 rolling-restart
// mechanics: each follower is restarted with the new seal config and unsealed
// with the migrate flag, core 0 is stepped down so that one of the migrated
// followers takes over and performs the migration, and finally core 0 is
// restarted and unsealed without the migrate flag.  It returns the index of
// the node that ended up as leader.
func migratePost14(t *testing.T, storage teststorage.ReusableStorage, cluster *vault.TestCluster,
	opts *vault.TestClusterOptions, unsealKeys [][]byte) int {

	cluster.Logger = cluster.Logger.Named("migration")
	// Restart each follower with the new config, and migrate.
	for i := 1; i < len(cluster.Cores); i++ {
		cluster.StopCore(t, i)
		if storage.IsRaft {
			teststorage.CloseRaftStorage(t, cluster, i)
		}
		cluster.StartCore(t, i, opts)

		unsealMigrate(t, cluster.Cores[i].Client, unsealKeys, true)
	}
	testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

	// Step down the active node which will kick off the migration on one of the
	// other nodes.
	err := cluster.Cores[0].Client.Sys().StepDown()
	if err != nil {
		t.Fatal(err)
	}

	// Wait for the followers to establish a new leader.  Retry for up to 30
	// seconds in case leadership briefly bounces back to core 0.
	var leaderIdx int
	for i := 0; i < 30; i++ {
		leaderIdx, err = testhelpers.AwaitLeader(t, cluster)
		if err != nil {
			t.Fatal(err)
		}
		if leaderIdx != 0 {
			break
		}
		time.Sleep(1 * time.Second)
	}
	if leaderIdx == 0 {
		t.Fatalf("Core 0 cannot be the leader right now")
	}
	leader := cluster.Cores[leaderIdx]

	// Wait for migration to occur on the leader
	awaitMigration(t, leader.Client)

	var appliedIndex uint64
	if storage.IsRaft {
		// Make sure core 0 has observed the migration writes before we
		// restart it below.
		appliedIndex = testhelpers.RaftAppliedIndex(leader)
		testhelpers.WaitForRaftApply(t, cluster.Cores[0], appliedIndex)
	}

	// Bring down core 0 (the node we stepped down earlier; the current
	// leader is leaderIdx, which stays up).
	cluster.StopCore(t, 0)
	if storage.IsRaft {
		teststorage.CloseRaftStorage(t, cluster, 0)
	}

	// Bring core 0 back up; we still have the seal migration config in place,
	// but now that migration has been performed we should be able to unseal
	// with the new seal and without using the `migrate` unseal option.
	cluster.StartCore(t, 0, opts)
	unseal(t, cluster.Cores[0].Client, unsealKeys)

	// Write a new secret
	_, err = leader.Client.Logical().Write("kv-wrapped/test", map[string]interface{}{
		"zork": "quux",
	})
	if err != nil {
		t.Fatal(err)
	}

	return leaderIdx
}
423
424func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) {
425	t.Helper()
426	if err := attemptUnseal(client, keys); err == nil {
427		t.Fatal("expected error due to lack of migrate parameter")
428	}
429	if err := attemptUnsealMigrate(client, keys, transitServerAvailable); err != nil {
430		t.Fatal(err)
431	}
432}
433
434func attemptUnsealMigrate(client *api.Client, keys [][]byte, transitServerAvailable bool) error {
435	for i, key := range keys {
436		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
437			Key:     base64.StdEncoding.EncodeToString(key),
438			Migrate: true,
439		})
440
441		if i < keyThreshold-1 {
442			// Not enough keys have been provided yet.
443			if err != nil {
444				return err
445			}
446		} else {
447			if transitServerAvailable {
448				// The transit server is running.
449				if err != nil {
450					return err
451				}
452				if resp == nil || resp.Sealed {
453					return fmt.Errorf("expected unsealed state; got %#v", resp)
454				}
455			} else {
456				// The transit server is stopped.
457				if err == nil {
458					return fmt.Errorf("expected error due to transit server being stopped.")
459				}
460			}
461			break
462		}
463	}
464	return nil
465}
466
467// awaitMigration waits for migration to finish.
468func awaitMigration(t *testing.T, client *api.Client) {
469	timeout := time.Now().Add(60 * time.Second)
470	for {
471		if time.Now().After(timeout) {
472			break
473		}
474
475		resp, err := client.Sys().SealStatus()
476		if err != nil {
477			t.Fatal(err)
478		}
479		if !resp.Migration {
480			return
481		}
482
483		time.Sleep(time.Second)
484	}
485
486	t.Fatalf("migration did not complete.")
487}
488
489func unseal(t *testing.T, client *api.Client, keys [][]byte) {
490	t.Helper()
491	if err := attemptUnseal(client, keys); err != nil {
492		t.Fatal(err)
493	}
494}
495
496func attemptUnseal(client *api.Client, keys [][]byte) error {
497	for i, key := range keys {
498
499		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
500			Key: base64.StdEncoding.EncodeToString(key),
501		})
502		if i < keyThreshold-1 {
503			// Not enough keys have been provided yet.
504			if err != nil {
505				return err
506			}
507		} else {
508			if err != nil {
509				return err
510			}
511			if resp == nil || resp.Sealed {
512				return fmt.Errorf("expected unsealed state; got %#v", resp)
513			}
514			break
515		}
516	}
517	return nil
518}
519
520func verifySealConfigShamir(t *testing.T, core *vault.TestClusterCore) {
521	t.Helper()
522	b, r, err := core.PhysicalSealConfigs(context.Background())
523	if err != nil {
524		t.Fatal(err)
525	}
526	verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
527	if r != nil {
528		t.Fatal("should not have recovery config for shamir")
529	}
530}
531
532func verifySealConfigTransit(t *testing.T, core *vault.TestClusterCore) {
533	t.Helper()
534	b, r, err := core.PhysicalSealConfigs(context.Background())
535	if err != nil {
536		t.Fatal(err)
537	}
538	verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
539	verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
540}
541
542// verifyBarrierConfig verifies that a barrier configuration is correct.
543func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
544	t.Helper()
545	if cfg.Type != sealType {
546		t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType)
547	}
548	if cfg.SecretShares != shares {
549		t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares)
550	}
551	if cfg.SecretThreshold != threshold {
552		t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold)
553	}
554	if cfg.StoredShares != stored {
555		t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored)
556	}
557}
558
// initializeShamir initializes a brand new backend storage with Shamir: it
// brings up a three-node cluster, unseals it (joining raft followers when the
// storage backend is raft), mounts a seal-wrapped kv store, and writes the
// "kv-wrapped/foo" secret that later migration stages read back.  It returns
// the running cluster and the options used to build it; the caller owns
// cleanup.
func initializeShamir(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions) {
	t.Helper()

	baseClusterPort := basePort + 10

	// Start the cluster
	conf := vault.CoreConfig{
		DisableAutopilot: true,
	}
	opts := vault.TestClusterOptions{
		Logger:                logger.Named("initializeShamir"),
		HandlerFunc:           http.Handler,
		NumCores:              numTestCores,
		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
		BaseClusterListenPort: baseClusterPort,
	}
	storage.Setup(&conf, &opts)
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()

	leader := cluster.Cores[0]
	client := leader.Client

	// Unseal.  Raft followers must be joined to the leader before they can
	// be unsealed; other backends just unseal all cores directly.
	if storage.IsRaft {
		joinRaftFollowers(t, cluster, false)
		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
			t.Fatal(err)
		}
	} else {
		cluster.UnsealCores(t)
	}
	testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

	// Mount a seal-wrapped kv store so its values exercise the seal on
	// every read/write.
	err := client.Sys().Mount("kv-wrapped", &api.MountInput{
		SealWrap: true,
		Type:     "kv",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Write a secret that we will read back out later.
	_, err = client.Logical().Write("kv-wrapped/foo", map[string]interface{}{
		"zork": "quux",
	})
	if err != nil {
		t.Fatal(err)
	}

	return cluster, &opts
}
612
613// runShamir uses a pre-populated backend storage with Shamir.
614func runShamir(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, barrierKeys [][]byte) {
615	t.Helper()
616	baseClusterPort := basePort + 10
617
618	// Start the cluster
619	conf := vault.CoreConfig{
620		DisableAutopilot: true,
621	}
622	opts := vault.TestClusterOptions{
623		Logger:                logger.Named("runShamir"),
624		HandlerFunc:           http.Handler,
625		NumCores:              numTestCores,
626		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
627		BaseClusterListenPort: baseClusterPort,
628		SkipInit:              true,
629	}
630	storage.Setup(&conf, &opts)
631	cluster := vault.NewTestCluster(t, &conf, &opts)
632	cluster.Start()
633	defer func() {
634		cluster.Cleanup()
635		storage.Cleanup(t, cluster)
636	}()
637
638	leader := cluster.Cores[0]
639
640	// Unseal
641	cluster.BarrierKeys = barrierKeys
642	if storage.IsRaft {
643		for _, core := range cluster.Cores {
644			cluster.UnsealCore(t, core)
645		}
646		// This is apparently necessary for the raft cluster to get itself
647		// situated.
648		time.Sleep(15 * time.Second)
649		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
650			t.Fatal(err)
651		}
652	} else {
653		cluster.UnsealCores(t)
654	}
655	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
656
657	// Ensure that we always use the leader's client for this read check
658	leaderIdx, err := testhelpers.AwaitLeader(t, cluster)
659	if err != nil {
660		t.Fatal(err)
661	}
662	client := cluster.Cores[leaderIdx].Client
663	client.SetToken(rootToken)
664
665	// Read the secrets
666	secret, err := client.Logical().Read("kv-wrapped/foo")
667	if err != nil {
668		t.Fatal(err)
669	}
670	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
671		t.Fatal(diff)
672	}
673	secret, err = client.Logical().Read("kv-wrapped/test")
674	if err != nil {
675		t.Fatal(err)
676	}
677	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
678		t.Fatal(diff)
679	}
680}
681
// InitializeTransit initializes a brand new backend storage with Transit: it
// brings up a three-node cluster autosealed against the given transit server
// and key, joins raft followers when the storage backend is raft, mounts a
// seal-wrapped kv store, and writes the "kv-wrapped/foo" secret that later
// migration stages read back.  It returns the running cluster and the options
// used to build it; the caller owns cleanup.
func InitializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int,
	tss *sealhelper.TransitSealServer, sealKeyName string) (*vault.TestCluster, *vault.TestClusterOptions) {
	t.Helper()

	baseClusterPort := basePort + 10

	// Start the cluster
	conf := vault.CoreConfig{
		DisableAutopilot: true,
	}
	opts := vault.TestClusterOptions{
		Logger:                logger.Named("initializeTransit"),
		HandlerFunc:           http.Handler,
		NumCores:              numTestCores,
		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
		BaseClusterListenPort: baseClusterPort,
		// Every core autoseals against the shared transit server.
		SealFunc: func() vault.Seal {
			return tss.MakeSeal(t, sealKeyName)
		},
	}
	storage.Setup(&conf, &opts)
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()

	leader := cluster.Cores[0]
	client := leader.Client

	// Join raft
	if storage.IsRaft {
		// true: followers unseal via stored keys (autoseal) after joining.
		joinRaftFollowers(t, cluster, true)

		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
			t.Fatal(err)
		}
	}
	testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

	// Mount a seal-wrapped kv store so its values exercise the seal on
	// every read/write.
	err := client.Sys().Mount("kv-wrapped", &api.MountInput{
		SealWrap: true,
		Type:     "kv",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Write a secret that we will read back out later.
	_, err = client.Logical().Write("kv-wrapped/foo", map[string]interface{}{
		"zork": "quux",
	})
	if err != nil {
		t.Fatal(err)
	}

	return cluster, &opts
}
738
// runAutoseal restarts a pre-populated backend as an autosealed cluster using
// the given sealFunc, unseals it via the seal's stored keys, and verifies
// that both the pre-migration secret ("kv-wrapped/foo") and the
// post-migration secret ("kv-wrapped/test") are readable.  The cluster and
// storage are cleaned up before returning.
func runAutoseal(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage,
	basePort int, rootToken string, sealFunc func() vault.Seal) {

	baseClusterPort := basePort + 10

	// Start the cluster
	conf := vault.CoreConfig{
		DisableAutopilot: true,
	}
	opts := vault.TestClusterOptions{
		Logger:                logger.Named("runTransit"),
		HandlerFunc:           http.Handler,
		NumCores:              numTestCores,
		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
		BaseClusterListenPort: baseClusterPort,
		SkipInit:              true,
		SealFunc:              sealFunc,
	}
	storage.Setup(&conf, &opts)
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer func() {
		cluster.Cleanup()
		storage.Cleanup(t, cluster)
	}()

	// Give every core's client the root token so any of them can be used
	// for the checks below.
	for _, c := range cluster.Cores {
		c.Client.SetToken(rootToken)
	}

	// Unseal.  Even though we are using autounseal, we have to unseal
	// explicitly because we are using SkipInit.
	if storage.IsRaft {
		for _, core := range cluster.Cores {
			cluster.UnsealCoreWithStoredKeys(t, core)
		}
		// This is apparently necessary for the raft cluster to get itself
		// situated.
		time.Sleep(15 * time.Second)
		// We're taking the first core, but we're not assuming it's the leader here.
		if err := testhelpers.VerifyRaftConfiguration(cluster.Cores[0], len(cluster.Cores)); err != nil {
			t.Fatal(err)
		}
	} else {
		if err := cluster.UnsealCoresWithError(true); err != nil {
			t.Fatal(err)
		}
	}
	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))

	// Preceding code may have stepped down the leader, so we're not sure who it is
	// at this point.
	leaderCore := testhelpers.DeriveActiveCore(t, cluster)
	client := leaderCore.Client

	// Read the secrets
	secret, err := client.Logical().Read("kv-wrapped/foo")
	if err != nil {
		t.Fatal(err)
	}
	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
		t.Fatal(diff)
	}
	secret, err = client.Logical().Read("kv-wrapped/test")
	if err != nil {
		t.Fatal(err)
	}
	// Read returns (nil, nil) for a missing path; fail cleanly instead of
	// panicking on secret.Data below.
	if secret == nil {
		t.Fatal("secret is nil")
	}
	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
		t.Fatal(diff)
	}
}
813
// joinRaftFollowers unseals the leader, and then joins-and-unseals the
// followers one at a time.  We assume that the ServerAddressProvider has
// already been installed on all the nodes.  When useStoredKeys is true the
// followers unseal via the autoseal's stored keys; otherwise they unseal with
// shamir key shares.
func joinRaftFollowers(t *testing.T, cluster *vault.TestCluster, useStoredKeys bool) {
	leader := cluster.Cores[0]

	// The leader must be unsealed and active before anyone can join it.
	cluster.UnsealCore(t, leader)
	vault.TestWaitActive(t, leader.Core)

	leaderInfos := []*raft.LeaderJoinInfo{
		{
			LeaderAPIAddr: leader.Client.Address(),
			TLSConfig:     leader.TLSConfig,
		},
	}

	// Join followers
	for i := 1; i < len(cluster.Cores); i++ {
		core := cluster.Cores[i]
		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
		if err != nil {
			t.Fatal(err)
		}

		if useStoredKeys {
			// For autounseal, the raft backend is not initialized right away
			// after the join.  We need to wait briefly before we can unseal.
			awaitUnsealWithStoredKeys(t, core)
		} else {
			cluster.UnsealCore(t, core)
		}
	}

	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
}
849
850func awaitUnsealWithStoredKeys(t *testing.T, core *vault.TestClusterCore) {
851	timeout := time.Now().Add(30 * time.Second)
852	for {
853		if time.Now().After(timeout) {
854			t.Fatal("raft join: timeout waiting for core to unseal")
855		}
856		// Its actually ok for an error to happen here the first couple of
857		// times -- it means the raft join hasn't gotten around to initializing
858		// the backend yet.
859		err := core.UnsealWithStoredKeys(context.Background())
860		if err == nil {
861			return
862		}
863		core.Logger().Warn("raft join: failed to unseal core", "error", err)
864		time.Sleep(time.Second)
865	}
866}
867