Test Shamir-to-Transit and Transit-to-Shamir Seal Migration for post-1.4 Vault. (#9214)

* move adjustForSealMigration to vault package

* fix adjustForSealMigration

* begin working on new seal migration test

* create shamir seal migration test

* refactor testhelpers

* add VerifyRaftConfiguration to testhelpers

* stub out TestTransit

* Revert "refactor testhelpers"

This reverts commit 39593defd0d4c6fd79aedfd37df6298391abb9db.

* get shamir test working again

* stub out transit join

* work on transit join

* remove debug code

* initTransit now works with raft join

* runTransit works with inmem

* work on runTransit with raft

* runTransit works with raft

* cleanup tests

* TestSealMigration_TransitToShamir_Pre14

* TestSealMigration_ShamirToTransit_Pre14

* split for pre-1.4 testing

* add simple tests for transit and shamir

* fix typo in test suite

* debug wrapper type

* test debug

* test-debug

* refactor core migration

* Revert "refactor core migration"

This reverts commit a776452.

* begin refactor of adjustForSealMigration

* fix bug in adjustForSealMigration

* clean up tests

* clean up core refactoring

* fix bug in shamir->transit migration

* stub out test that brings individual nodes up and down

* refactor NewTestCluster

* pass listeners into newCore()

* simplify cluster address setup

* simplify extra test core setup

* refactor TestCluster for readability

* refactor TestCluster for readability

* refactor TestCluster for readability

* add shutdown func to TestCore

* add cleanup func to TestCore

* create RestartCore

* stub out TestSealMigration_ShamirToTransit_Post14

* refactor address handling in NewTestCluster

* fix listener setup in newCore()

* remove unnecessary lock from setSealsForMigration()

* rename sealmigration test package

* use ephemeral ports below 30000

* work on post-1.4 migration testing

* clean up pre-1.4 test

* TestSealMigration_ShamirToTransit_Post14 works for non-raft

* work on raft TestSealMigration_ShamirToTransit_Post14

* clean up test code

* refactor TestClusterCore

* clean up TestClusterCore

* stub out some temporary tests

* use HardcodedServerAddressProvider in seal migration tests

* work on raft for TestSealMigration_ShamirToTransit_Post14

* always use hardcoded raft address provider in seal migration tests

* debug TestSealMigration_ShamirToTransit_Post14

* fix bug in RestartCore

* remove debug code

* TestSealMigration_ShamirToTransit_Post14 works now

* clean up debug code

* clean up tests

* cleanup tests

* refactor test code

* stub out TestSealMigration_TransitToShamir_Post14

* set seals properly for transit->shamir migration

* migrateFromTransitToShamir_Post14 works for inmem

* migrateFromTransitToShamir_Post14 works for raft

* use base ports per-test

* fix seal verification test code

* simplify seal migration test suite

* simplify test suite

* cleanup test suite

* use explicit ports below 30000

* simplify use of numTestCores

* Update vault/external_tests/sealmigration/seal_migration_test.go

Co-authored-by: Calvin Leung Huang <[email protected]>

* Update vault/external_tests/sealmigration/seal_migration_test.go

Co-authored-by: Calvin Leung Huang <[email protected]>

* clean up imports

* rename to StartCore()

* Update vault/testing.go

Co-authored-by: Calvin Leung Huang <[email protected]>

* simplify test suite

* clean up tests

Co-authored-by: Calvin Leung Huang <[email protected]>
2 people authored and andaley committed Jul 17, 2020
1 parent c49c6a6 commit 2127461
Showing 6 changed files with 955 additions and 485 deletions.
86 changes: 34 additions & 52 deletions helper/testhelpers/testhelpers.go
@@ -412,16 +412,9 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib
}

func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
-raftClusterJoinNodes(t, cluster, false)
-}
-
-func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) {
-raftClusterJoinNodes(t, cluster, true)
-}
-
-func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) {

addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}

atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)

leader := cluster.Cores[0]
@@ -430,11 +423,7 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys
{
EnsureCoreSealed(t, leader)
leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
-if useStoredKeys {
-cluster.UnsealCoreWithStoredKeys(t, leader)
-} else {
-cluster.UnsealCore(t, leader)
-}
+cluster.UnsealCore(t, leader)
vault.TestWaitActive(t, leader.Core)
}

@@ -454,37 +443,12 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys
t.Fatal(err)
}

-if useStoredKeys {
-// For autounseal, the raft backend is not initialized right away
-// after the join. We need to wait briefly before we can unseal.
-awaitUnsealWithStoredKeys(t, core)
-} else {
-cluster.UnsealCore(t, core)
-}
+cluster.UnsealCore(t, core)
}

WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
}
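
As a usage note, RaftClusterJoinNodes is meant to be driven from a test that has already built a raft-backed TestCluster. A minimal sketch under assumed wiring — the RaftBackendSetup helper and the handler import are not part of this diff:

// Usage sketch, not code from this commit. Assumes the usual test imports:
// vault "github.com/hashicorp/vault/vault", vaulthttp "github.com/hashicorp/vault/http",
// plus this repo's testhelpers and teststorage packages.
func exampleRaftJoin(t testing.T) {
	conf, opts := &vault.CoreConfig{}, &vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}
	teststorage.RaftBackendSetup(conf, opts) // assumed helper that installs a raft PhysicalFactory
	cluster := vault.NewTestCluster(t, conf, opts)
	defer cluster.Cleanup()

	// Join every follower to the leader, unsealing each node as it joins.
	testhelpers.RaftClusterJoinNodes(t, cluster)
	vault.TestWaitActive(t, cluster.Cores[0].Core)
}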

-func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) {
-
-timeout := time.Now().Add(30 * time.Second)
-for {
-if time.Now().After(timeout) {
-t.Fatal("raft join: timeout waiting for core to unseal")
-}
-// Its actually ok for an error to happen here the first couple of
-// times -- it means the raft join hasn't gotten around to initializing
-// the backend yet.
-err := core.UnsealWithStoredKeys(context.Background())
-if err == nil {
-return
-}
-core.Logger().Warn("raft join: failed to unseal core", "error", err)
-time.Sleep(time.Second)
-}
-}

// HardcodedServerAddressProvider is a ServerAddressProvider that uses
// a hardcoded map of raft node addresses.
//
@@ -505,11 +469,11 @@ func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftli

// NewHardcodedServerAddressProvider is a convenience function that makes a
// ServerAddressProvider from a given cluster address base port.
-func NewHardcodedServerAddressProvider(cluster *vault.TestCluster, baseClusterPort int) raftlib.ServerAddressProvider {
+func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.ServerAddressProvider {

entries := make(map[raftlib.ServerID]raftlib.ServerAddress)

-for i := 0; i < len(cluster.Cores); i++ {
+for i := 0; i < numCores; i++ {
id := fmt.Sprintf("core-%d", i)
addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i)
entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr)
@@ -520,17 +484,6 @@ func NewHardcodedServerAddressProvider(cluster *vault.TestCluster, baseClusterPo
}
}

-// SetRaftAddressProviders sets a ServerAddressProvider for all the nodes in a
-// cluster.
-func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider raftlib.ServerAddressProvider) {
-
-atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)
-
-for _, core := range cluster.Cores {
-core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider)
-}
-}

// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e.
// the correct number of servers, having the correct NodeIDs, and exactly one
// leader.
@@ -565,6 +518,35 @@ func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error {
return nil
}

+// AwaitLeader waits for one of the cluster's nodes to become leader.
+func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) {
+
+timeout := time.Now().Add(30 * time.Second)
+for {
+if time.Now().After(timeout) {
+break
+}
+
+for i, core := range cluster.Cores {
+if core.Core.Sealed() {
+continue
+}
+
+isLeader, _, _, err := core.Leader()
+if err != nil {
+t.Fatal(err)
+}
+if isLeader {
+return i, nil
+}
+}
+
+time.Sleep(time.Second)
+}
+
+return 0, fmt.Errorf("timeout waiting for leader")
+}
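
AwaitLeader pairs naturally with VerifyRaftConfiguration above when a cluster comes back up mid-test. A short sketch of that assumed usage, not code from this commit:

// Wait for some unsealed core to win the election, then check that the raft
// configuration lists every core with exactly one leader.
leaderIdx, err := testhelpers.AwaitLeader(t, cluster)
if err != nil {
	t.Fatal(err)
}
if err := testhelpers.VerifyRaftConfiguration(cluster.Cores[leaderIdx], len(cluster.Cores)); err != nil {
	t.Fatal(err)
}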

func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
t.Helper()

30 changes: 22 additions & 8 deletions helper/testhelpers/teststorage/teststorage_reusable.go
@@ -8,6 +8,7 @@ import (
"github.com/mitchellh/go-testing-interface"

hclog "github.com/hashicorp/go-hclog"
raftlib "github.com/hashicorp/raft"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/vault"
)
@@ -73,7 +74,10 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica

// MakeReusableRaftStorage makes a physical raft backend that can be re-used
// across multiple test clusters in sequence.
-func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) {
+func MakeReusableRaftStorage(
+t testing.T, logger hclog.Logger, numCores int,
+addressProvider raftlib.ServerAddressProvider,
+) (ReusableStorage, StorageCleanup) {

raftDirs := make([]string, numCores)
for i := 0; i < numCores; i++ {
@@ -87,17 +91,14 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (Re
conf.DisablePerformanceStandby = true
opts.KeepStandbysSealed = true
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger) *vault.PhysicalBackendBundle {
-return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx])
+return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider)
}
},

// Close open files being used by raft.
Cleanup: func(t testing.T, cluster *vault.TestCluster) {
-for _, core := range cluster.Cores {
-raftStorage := core.UnderlyingRawStorage.(*raft.RaftBackend)
-if err := raftStorage.Close(); err != nil {
-t.Fatal(err)
-}
+for i := 0; i < len(cluster.Cores); i++ {
+CloseRaftStorage(t, cluster, i)
}
},
}
@@ -111,6 +112,14 @@ func MakeReusableRaftStorage(
return storage, cleanup
}

+// CloseRaftStorage closes open files being used by raft.
+func CloseRaftStorage(t testing.T, cluster *vault.TestCluster, idx int) {
+raftStorage := cluster.Cores[idx].UnderlyingRawStorage.(*raft.RaftBackend)
+if err := raftStorage.Close(); err != nil {
+t.Fatal(err)
+}
+}
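
CloseRaftStorage exists because raft keeps files open: before a test restarts a single node on the same raft directory, that node's storage must be closed. A rough sketch of the restart pattern, going by the commit log's RestartCore/StartCore work — both TestCluster method signatures below are assumptions:

// Hypothetical single-node restart during a seal migration test.
cluster.StopCore(t, idx)                      // assumed TestCluster method
teststorage.CloseRaftStorage(t, cluster, idx) // release the node's raft files
cluster.StartCore(t, idx, opts)               // assumed; see "rename to StartCore()" in the commit log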

func makeRaftDir(t testing.T) string {
raftDir, err := ioutil.TempDir("", "vault-raft-")
if err != nil {
@@ -120,7 +129,10 @@ func makeRaftDir(t testing.T) string {
return raftDir
}

-func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string) *vault.PhysicalBackendBundle {
+func makeReusableRaftBackend(
+t testing.T, coreIdx int, logger hclog.Logger, raftDir string,
+addressProvider raftlib.ServerAddressProvider,
+) *vault.PhysicalBackendBundle {

nodeID := fmt.Sprintf("core-%d", coreIdx)
conf := map[string]string{
@@ -134,6 +146,8 @@ func makeReusableRaftBackend(
t.Fatal(err)
}

+backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)

return &vault.PhysicalBackendBundle{
Backend: backend,
}
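
Together with the testhelpers change above, the new addressProvider parameter lets a test pin the whole cluster to a fixed port layout before any core exists. A minimal wiring sketch — the port value is an arbitrary example below 30000 per the commit log, `logger` is an hclog.Logger supplied by the test, and the niladic cleanup signature is an assumption:

// Resolve core-i to 127.0.0.1:20010+i for every cluster built on this storage.
numCores := 3
addressProvider := testhelpers.NewHardcodedServerAddressProvider(numCores, 20010)
storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numCores, addressProvider)
defer cleanup() // assumed StorageCleanup signature
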
vault/external_tests/sealmigration/seal_migration_test.go
@@ -25,7 +25,7 @@ import (
func TestSealMigration_TransitToShamir_Pre14(t *testing.T) {
// Note that we do not test integrated raft storage since this is
// a pre-1.4 test.
-testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false)
+testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, basePort_TransitToShamir_Pre14, false)
}
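
basePort_TransitToShamir_Pre14 is one of a set of per-test base ports ("use base ports per-test" and "use explicit ports below 30000" in the commit log). A sketch of how such constants might look; only the Pre14 transit name appears in this diff, the other names are extrapolated from the test names in the commit log, and all values are illustrative:

const (
	basePort_ShamirToTransit_Pre14  = 20000 // illustrative value
	basePort_TransitToShamir_Pre14  = 21000 // illustrative value
	basePort_ShamirToTransit_Post14 = 22000 // illustrative value
	basePort_TransitToShamir_Post14 = 23000 // illustrative value
)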

func testSealMigrationTransitToShamir_Pre14(
@@ -42,7 +42,11 @@ func testSealMigrationTransitToShamir_Pre14(
tss.MakeKey(t, "transit-seal-key")

// Initialize the backend with transit.
-rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
+cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
+rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys
+cluster.EnsureCoresSealed(t)
+storage.Cleanup(t, cluster)
+cluster.Cleanup()

// Migrate the backend from transit to shamir
migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys)