Test pre-1.4 seal migration (#9085)
* enable seal wrap in all seal migration tests

* move adjustForSealMigration to vault package

* fix adjustForSealMigration

* begin working on new seal migration test

* create shamir seal migration test

* refactor testhelpers

* add VerifyRaftConfiguration to testhelpers

* stub out TestTransit

* Revert "refactor testhelpers"

This reverts commit 39593defd0d4c6fd79aedfd37df6298391abb9db.

* get shamir test working again

* stub out transit join

* work on transit join

* Revert "move resuable storage test to avoid creating import cycle"

This reverts commit b3ff231.

* remove debug code

* initTransit now works with raft join

* runTransit works with inmem

* work on runTransit with raft

* runTransit works with raft

* get rid of disused test

* cleanup tests

* TestSealMigration_TransitToShamir_Pre14

* TestSealMigration_ShamirToTransit_Pre14

* split for pre-1.4 testing

* add simple tests for transit and shamir

* fix typo in test suite

* debug wrapper type

* test debug

* test-debug

* refactor core migration

* Revert "refactor core migration"

This reverts commit a776452.

* begin refactor of adjustForSealMigration

* fix bug in adjustForSealMigration

* clean up tests

* clean up core refactoring

* fix bug in shamir->transit migration

* remove unnecessary lock from setSealsForMigration()

* rename sealmigration test package

* use ephemeral ports below 30000

* simplify use of numTestCores
mjarmy authored and andaley committed Jul 17, 2020
1 parent d6e09db commit 680a23d
Showing 9 changed files with 880 additions and 1,111 deletions.
750 changes: 0 additions & 750 deletions command/seal_migration_test.go

This file was deleted.

7 changes: 1 addition & 6 deletions command/server.go
@@ -1120,6 +1120,7 @@ func (c *ServerCommand) Run(args []string) int {
HAPhysical: nil,
ServiceRegistration: configSR,
Seal: barrierSeal,
UnwrapSeal: unwrapSeal,
AuditBackends: c.AuditBackends,
CredentialBackends: c.CredentialBackends,
LogicalBackends: c.LogicalBackends,
@@ -1528,12 +1529,6 @@ CLUSTER_SYNTHESIS_COMPLETE:
Core: core,
}))

// Before unsealing with stored keys, setup seal migration if needed
if err := adjustCoreForSealMigration(c.logger, core, barrierSeal, unwrapSeal); err != nil {
c.UI.Error(err.Error())
return 1
}

// Attempt unsealing in a background goroutine. This is needed for when a
// Vault cluster with multiple servers is configured with auto-unseal but is
// uninitialized. Once one server initializes the storage backend, this
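
In the server.go hunks above, the command package stops wiring up seal migration itself: the call to adjustCoreForSealMigration is removed and the unwrap seal is instead passed to the core through the new UnwrapSeal field, so the vault package can decide whether a migration is needed. A minimal, hypothetical sketch of a caller under this scheme (only the Seal and UnwrapSeal fields are confirmed by the diff; the Physical field and the surrounding variables are illustrative):

coreConfig := &vault.CoreConfig{
	Physical:   backend,     // assumed storage backend variable
	Seal:       barrierSeal, // the active (possibly auto) seal built from config
	UnwrapSeal: unwrapSeal,  // the "disabled" seal stanza, nil when not migrating
}
core, err := vault.NewCore(coreConfig)
if err != nil {
	return err
}
// The core now inspects the stored seal configuration itself and sets up
// migration seals when the configured seal differs from what is on disk.
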
111 changes: 0 additions & 111 deletions command/server_util.go
@@ -1,16 +1,8 @@
package command

import (
"context"
"fmt"

log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/vault"
vaultseal "github.com/hashicorp/vault/vault/seal"
"github.com/pkg/errors"
)

var (
@@ -19,106 +11,3 @@ var (

func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) {
}

func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error {
existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background())
if err != nil {
return fmt.Errorf("Error checking for existing seal: %s", err)
}

// If we don't have an existing config or if it's the deprecated auto seal
// which needs an upgrade, skip out
if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
return nil
}

if unwrapSeal == nil {
// We have the same barrier type and the unwrap seal is nil so we're not
// migrating from same to same, IOW we assume it's not a migration
if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
return nil
}

// If we're not coming from Shamir, and the existing type doesn't match
// the barrier type, we need both the migration seal and the new seal
if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir {
return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`)
}
} else {
if unwrapSeal.BarrierType() == wrapping.Shamir {
return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
}
}

if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
return errors.New(`Recovery seal configuration not found for existing seal`)
}

var migrationSeal vault.Seal
var newSeal vault.Seal

// Determine the migrationSeal. This is either going to be an instance of
// shamir or the unwrapSeal.
switch existBarrierSealConfig.Type {
case wrapping.Shamir:
// The value reflected in config is what we're going to
migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})

default:
// If we're not coming from Shamir we expect the previous seal to be
// in the config and disabled.
migrationSeal = unwrapSeal
}

// newSeal will be the barrierSeal
newSeal = barrierSeal

if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
return errors.New("Migrating between same seal types is currently not supported")
}

if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
// In this case our migration seal is set so we are using it
// (potentially) for unwrapping. Set it on core for that purpose then
// exit.
core.SetSealsForMigration(nil, nil, unwrapSeal)
return nil
}

// Set the appropriate barrier and recovery configs.
switch {
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
// Migrating from auto->auto, copy the configs over
newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported():
// Migrating from auto->shamir, clone auto's recovery config and set
// stored keys to 1.
newSealConfig := existRecoverySealConfig.Clone()
newSealConfig.StoredShares = 1
newSeal.SetCachedBarrierConfig(newSealConfig)
case newSeal != nil && newSeal.RecoveryKeySupported():
// Migrating from shamir->auto, set a new barrier config and set
// recovery config to a clone of shamir's barrier config with stored
// keys set to 0.
newBarrierSealConfig := &vault.SealConfig{
Type: newSeal.BarrierType(),
SecretShares: 1,
SecretThreshold: 1,
StoredShares: 1,
}
newSeal.SetCachedBarrierConfig(newBarrierSealConfig)

newRecoveryConfig := existBarrierSealConfig.Clone()
newRecoveryConfig.StoredShares = 0
newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
}

core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal)

return nil
}
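
The deleted adjustCoreForSealMigration is not gone: per the commit message ("move adjustForSealMigration to vault package"), the same logic now lives in the vault package, which lines up with the new UnwrapSeal field on CoreConfig in server.go. For orientation, here is a simplified, hypothetical sketch of the decision matrix from the end of that function; it is not code from this commit, and only the vault.Seal methods used below come from the deleted code:

// describeSealMigration mirrors the final switch in the deleted function.
// It is an illustrative helper, not part of this change.
func describeSealMigration(migrationSeal, newSeal vault.Seal) string {
	switch {
	case migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
		// auto -> auto: both seals support recovery keys, so the existing
		// barrier and recovery configs are copied onto the new seal.
		return "auto to auto: copy barrier and recovery configs"
	case migrationSeal.RecoveryKeySupported():
		// auto -> shamir: the old recovery config, with StoredShares set to 1,
		// becomes the new seal's barrier config.
		return "auto to shamir: recovery config becomes barrier config"
	case newSeal.RecoveryKeySupported():
		// shamir -> auto: a fresh single-share barrier config is created and
		// the old barrier config, with StoredShares set to 0, becomes the
		// recovery config.
		return "shamir to auto: old barrier config becomes recovery config"
	default:
		// shamir -> shamir is rejected earlier as an unsupported migration.
		return "unsupported"
	}
}
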
99 changes: 85 additions & 14 deletions helper/testhelpers/testhelpers.go
@@ -412,42 +412,79 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib
}

func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
raftClusterJoinNodes(t, cluster, false)
}

func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) {
raftClusterJoinNodes(t, cluster, true)
}

func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) {

leaderCore := cluster.Cores[0]
leaderAPI := leaderCore.Client.Address()
addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)

leader := cluster.Cores[0]

// Seal the leader so we can install an address provider
{
EnsureCoreSealed(t, leaderCore)
leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
cluster.UnsealCore(t, leaderCore)
vault.TestWaitActive(t, leaderCore.Core)
EnsureCoreSealed(t, leader)
leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
if useStoredKeys {
cluster.UnsealCoreWithStoredKeys(t, leader)
} else {
cluster.UnsealCore(t, leader)
}
vault.TestWaitActive(t, leader.Core)
}

leaderInfo := &raft.LeaderJoinInfo{
LeaderAPIAddr: leaderAPI,
TLSConfig: leaderCore.TLSConfig,
leaderInfos := []*raft.LeaderJoinInfo{
&raft.LeaderJoinInfo{
LeaderAPIAddr: leader.Client.Address(),
TLSConfig: leader.TLSConfig,
},
}

// Join followers
for i := 1; i < len(cluster.Cores); i++ {
core := cluster.Cores[i]
core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
leaderInfos := []*raft.LeaderJoinInfo{
leaderInfo,
}
_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
if err != nil {
t.Fatal(err)
}

cluster.UnsealCore(t, core)
if useStoredKeys {
// For autounseal, the raft backend is not initialized right away
// after the join. We need to wait briefly before we can unseal.
awaitUnsealWithStoredKeys(t, core)
} else {
cluster.UnsealCore(t, core)
}
}

WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
}

func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) {

timeout := time.Now().Add(30 * time.Second)
for {
if time.Now().After(timeout) {
t.Fatal("raft join: timeout waiting for core to unseal")
}
// It's actually ok for an error to happen here the first couple of
// times -- it means the raft join hasn't gotten around to initializing
// the backend yet.
err := core.UnsealWithStoredKeys(context.Background())
if err == nil {
return
}
core.Logger().Warn("raft join: failed to unseal core", "error", err)
time.Sleep(time.Second)
}
}

// HardcodedServerAddressProvider is a ServerAddressProvider that uses
// a hardcoded map of raft node addresses.
//
@@ -494,6 +531,40 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r
}
}

// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e.
// the correct number of servers, having the correct NodeIDs, and exactly one
// leader.
func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error {

backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
ctx := namespace.RootContext(context.Background())
config, err := backend.GetConfiguration(ctx)
if err != nil {
return err
}

servers := config.Servers
if len(servers) != numCores {
return fmt.Errorf("Found %d servers, not %d", len(servers), numCores)
}

leaders := 0
for i, s := range servers {
if s.NodeID != fmt.Sprintf("core-%d", i) {
return fmt.Errorf("Found unexpected node ID %q", s.NodeID)
}
if s.Leader {
leaders++
}
}

if leaders != 1 {
return fmt.Errorf("Found %d leaders", leaders)
}

return nil
}

func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
t.Helper()

(Diffs for the remaining changed files are not shown.)

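Taken together, the new test helpers let a pre-1.4 seal-migration test bring up a raft cluster that unseals with stored keys and then assert on the resulting raft membership. A hedged usage sketch follows; the helper names and signatures come from the testhelpers diff above, while the package name, function name, cluster construction, and numTestCores are illustrative:

package sealmigration

import (
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers"
	"github.com/hashicorp/vault/vault"
)

// testRaftJoinWithStoredKeys is a hypothetical test body; building the
// *vault.TestCluster (and its auto-seal) is elided.
func testRaftJoinWithStoredKeys(t *testing.T, cluster *vault.TestCluster, numTestCores int) {
	// Join the followers to the leader. With stored keys, the helper retries
	// UnsealWithStoredKeys until each follower's raft backend is initialized.
	testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster)

	// Every core should report the same raft configuration: one server per
	// core, node IDs of the form "core-<n>", and exactly one leader.
	for _, core := range cluster.Cores {
		if err := testhelpers.VerifyRaftConfiguration(core, numTestCores); err != nil {
			t.Fatal(err)
		}
	}
}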