From 7864437d2d83db1689bb7bd301a260198631fc73 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 16 Apr 2020 12:11:06 -0400 Subject: [PATCH 01/86] move adjustForSealMigration to vault package --- command/server.go | 56 +++++++++++++++++++++++------------------------ vault/core.go | 11 +++++----- 2 files changed, 33 insertions(+), 34 deletions(-) diff --git a/command/server.go b/command/server.go index 9d2a038af38d..15a1fbc15a0f 100644 --- a/command/server.go +++ b/command/server.go @@ -1359,6 +1359,34 @@ CLUSTER_SYNTHESIS_COMPLETE: } } + // Attempt unsealing in a background goroutine. This is needed for when a + // Vault cluster with multiple servers is configured with auto-unseal but is + // uninitialized. Once one server initializes the storage backend, this + // goroutine will pick up the unseal keys and unseal this instance. + if !core.IsInSealMigration() { + go func() { + for { + err := core.UnsealWithStoredKeys(context.Background()) + if err == nil { + return + } + + if vault.IsFatalError(err) { + c.logger.Error("error unsealing core", "error", err) + return + } else { + c.logger.Warn("failed to unseal core", "error", err) + } + + select { + case <-c.ShutdownCh: + return + case <-time.After(5 * time.Second): + } + } + }() + } + // Copy the reload funcs pointers back c.reloadFuncs = coreConfig.ReloadFuncs c.reloadFuncsLock = coreConfig.ReloadFuncsLock @@ -1529,34 +1557,6 @@ CLUSTER_SYNTHESIS_COMPLETE: Core: core, })) - // Attempt unsealing in a background goroutine. This is needed for when a - // Vault cluster with multiple servers is configured with auto-unseal but is - // uninitialized. Once one server initializes the storage backend, this - // goroutine will pick up the unseal keys and unseal this instance. - if !core.IsInSealMigration() { - go func() { - for { - err := core.UnsealWithStoredKeys(context.Background()) - if err == nil { - return - } - - if vault.IsFatalError(err) { - c.logger.Error("error unsealing core", "error", err) - return - } else { - c.logger.Warn("failed to unseal core", "error", err) - } - - select { - case <-c.ShutdownCh: - return - case <-time.After(5 * time.Second): - } - } - }() - } - // When the underlying storage is raft, kick off retry join if it was specified // in the configuration if config.Storage.Type == "raft" { diff --git a/vault/core.go b/vault/core.go index 98c0241d9dca..046d77154416 100644 --- a/vault/core.go +++ b/vault/core.go @@ -943,7 +943,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.clusterListener.Store((*cluster.Listener)(nil)) - err = c.adjustForSealMigration(conf.UnwrapSeal) + err = c.adjustForSealMigration(conf.Seal, conf.UnwrapSeal) if err != nil { return nil, err } @@ -2230,10 +2230,7 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } -func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { - - barrierSeal := c.seal - +func (c *Core) adjustForSealMigration(barrierSeal, unwrapSeal Seal) error { existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) if err != nil { return fmt.Errorf("Error checking for existing seal: %s", err) @@ -2276,7 +2273,7 @@ func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { case wrapping.Shamir: // The value reflected in config is what we're going to migrationSeal = NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ Logger: c.logger.Named("shamir"), }), 
}) @@ -2337,6 +2334,8 @@ func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { } func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { + c.stateLock.Lock() + defer c.stateLock.Unlock() c.unwrapSeal = unwrapSeal if c.unwrapSeal != nil { c.unwrapSeal.SetCore(c) From 9360ddd1e16f916fd7f23e132282d3eae2947a54 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 16 Apr 2020 12:59:10 -0400 Subject: [PATCH 02/86] fix adjustForSealMigration --- command/server.go | 58 ++++++++++++++++++++++++----------------------- vault/core.go | 12 +++++----- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/command/server.go b/command/server.go index 15a1fbc15a0f..b45a53161a68 100644 --- a/command/server.go +++ b/command/server.go @@ -551,6 +551,8 @@ func (c *ServerCommand) runRecoveryMode() int { } } + //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + if err := core.InitializeRecovery(context.Background()); err != nil { c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err)) return 1 @@ -1359,34 +1361,6 @@ CLUSTER_SYNTHESIS_COMPLETE: } } - // Attempt unsealing in a background goroutine. This is needed for when a - // Vault cluster with multiple servers is configured with auto-unseal but is - // uninitialized. Once one server initializes the storage backend, this - // goroutine will pick up the unseal keys and unseal this instance. - if !core.IsInSealMigration() { - go func() { - for { - err := core.UnsealWithStoredKeys(context.Background()) - if err == nil { - return - } - - if vault.IsFatalError(err) { - c.logger.Error("error unsealing core", "error", err) - return - } else { - c.logger.Warn("failed to unseal core", "error", err) - } - - select { - case <-c.ShutdownCh: - return - case <-time.After(5 * time.Second): - } - } - }() - } - // Copy the reload funcs pointers back c.reloadFuncs = coreConfig.ReloadFuncs c.reloadFuncsLock = coreConfig.ReloadFuncsLock @@ -1557,6 +1531,34 @@ CLUSTER_SYNTHESIS_COMPLETE: Core: core, })) + // Attempt unsealing in a background goroutine. This is needed for when a + // Vault cluster with multiple servers is configured with auto-unseal but is + // uninitialized. Once one server initializes the storage backend, this + // goroutine will pick up the unseal keys and unseal this instance. 
+ if !core.IsInSealMigration() { + go func() { + for { + err := core.UnsealWithStoredKeys(context.Background()) + if err == nil { + return + } + + if vault.IsFatalError(err) { + c.logger.Error("error unsealing core", "error", err) + return + } else { + c.logger.Warn("failed to unseal core", "error", err) + } + + select { + case <-c.ShutdownCh: + return + case <-time.After(5 * time.Second): + } + } + }() + } + // When the underlying storage is raft, kick off retry join if it was specified // in the configuration if config.Storage.Type == "raft" { diff --git a/vault/core.go b/vault/core.go index 046d77154416..942a4d45c9a7 100644 --- a/vault/core.go +++ b/vault/core.go @@ -943,7 +943,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.clusterListener.Store((*cluster.Listener)(nil)) - err = c.adjustForSealMigration(conf.Seal, conf.UnwrapSeal) + err = c.adjustForSealMigration(conf.UnwrapSeal) if err != nil { return nil, err } @@ -2230,7 +2230,7 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } -func (c *Core) adjustForSealMigration(barrierSeal, unwrapSeal Seal) error { +func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) if err != nil { return fmt.Errorf("Error checking for existing seal: %s", err) @@ -2245,13 +2245,13 @@ func (c *Core) adjustForSealMigration(barrierSeal, unwrapSeal Seal) error { if unwrapSeal == nil { // We have the same barrier type and the unwrap seal is nil so we're not // migrating from same to same, IOW we assume it's not a migration - if existBarrierSealConfig.Type == barrierSeal.BarrierType() { + if existBarrierSealConfig.Type == c.seal.BarrierType() { return nil } // If we're not coming from Shamir, and the existing type doesn't match // the barrier type, we need both the migration seal and the new seal - if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir { + if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) } } else { @@ -2285,13 +2285,13 @@ func (c *Core) adjustForSealMigration(barrierSeal, unwrapSeal Seal) error { } // newSeal will be the barrierSeal - newSeal = barrierSeal + newSeal = c.seal if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { return errors.New("Migrating between same seal types is currently not supported") } - if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() { + if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { // In this case our migration seal is set so we are using it // (potentially) for unwrapping. Set it on core for that purpose then // exit. 
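For reference, the "migration mode" that adjustForSealMigration detects above corresponds to an operator marking the old seal stanza as disabled in the server configuration. A minimal sketch (not part of these patches) of such a config for the transit-to-Shamir case exercised by the test added below; the address and token values are placeholders, and the key name matches the one created in the test:

    # Keeping the old transit stanza with disabled = "true" tells Vault to use
    # it only for unwrapping during the migration; with no other seal stanza
    # present, the cluster falls back to the default Shamir seal.
    seal "transit" {
      address    = "https://transit.example.com:8200"  # placeholder
      token      = "s.REPLACE_ME"                      # placeholder
      key_name   = "key1"
      mount_path = "transit/"
      disabled   = "true"
    }

Each node is then unsealed with the recovery keys and the migrate flag ("vault operator unseal -migrate"), which is what the test drives through api.UnsealOpts{Migrate: true}.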
From 05a612c7f6b49fede6ff3c84f247fce5b85f4496 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 17 Apr 2020 09:30:36 -0400 Subject: [PATCH 03/86] begin working on new seal migration test --- command/server.go | 2 - .../seal_migration/seal_migration_test.go | 219 ++++++++++++++++++ 2 files changed, 219 insertions(+), 2 deletions(-) create mode 100644 vault/external_tests/seal_migration/seal_migration_test.go diff --git a/command/server.go b/command/server.go index b45a53161a68..9d2a038af38d 100644 --- a/command/server.go +++ b/command/server.go @@ -551,8 +551,6 @@ func (c *ServerCommand) runRecoveryMode() int { } } - //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - if err := core.InitializeRecovery(context.Background()); err != nil { c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err)) return 1 diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go new file mode 100644 index 000000000000..eabba66dad9c --- /dev/null +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -0,0 +1,219 @@ +package seal_migration + +import ( + "encoding/base64" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers" + sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + vaultseal "github.com/hashicorp/vault/vault/seal" +) + +func TestSealMigration_TransitToShamir(t *testing.T) { + t.Parallel() + + t.Run("inmem", func(t *testing.T) { + t.Parallel() + + logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) + inm, err := inmem.NewTransactionalInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + testSealMigrationTransitToShamir(t, logger, inm) + }) + + ////t.Run("file", func(t *testing.T) { + //// t.Parallel() + //// testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup) + ////}) + + ////t.Run("consul", func(t *testing.T) { + //// t.Parallel() + //// testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup) + ////}) + + ////t.Run("raft", func(t *testing.T) { + //// t.Parallel() + //// testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup) + ////}) +} + +func testSealMigrationTransitToShamir(t *testing.T, logger log.Logger, backend physical.Backend) { + + var testEntry = map[string]interface{}{"bar": "quux"} + + // Create the transit server. 
+ tss := sealhelper.NewTransitSealServer(t) + defer func() { + if tss != nil { + tss.Cleanup() + } + }() + + // Create a transit seal + tss.MakeKey(t, "key1") + transitSeal := tss.MakeSeal(t, "key1") + + // Create a cluster that uses transit + var rootToken string + var recoveryKeys [][]byte + { + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Physical: backend, + Logger: logger.Named("transit_cluster"), + Seal: transitSeal, + }, + &vault.TestClusterOptions{ + HandlerFunc: http.Handler, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + // save the root token and recovery keys + client := cluster.Cores[0].Client + rootToken = client.Token() + recoveryKeys = cluster.RecoveryKeys + + // Write a secret that we will read back out later. + _, err := client.Logical().Write("secret/foo", testEntry) + if err != nil { + t.Fatal(err) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + } + + // Create a shamir seal + shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{}), + }) + + // Create a cluster that migrates from transit to shamir + { + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Physical: backend, + Logger: logger.Named("transit_to_shamir_cluster"), + Seal: shamirSeal, + // Setting an UnwrapSeal puts us in migration mode. This is the + // equivalent of doing the following in HCL: + // + // seal "transit" { + // // ... + // disabled = "true" + // } + // + UnwrapSeal: transitSeal, + }, + &vault.TestClusterOptions{ + HandlerFunc: http.Handler, + NumCores: 1, + SkipInit: true, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + client.SetToken(rootToken) + + // Unseal and migrate to Shamir. Although we're unsealing using the + // recovery keys, this is still an autounseal; if we stopped the + // transit server this would fail. + var resp *api.SealStatusResponse + var err error + for _, key := range recoveryKeys { + strKey := base64.RawStdEncoding.EncodeToString(key) + + resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: strKey}) + if err == nil { + t.Fatal("expected error due to lack of migrate parameter") + } + + resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: strKey, Migrate: true}) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.Sealed { + break + } + } + if resp == nil || resp.Sealed { + t.Fatalf("expected unsealed state; got %#v", resp) + } + testhelpers.WaitForActiveNode(t, cluster) + + // Await migration to finish. Sadly there is no callback, and the test + // will fail later on if we don't do this. + // TODO maybe try to read? + time.Sleep(10 * time.Second) + + // Read our secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, testEntry); len(diff) > 0 { + t.Fatal(diff) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + } + + // Nuke the transit server + tss.EnsureCoresSealed(t) + tss.Cleanup() + tss = nil + + // Create a cluster that uses shamir + { + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Physical: backend, + Logger: logger.Named("shamir_cluster"), + Seal: shamirSeal, + }, + &vault.TestClusterOptions{ + HandlerFunc: http.Handler, + NumCores: 1, + SkipInit: true, + }) + cluster.Start() + defer cluster.Cleanup() + + // Note that the recovery keys are now the barrier keys. 
+ cluster.BarrierKeys = recoveryKeys + testhelpers.EnsureCoresUnsealed(t, cluster) + + client := cluster.Cores[0].Client + client.SetToken(rootToken) + + // Read our secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, testEntry); len(diff) > 0 { + t.Fatal(diff) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + } +} From 28386e98fa1f0cf3c9b50f2f1147c26861cdcec6 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 12 May 2020 16:57:45 -0400 Subject: [PATCH 04/86] create shamir seal migration test --- .../seal_migration/seal_migration_test.go | 356 +++++++++--------- 1 file changed, 180 insertions(+), 176 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index eabba66dad9c..da91cdedda6d 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -1,219 +1,223 @@ package seal_migration import ( - "encoding/base64" + "context" + "fmt" "testing" "time" "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" - log "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping" - aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" - "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/testhelpers" - sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" - "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/hashicorp/vault/vault" - vaultseal "github.com/hashicorp/vault/vault/seal" ) -func TestSealMigration_TransitToShamir(t *testing.T) { - t.Parallel() +func TestShamir(t *testing.T) { + testVariousBackends(t, testShamir) +} + +func testVariousBackends(t *testing.T, tf testFunc) { + + logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) t.Run("inmem", func(t *testing.T) { t.Parallel() - logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - inm, err := inmem.NewTransactionalInmemHA(nil, logger) - if err != nil { - t.Fatal(err) - } - testSealMigrationTransitToShamir(t, logger, inm) + logger := logger.Named("inmem") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeInmemBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 51000) + }) + + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) }) - ////t.Run("file", func(t *testing.T) { - //// t.Parallel() - //// testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup) - ////}) + t.Run("raft", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger) + defer cleanup() + tf(t, logger, storage, 54000) + }) +} - 
////t.Run("consul", func(t *testing.T) { - //// t.Parallel() - //// testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup) - ////}) +type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) - ////t.Run("raft", func(t *testing.T) { - //// t.Parallel() - //// testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup) - ////}) +func testShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + rootToken, keys := initializeShamir(t, logger, storage, basePort) + reuseShamir(t, logger, storage, basePort, rootToken, keys) } -func testSealMigrationTransitToShamir(t *testing.T, logger log.Logger, backend physical.Backend) { +// initializeShamir initializes a brand new backend storage with Shamir. +func initializeShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) (string, [][]byte) { - var testEntry = map[string]interface{}{"bar": "quux"} + var baseClusterPort = basePort + 10 - // Create the transit server. - tss := sealhelper.NewTransitSealServer(t) + // Start the cluster + var conf = vault.CoreConfig{ + Logger: logger.Named("initializeShamir"), + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() defer func() { - if tss != nil { - tss.Cleanup() - } + storage.Cleanup(t, cluster) + cluster.Cleanup() }() - // Create a transit seal - tss.MakeKey(t, "key1") - transitSeal := tss.MakeSeal(t, "key1") - - // Create a cluster that uses transit - var rootToken string - var recoveryKeys [][]byte - { - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Physical: backend, - Logger: logger.Named("transit_cluster"), - Seal: transitSeal, - }, - &vault.TestClusterOptions{ - HandlerFunc: http.Handler, - NumCores: 1, - }) - cluster.Start() - defer cluster.Cleanup() - - // save the root token and recovery keys - client := cluster.Cores[0].Client - rootToken = client.Token() - recoveryKeys = cluster.RecoveryKeys - - // Write a secret that we will read back out later. - _, err := client.Logical().Write("secret/foo", testEntry) - if err != nil { - t.Fatal(err) - } + leader := cluster.Cores[0] + client := leader.Client + + if storage.IsRaft { + // Join raft cluster + testhelpers.RaftClusterJoinNodes(t, cluster) + time.Sleep(15 * time.Second) + verifyRaftConfiguration(t, leader) + } else { + // Unseal + cluster.UnsealCores(t) + } - // Seal the cluster - cluster.EnsureCoresSealed(t) + // Wait until unsealed + testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) + + // Write a secret that we will read back out later. + _, err := client.Logical().Write( + "secret/foo", + map[string]interface{}{"zork": "quux"}) + if err != nil { + t.Fatal(err) } - // Create a shamir seal - shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{}), - }) + // Seal the cluster + cluster.EnsureCoresSealed(t) - // Create a cluster that migrates from transit to shamir - { - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Physical: backend, - Logger: logger.Named("transit_to_shamir_cluster"), - Seal: shamirSeal, - // Setting an UnwrapSeal puts us in migration mode. This is the - // equivalent of doing the following in HCL: - // - // seal "transit" { - // // ... 
- // disabled = "true" - // } - // - UnwrapSeal: transitSeal, - }, - &vault.TestClusterOptions{ - HandlerFunc: http.Handler, - NumCores: 1, - SkipInit: true, - }) - cluster.Start() - defer cluster.Cleanup() - - client := cluster.Cores[0].Client - client.SetToken(rootToken) - - // Unseal and migrate to Shamir. Although we're unsealing using the - // recovery keys, this is still an autounseal; if we stopped the - // transit server this would fail. - var resp *api.SealStatusResponse - var err error - for _, key := range recoveryKeys { - strKey := base64.RawStdEncoding.EncodeToString(key) - - resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: strKey}) - if err == nil { - t.Fatal("expected error due to lack of migrate parameter") - } - - resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: strKey, Migrate: true}) - if err != nil { - t.Fatal(err) - } - if resp == nil || !resp.Sealed { - break - } - } - if resp == nil || resp.Sealed { - t.Fatalf("expected unsealed state; got %#v", resp) - } - testhelpers.WaitForActiveNode(t, cluster) + return cluster.RootToken, cluster.BarrierKeys +} - // Await migration to finish. Sadly there is no callback, and the test - // will fail later on if we don't do this. - // TODO maybe try to read? - time.Sleep(10 * time.Second) +// reuseShamir uses a pre-populated backend storage with Shamir. +func reuseShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + rootToken string, keys [][]byte) { - // Read our secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, testEntry); len(diff) > 0 { - t.Fatal(diff) - } + var baseClusterPort = basePort + 10 - // Seal the cluster - cluster.EnsureCoresSealed(t) + // Start the cluster + var conf = vault.CoreConfig{ + Logger: logger.Named("reuseShamir"), + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + SkipInit: true, } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + client.SetToken(rootToken) + + cluster.BarrierKeys = keys + if storage.IsRaft { + // Set hardcoded Raft address providers + provider := testhelpers.NewHardcodedServerAddressProvider(baseClusterPort) + testhelpers.SetRaftAddressProviders(t, cluster, provider) - // Nuke the transit server - tss.EnsureCoresSealed(t) - tss.Cleanup() - tss = nil - - // Create a cluster that uses shamir - { - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Physical: backend, - Logger: logger.Named("shamir_cluster"), - Seal: shamirSeal, - }, - &vault.TestClusterOptions{ - HandlerFunc: http.Handler, - NumCores: 1, - SkipInit: true, - }) - cluster.Start() - defer cluster.Cleanup() - - // Note that the recovery keys are now the barrier keys. 
- cluster.BarrierKeys = recoveryKeys - testhelpers.EnsureCoresUnsealed(t, cluster) - - client := cluster.Cores[0].Client - client.SetToken(rootToken) - - // Read our secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) + // Unseal cores + for _, core := range cluster.Cores { + cluster.UnsealCore(t, core) } - if diff := deep.Equal(secret.Data, testEntry); len(diff) > 0 { + time.Sleep(15 * time.Second) + verifyRaftConfiguration(t, leader) + } else { + // Unseal + cluster.UnsealCores(t) + } + + // Wait until unsealed + testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) + + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) +} + +func verifyRaftConfiguration(t *testing.T, core *vault.TestClusterCore) { + + backend := core.UnderlyingRawStorage.(*raft.RaftBackend) + ctx := namespace.RootContext(context.Background()) + config, err := backend.GetConfiguration(ctx) + if err != nil { + t.Fatal(err) + } + servers := config.Servers + + if len(servers) != vault.DefaultNumCores { + t.Fatalf("Found %d servers, not %d", len(servers), vault.DefaultNumCores) + } + + leaders := 0 + for i, s := range servers { + if diff := deep.Equal(s.NodeID, fmt.Sprintf("core-%d", i)); len(diff) > 0 { t.Fatal(diff) } + if s.Leader { + leaders++ + } + } - // Seal the cluster - cluster.EnsureCoresSealed(t) + if leaders != 1 { + t.Fatalf("Found %d leaders, not 1", leaders) } } From 1f8c5f1063033f029223b8ea7a32ec3098e8870d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 13 May 2020 08:25:25 -0400 Subject: [PATCH 05/86] refactor testhelpers --- helper/testhelpers/testhelpers.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index b9aff79f3b14..e242cd4ba1ab 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -446,7 +446,7 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys } // Join followers - for i := 1; i < len(cluster.Cores); i++ { + for i := 1; i < vault.DefaultNumCores; i++ { core := cluster.Cores[i] core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) @@ -463,7 +463,7 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys } } - WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) + WaitForNCoresUnsealed(t, cluster, 3) } func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) { @@ -505,11 +505,11 @@ func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftli // NewHardcodedServerAddressProvider is a convenience function that makes a // ServerAddressProvider from a given cluster address base port. 
-func NewHardcodedServerAddressProvider(cluster *vault.TestCluster, baseClusterPort int) raftlib.ServerAddressProvider { +func NewHardcodedServerAddressProvider(baseClusterPort int) raftlib.ServerAddressProvider { entries := make(map[raftlib.ServerID]raftlib.ServerAddress) - for i := 0; i < len(cluster.Cores); i++ { + for i := 0; i < vault.DefaultNumCores; i++ { id := fmt.Sprintf("core-%d", i) addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i) entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr) From 4cb0394b4564c40c08746d2c5abc2a1a50d5739e Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 13 May 2020 09:15:40 -0400 Subject: [PATCH 06/86] add VerifyRaftConfiguration to testhelpers --- helper/testhelpers/testhelpers.go | 11 +++-- .../seal_migration/seal_migration_test.go | 42 ++++--------------- 2 files changed, 12 insertions(+), 41 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index e242cd4ba1ab..c57b0861f7c8 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -531,10 +531,9 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r } } -// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. -// the correct number of servers, having the correct NodeIDs, and exactly one -// leader. -func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error { +// VerifyRaftConfiguration checks that we have a valid raft configuration, +// i.e. three servers with one leader and two followers. +func VerifyRaftConfiguration(t testing.T, core *vault.TestClusterCore) error { backend := core.UnderlyingRawStorage.(*raft.RaftBackend) ctx := namespace.RootContext(context.Background()) @@ -544,8 +543,8 @@ func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error { } servers := config.Servers - if len(servers) != numCores { - return fmt.Errorf("Found %d servers, not %d", len(servers), numCores) + if len(servers) != vault.DefaultNumCores { + return fmt.Errorf("Found %d servers, not %d", len(servers), vault.DefaultNumCores) } leaders := 0 diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index da91cdedda6d..edc57b4a316f 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -1,7 +1,6 @@ package seal_migration import ( - "context" "fmt" "testing" "time" @@ -9,11 +8,9 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/testhelpers" "github.com/hashicorp/vault/helper/testhelpers/teststorage" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault" ) @@ -106,8 +103,9 @@ func initializeShamir( if storage.IsRaft { // Join raft cluster testhelpers.RaftClusterJoinNodes(t, cluster) - time.Sleep(15 * time.Second) - verifyRaftConfiguration(t, leader) + if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { + t.Fatal(err) + } } else { // Unseal cluster.UnsealCores(t) @@ -170,8 +168,11 @@ func reuseShamir( for _, core := range cluster.Cores { cluster.UnsealCore(t, core) } + // It saddens me that this is necessary time.Sleep(15 * time.Second) - verifyRaftConfiguration(t, leader) + if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { + 
t.Fatal(err) + } } else { // Unseal cluster.UnsealCores(t) @@ -192,32 +193,3 @@ func reuseShamir( // Seal the cluster cluster.EnsureCoresSealed(t) } - -func verifyRaftConfiguration(t *testing.T, core *vault.TestClusterCore) { - - backend := core.UnderlyingRawStorage.(*raft.RaftBackend) - ctx := namespace.RootContext(context.Background()) - config, err := backend.GetConfiguration(ctx) - if err != nil { - t.Fatal(err) - } - servers := config.Servers - - if len(servers) != vault.DefaultNumCores { - t.Fatalf("Found %d servers, not %d", len(servers), vault.DefaultNumCores) - } - - leaders := 0 - for i, s := range servers { - if diff := deep.Equal(s.NodeID, fmt.Sprintf("core-%d", i)); len(diff) > 0 { - t.Fatal(diff) - } - if s.Leader { - leaders++ - } - } - - if leaders != 1 { - t.Fatalf("Found %d leaders, not 1", leaders) - } -} From 6e8caae1da3b92375bf509a3d997972cc6866d1d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 13 May 2020 10:10:59 -0400 Subject: [PATCH 07/86] stub out TestTransit --- .../seal_migration/seal_migration_test.go | 164 +++++++++++++----- 1 file changed, 121 insertions(+), 43 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index edc57b4a316f..3dafb00df846 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/testhelpers" + sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" "github.com/hashicorp/vault/helper/testhelpers/teststorage" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" @@ -19,6 +20,12 @@ func TestShamir(t *testing.T) { testVariousBackends(t, testShamir) } +func TestTransit(t *testing.T) { + testVariousBackends(t, testTransit) +} + +type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) + func testVariousBackends(t *testing.T, tf testFunc) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) @@ -33,44 +40,60 @@ func testVariousBackends(t *testing.T, tf testFunc) { tf(t, logger, storage, 51000) }) - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) + + //t.Run("raft", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("raft") + // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger) + // defer cleanup() + // tf(t, logger, storage, 54000) + //}) +} - 
t.Run("raft", func(t *testing.T) { - t.Parallel() +func testShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger) - defer cleanup() - tf(t, logger, storage, 54000) - }) + rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) + reuseShamir(t, logger, storage, basePort, rootToken, barrierKeys) } -type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) - -func testShamir( +func testTransit( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) { - rootToken, keys := initializeShamir(t, logger, storage, basePort) - reuseShamir(t, logger, storage, basePort, rootToken, keys) + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer func() { + if tss != nil { + tss.Cleanup() + } + }() + tss.MakeKey(t, "transit-seal-key") + + rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) + //reuseShamir(t, logger, storage, basePort, rootToken, barrierKeys) } // initializeShamir initializes a brand new backend storage with Shamir. @@ -100,18 +123,15 @@ func initializeShamir( leader := cluster.Cores[0] client := leader.Client + // Unseal if storage.IsRaft { - // Join raft cluster testhelpers.RaftClusterJoinNodes(t, cluster) if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { t.Fatal(err) } } else { - // Unseal cluster.UnsealCores(t) } - - // Wait until unsealed testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) // Write a secret that we will read back out later. @@ -132,7 +152,7 @@ func initializeShamir( func reuseShamir( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, - rootToken string, keys [][]byte) { + rootToken string, barrierKeys [][]byte) { var baseClusterPort = basePort + 10 @@ -158,27 +178,23 @@ func reuseShamir( client := leader.Client client.SetToken(rootToken) - cluster.BarrierKeys = keys + // Unseal + cluster.BarrierKeys = barrierKeys if storage.IsRaft { - // Set hardcoded Raft address providers provider := testhelpers.NewHardcodedServerAddressProvider(baseClusterPort) testhelpers.SetRaftAddressProviders(t, cluster, provider) - // Unseal cores for _, core := range cluster.Cores { cluster.UnsealCore(t, core) } - // It saddens me that this is necessary time.Sleep(15 * time.Second) + if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { t.Fatal(err) } } else { - // Unseal cluster.UnsealCores(t) } - - // Wait until unsealed testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) // Read the secret @@ -193,3 +209,65 @@ func reuseShamir( // Seal the cluster cluster.EnsureCoresSealed(t) } + +// initializeTransit initializes a brand new backend storage with Transit. 
+func initializeTransit( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) { + + var transitSeal vault.Seal + + var baseClusterPort = basePort + 10 + + // Start the cluster + var conf = vault.CoreConfig{ + Logger: logger.Named("initializeTransit"), + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + SealFunc: func() vault.Seal { + // Each core will create its own transit seal here. Later + // on it won't matter which one of these we end up using, since + // they were all created from the same transit key. + transitSeal = tss.MakeSeal(t, "transit-seal-key") + return transitSeal + }, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + + // Unseal + if storage.IsRaft { + testhelpers.RaftClusterJoinNodes(t, cluster) + if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { + t.Fatal(err) + } + } else { + cluster.UnsealCores(t) + } + testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) + + // Write a secret that we will read back out later. + _, err := client.Logical().Write( + "secret/foo", + map[string]interface{}{"zork": "quux"}) + if err != nil { + t.Fatal(err) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + return cluster.RootToken, cluster.RecoveryKeys, transitSeal +} From 2abca4a628910cbd42a77cbac0da01b2c3a28c9f Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 13 May 2020 12:35:00 -0400 Subject: [PATCH 08/86] Revert "refactor testhelpers" This reverts commit 39593defd0d4c6fd79aedfd37df6298391abb9db. --- helper/testhelpers/testhelpers.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index c57b0861f7c8..5a74cce991c9 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -445,8 +445,7 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys }, } - // Join followers - for i := 1; i < vault.DefaultNumCores; i++ { + for i := 1; i < len(cluster.Cores); i++ { core := cluster.Cores[i] core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) @@ -463,7 +462,7 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys } } - WaitForNCoresUnsealed(t, cluster, 3) + WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) } func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) { @@ -505,11 +504,11 @@ func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftli // NewHardcodedServerAddressProvider is a convenience function that makes a // ServerAddressProvider from a given cluster address base port. 
-func NewHardcodedServerAddressProvider(baseClusterPort int) raftlib.ServerAddressProvider { +func NewHardcodedServerAddressProvider(cluster *vault.TestCluster, baseClusterPort int) raftlib.ServerAddressProvider { entries := make(map[raftlib.ServerID]raftlib.ServerAddress) - for i := 0; i < vault.DefaultNumCores; i++ { + for i := 0; i < len(cluster.Cores); i++ { id := fmt.Sprintf("core-%d", i) addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i) entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr) From f7536c265ee29b8ac93769c3600d2f41f16d116d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 13 May 2020 12:53:58 -0400 Subject: [PATCH 09/86] get shamir test working again --- helper/testhelpers/testhelpers.go | 11 +-- .../seal_migration/seal_migration_test.go | 75 ++++++++++--------- 2 files changed, 46 insertions(+), 40 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 5a74cce991c9..e02a0b9f5b1a 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -530,9 +530,10 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r } } -// VerifyRaftConfiguration checks that we have a valid raft configuration, -// i.e. three servers with one leader and two followers. -func VerifyRaftConfiguration(t testing.T, core *vault.TestClusterCore) error { +// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. +// the correct number of servers, having the correct NodeIDs, and exactly one +// leader. +func VerifyRaftConfiguration(t testing.T, core *vault.TestClusterCore, numCores int) error { backend := core.UnderlyingRawStorage.(*raft.RaftBackend) ctx := namespace.RootContext(context.Background()) @@ -542,8 +543,8 @@ func VerifyRaftConfiguration(t testing.T, core *vault.TestClusterCore) error { } servers := config.Servers - if len(servers) != vault.DefaultNumCores { - return fmt.Errorf("Found %d servers, not %d", len(servers), vault.DefaultNumCores) + if len(servers) != numCores { + return fmt.Errorf("Found %d servers, not %d", len(servers), numCores) } leaders := 0 diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 3dafb00df846..707df9f8218b 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -16,6 +16,8 @@ import ( "github.com/hashicorp/vault/vault" ) +const numTestCores = 5 + func TestShamir(t *testing.T) { testVariousBackends(t, testShamir) } @@ -40,34 +42,34 @@ func testVariousBackends(t *testing.T, tf testFunc) { tf(t, logger, storage, 51000) }) - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) - - //t.Run("raft", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("raft") - // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger) - // defer cleanup() - // tf(t, logger, storage, 54000) - //}) + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := 
logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) + + t.Run("raft", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + defer cleanup() + tf(t, logger, storage, 54000) + }) } func testShamir( @@ -109,6 +111,7 @@ func initializeShamir( } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, } @@ -126,13 +129,13 @@ func initializeShamir( // Unseal if storage.IsRaft { testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { + if err := testhelpers.VerifyRaftConfiguration(t, leader, numTestCores); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Write a secret that we will read back out later. _, err := client.Logical().Write( @@ -162,6 +165,7 @@ func reuseShamir( } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, SkipInit: true, @@ -181,7 +185,7 @@ func reuseShamir( // Unseal cluster.BarrierKeys = barrierKeys if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(baseClusterPort) + provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) testhelpers.SetRaftAddressProviders(t, cluster, provider) for _, core := range cluster.Cores { @@ -189,13 +193,13 @@ func reuseShamir( } time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { + if err := testhelpers.VerifyRaftConfiguration(t, leader, numTestCores); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Read the secret secret, err := client.Logical().Read("secret/foo") @@ -226,6 +230,7 @@ func initializeTransit( } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, SealFunc: func() vault.Seal { @@ -250,13 +255,13 @@ func initializeTransit( // Unseal if storage.IsRaft { testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(t, leader); err != nil { + if err := testhelpers.VerifyRaftConfiguration(t, leader, numTestCores); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - testhelpers.WaitForNCoresUnsealed(t, cluster, vault.DefaultNumCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Write a secret that we will read back out later. 
_, err := client.Logical().Write( From f3039dfa651a81cb6cf9f0e78f25c4b2bcb93584 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 09:47:55 -0400 Subject: [PATCH 10/86] stub out transit join --- helper/testhelpers/testhelpers.go | 43 ++--------- .../seal_migration/seal_migration_test.go | 73 +++++++++---------- vault/testing.go | 3 +- 3 files changed, 45 insertions(+), 74 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index e02a0b9f5b1a..e4c00ff0fd76 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -412,14 +412,6 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib } func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { - raftClusterJoinNodes(t, cluster, false) -} - -func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) { - raftClusterJoinNodes(t, cluster, true) -} - -func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) { addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) @@ -430,19 +422,13 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys { EnsureCoreSealed(t, leader) leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - if useStoredKeys { - cluster.UnsealCoreWithStoredKeys(t, leader) - } else { - cluster.UnsealCore(t, leader) - } + cluster.UnsealCore(t, leader) vault.TestWaitActive(t, leader.Core) } - leaderInfos := []*raft.LeaderJoinInfo{ - &raft.LeaderJoinInfo{ - LeaderAPIAddr: leader.Client.Address(), - TLSConfig: leader.TLSConfig, - }, + leaderInfo := &raft.LeaderJoinInfo{ + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig, } for i := 1; i < len(cluster.Cores); i++ { @@ -465,23 +451,10 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) } -func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) { +func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) { - timeout := time.Now().Add(30 * time.Second) - for { - if time.Now().After(timeout) { - t.Fatal("raft join: timeout waiting for core to unseal") - } - // Its actually ok for an error to happen here the first couple of - // times -- it means the raft join hasn't gotten around to initializing - // the backend yet. - err := core.UnsealWithStoredKeys(context.Background()) - if err == nil { - return - } - core.Logger().Warn("raft join: failed to unseal core", "error", err) - time.Sleep(time.Second) - } + leader := cluster.Cores[0] + debugRaftConfiguration(t, leader) } // HardcodedServerAddressProvider is a ServerAddressProvider that uses @@ -533,7 +506,7 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r // VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. // the correct number of servers, having the correct NodeIDs, and exactly one // leader. 
-func VerifyRaftConfiguration(t testing.T, core *vault.TestClusterCore, numCores int) error { +func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error { backend := core.UnderlyingRawStorage.(*raft.RaftBackend) ctx := namespace.RootContext(context.Background()) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 707df9f8218b..3538062622e5 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -32,35 +32,35 @@ func testVariousBackends(t *testing.T, tf testFunc) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - t.Run("inmem", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("inmem") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeInmemBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 51000) - }) - - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) + //t.Run("inmem", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("inmem") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeInmemBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 51000) + //}) + + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) t.Run("raft", func(t *testing.T) { t.Parallel() @@ -95,7 +95,6 @@ func testTransit( rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) - //reuseShamir(t, logger, storage, basePort, rootToken, barrierKeys) } // initializeShamir initializes a brand new backend storage with Shamir. 
@@ -129,7 +128,7 @@ func initializeShamir( // Unseal if storage.IsRaft { testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(t, leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } } else { @@ -193,7 +192,7 @@ func reuseShamir( } time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(t, leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } } else { @@ -254,12 +253,10 @@ func initializeTransit( // Unseal if storage.IsRaft { - testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(t, leader, numTestCores); err != nil { - t.Fatal(err) - } - } else { - cluster.UnsealCores(t) + testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster) + //if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + // t.Fatal(err) + //} } testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) diff --git a/vault/testing.go b/vault/testing.go index 0c73b1f29ae5..646e62ebe840 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -817,7 +817,8 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) { } func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) { - if err := core.UnsealWithStoredKeys(context.Background()); err != nil { + err := core.UnsealWithStoredKeys(context.Background()) + if err != nil { t.Fatal(err) } } From 3066b5181eb38cd937bb3839e2f3a8c6efdb66fa Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 10:52:24 -0400 Subject: [PATCH 11/86] work on transit join --- helper/testhelpers/testhelpers.go | 60 ++++++++++++++++++- .../seal_migration/seal_migration_test.go | 3 +- vault/init.go | 5 ++ vault/testing.go | 7 --- 4 files changed, 64 insertions(+), 11 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index e4c00ff0fd76..3dce7c4f9d16 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -426,11 +426,14 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { vault.TestWaitActive(t, leader.Core) } - leaderInfo := &raft.LeaderJoinInfo{ - LeaderAPIAddr: leader.Client.Address(), - TLSConfig: leader.TLSConfig, + leaderInfos := []*raft.LeaderJoinInfo{ + &raft.LeaderJoinInfo{ + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig, + }, } + // Join followers for i := 1; i < len(cluster.Cores); i++ { core := cluster.Cores[i] core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) @@ -453,8 +456,59 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) { + addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} + atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) + leader := cluster.Cores[0] + + // Seal the leader so we can install an address provider + { + EnsureCoreSealed(t, leader) + leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + if err := leader.UnsealWithStoredKeys(context.Background()); err != nil { + t.Fatal(err) + } + vault.TestWaitActive(t, leader.Core) + } + + leaderInfo := &raft.LeaderJoinInfo{ + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig, + } + + for i := 1; i < len(cluster.Cores); i++ { + core := cluster.Cores[i] + 
core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + leaderInfos := []*raft.LeaderJoinInfo{ + leaderInfo, + } + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) + if err != nil { + t.Fatal(err) + } + + // The raft backend is not initialized right away after the join. We + // need to wait briefly before we can unseal. + timeout := time.Now().Add(30 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("timeout waiting for core to unseal") + } + err := core.UnsealWithStoredKeys(context.Background()) + if err == nil { + return + } + core.Logger().Warn("failed to unseal core", "error", err) + time.Sleep(time.Second) + } + } + debugRaftConfiguration(t, leader) + for i, c := range cluster.Cores { + fmt.Printf(">>> core sealed %d %t\n", i, c.Core.Sealed()) + } + + WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) } // HardcodedServerAddressProvider is a ServerAddressProvider that uses diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 3538062622e5..b53595b43c4b 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -257,8 +257,9 @@ func initializeTransit( //if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { // t.Fatal(err) //} + } else { + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Write a secret that we will read back out later. _, err := client.Logical().Write( diff --git a/vault/init.go b/vault/init.go index 2fd8a2548f66..72acc5e0c6ee 100644 --- a/vault/init.go +++ b/vault/init.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "net/url" + "runtime/debug" "sync/atomic" wrapping "github.com/hashicorp/go-kms-wrapping" @@ -416,6 +417,10 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { c.unsealWithStoredKeysLock.Lock() defer c.unsealWithStoredKeysLock.Unlock() + fmt.Printf("--------------------------------------------------------------------------\n") + fmt.Printf("Core.UnsealWithStoredKeys\n") + debug.PrintStack() + if c.seal.BarrierType() == wrapping.Shamir { return nil } diff --git a/vault/testing.go b/vault/testing.go index 646e62ebe840..91dc60d6538c 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -816,13 +816,6 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) { } } -func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) { - err := core.UnsealWithStoredKeys(context.Background()) - if err != nil { - t.Fatal(err) - } -} - func (c *TestCluster) EnsureCoresSealed(t testing.T) { t.Helper() if err := c.ensureCoresSealed(); err != nil { From 594bccfc066c01ce3733de6f163f2a12287f51d7 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 11:22:20 -0400 Subject: [PATCH 12/86] remove debug code --- vault/init.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/vault/init.go b/vault/init.go index 72acc5e0c6ee..2fd8a2548f66 100644 --- a/vault/init.go +++ b/vault/init.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "net/url" - "runtime/debug" "sync/atomic" wrapping "github.com/hashicorp/go-kms-wrapping" @@ -417,10 +416,6 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { c.unsealWithStoredKeysLock.Lock() defer c.unsealWithStoredKeysLock.Unlock() - 
fmt.Printf("--------------------------------------------------------------------------\n") - fmt.Printf("Core.UnsealWithStoredKeys\n") - debug.PrintStack() - if c.seal.BarrierType() == wrapping.Shamir { return nil } From b223d503a6f2233dfdc697a94fa185c340302b28 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 12:09:08 -0400 Subject: [PATCH 13/86] initTransit now works with raft join --- helper/testhelpers/testhelpers.go | 77 ++++++------------- .../seal_migration/seal_migration_test.go | 64 +++++++-------- 2 files changed, 57 insertions(+), 84 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 3dce7c4f9d16..5ecd971fc2b5 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -412,6 +412,14 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib } func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { + raftClusterJoinNodes(t, cluster, false) +} + +func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) { + raftClusterJoinNodes(t, cluster, true) +} + +func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) { addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) @@ -422,7 +430,13 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { { EnsureCoreSealed(t, leader) leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leader) + if useStoredKeys { + if err := leader.UnsealWithStoredKeys(context.Background()); err != nil { + t.Fatal(err) + } + } else { + cluster.UnsealCore(t, leader) + } vault.TestWaitActive(t, leader.Core) } @@ -454,61 +468,20 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) } -func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) { - - addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} - atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) - - leader := cluster.Cores[0] +func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) { - // Seal the leader so we can install an address provider - { - EnsureCoreSealed(t, leader) - leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - if err := leader.UnsealWithStoredKeys(context.Background()); err != nil { - t.Fatal(err) - } - vault.TestWaitActive(t, leader.Core) - } - - leaderInfo := &raft.LeaderJoinInfo{ - LeaderAPIAddr: leader.Client.Address(), - TLSConfig: leader.TLSConfig, - } - - for i := 1; i < len(cluster.Cores); i++ { - core := cluster.Cores[i] - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - leaderInfos := []*raft.LeaderJoinInfo{ - leaderInfo, - } - _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) - if err != nil { - t.Fatal(err) + timeout := time.Now().Add(30 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("raft join: timeout waiting for core to unseal") } - - // The raft backend is not initialized right away after the join. We - // need to wait briefly before we can unseal. 
- timeout := time.Now().Add(30 * time.Second) - for { - if time.Now().After(timeout) { - t.Fatal("timeout waiting for core to unseal") - } - err := core.UnsealWithStoredKeys(context.Background()) - if err == nil { - return - } - core.Logger().Warn("failed to unseal core", "error", err) - time.Sleep(time.Second) + err := core.UnsealWithStoredKeys(context.Background()) + if err == nil { + return } + core.Logger().Warn("raft join: failed to unseal core", "error", err) + time.Sleep(time.Second) } - - debugRaftConfiguration(t, leader) - for i, c := range cluster.Cores { - fmt.Printf(">>> core sealed %d %t\n", i, c.Core.Sealed()) - } - - WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) } // HardcodedServerAddressProvider is a ServerAddressProvider that uses diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index b53595b43c4b..27b394605441 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -32,35 +32,35 @@ func testVariousBackends(t *testing.T, tf testFunc) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - //t.Run("inmem", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("inmem") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeInmemBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 51000) - //}) - - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) + t.Run("inmem", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("inmem") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeInmemBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 51000) + }) + + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) t.Run("raft", func(t *testing.T) { t.Parallel() @@ -254,9 +254,9 @@ func initializeTransit( // Unseal if storage.IsRaft { testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster) - //if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - // t.Fatal(err) - //} + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + t.Fatal(err) + } } else { testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) } From 26b7a94fa0e5aa3eaaf4c2236fff1eef345eef8c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 12:30:35 -0400 Subject: [PATCH 14/86] runTransit works with inmem --- helper/testhelpers/testhelpers.go | 3 + .../seal_migration/seal_migration_test.go | 136 ++++++++++++------ 2 
files changed, 98 insertions(+), 41 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 5ecd971fc2b5..f5e773d0cf9b 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -475,6 +475,9 @@ func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) { if time.Now().After(timeout) { t.Fatal("raft join: timeout waiting for core to unseal") } + // Its actually ok for an error to happen here the first couple of + // times -- it means the raft join hasn't gotten around to initializing + // the backend yet. err := core.UnsealWithStoredKeys(context.Background()) if err == nil { return diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 27b394605441..be0631cadf83 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -42,34 +42,34 @@ func testVariousBackends(t *testing.T, tf testFunc) { tf(t, logger, storage, 51000) }) - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) - - t.Run("raft", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - defer cleanup() - tf(t, logger, storage, 54000) - }) + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) + + //t.Run("raft", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("raft") + // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + // defer cleanup() + // tf(t, logger, storage, 54000) + //}) } func testShamir( @@ -77,7 +77,7 @@ func testShamir( storage teststorage.ReusableStorage, basePort int) { rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) - reuseShamir(t, logger, storage, basePort, rootToken, barrierKeys) + runShamir(t, logger, storage, basePort, rootToken, barrierKeys) } func testTransit( @@ -86,15 +86,12 @@ func testTransit( // Create the transit server. 
tss := sealhelper.NewTransitSealServer(t) - defer func() { - if tss != nil { - tss.Cleanup() - } - }() + defer tss.Cleanup() tss.MakeKey(t, "transit-seal-key") - rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) + rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + //println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) + runTransit(t, logger, storage, basePort, rootToken, transitSeal) } // initializeShamir initializes a brand new backend storage with Shamir. @@ -150,8 +147,8 @@ func initializeShamir( return cluster.RootToken, cluster.BarrierKeys } -// reuseShamir uses a pre-populated backend storage with Shamir. -func reuseShamir( +// runShamir uses a pre-populated backend storage with Shamir. +func runShamir( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, barrierKeys [][]byte) { @@ -160,7 +157,7 @@ func reuseShamir( // Start the cluster var conf = vault.CoreConfig{ - Logger: logger.Named("reuseShamir"), + Logger: logger.Named("runShamir"), } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, @@ -257,9 +254,8 @@ func initializeTransit( if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } - } else { - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) } + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Write a secret that we will read back out later. _, err := client.Logical().Write( @@ -274,3 +270,61 @@ func initializeTransit( return cluster.RootToken, cluster.RecoveryKeys, transitSeal } + +func runTransit( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + rootToken string, transitSeal vault.Seal) { + + var baseClusterPort = basePort + 10 + + // Start the cluster + var conf = vault.CoreConfig{ + Logger: logger.Named("runShamir"), + Seal: transitSeal, + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + SkipInit: true, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + client.SetToken(rootToken) + + // Even though we are using autounseal, we have to unseal explicitly + // because we are using SkipInit. 
+ if storage.IsRaft { + panic("dasfdsf") + } else { + if err := cluster.UnsealCoresWithError(true); err != nil { + t.Fatal(err) + } + //for i, c := range cluster.Cores { + // fmt.Printf(">>> core sealed %d %t\n", i, c.Core.Sealed()) + //} + } + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) +} From 9a523df39af9b9cd0479cae5b1361c106f48f24e Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 13:56:31 -0400 Subject: [PATCH 15/86] work on runTransit with raft --- helper/testhelpers/testhelpers.go | 4 +- .../seal_migration/seal_migration_test.go | 62 ++++++++++--------- vault/testing.go | 6 ++ 3 files changed, 41 insertions(+), 31 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index f5e773d0cf9b..b9aff79f3b14 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -431,9 +431,7 @@ func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys EnsureCoreSealed(t, leader) leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) if useStoredKeys { - if err := leader.UnsealWithStoredKeys(context.Background()); err != nil { - t.Fatal(err) - } + cluster.UnsealCoreWithStoredKeys(t, leader) } else { cluster.UnsealCore(t, leader) } diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index be0631cadf83..5988a1a0b5a2 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -32,15 +32,15 @@ func testVariousBackends(t *testing.T, tf testFunc) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - t.Run("inmem", func(t *testing.T) { - t.Parallel() + //t.Run("inmem", func(t *testing.T) { + // t.Parallel() - logger := logger.Named("inmem") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeInmemBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 51000) - }) + // logger := logger.Named("inmem") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeInmemBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 51000) + //}) //t.Run("file", func(t *testing.T) { // t.Parallel() @@ -62,14 +62,14 @@ func testVariousBackends(t *testing.T, tf testFunc) { // tf(t, logger, storage, 53000) //}) - //t.Run("raft", func(t *testing.T) { - // t.Parallel() + t.Run("raft", func(t *testing.T) { + t.Parallel() - // logger := logger.Named("raft") - // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - // defer cleanup() - // tf(t, logger, storage, 54000) - //}) + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + defer cleanup() + tf(t, logger, storage, 54000) + }) } func testShamir( @@ -248,7 +248,7 @@ func initializeTransit( leader := cluster.Cores[0] client := leader.Client - // Unseal + // Join raft if storage.IsRaft { testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster) if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { @@ -305,25 +305,31 @@ func runTransit( // Even though we are using autounseal, we 
have to unseal explicitly // because we are using SkipInit. if storage.IsRaft { - panic("dasfdsf") + for _, core := range cluster.Cores { + cluster.UnsealCoreWithStoredKeys(t, core) + } + //time.Sleep(15 * time.Second) + + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + t.Fatal(err) + } } else { if err := cluster.UnsealCoresWithError(true); err != nil { t.Fatal(err) } - //for i, c := range cluster.Cores { - // fmt.Printf(">>> core sealed %d %t\n", i, c.Core.Sealed()) - //} } testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - // Read the secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } + testhelpers.DebugCores(t, cluster) + + //// Read the secret + //secret, err := client.Logical().Read("secret/foo") + //if err != nil { + // t.Fatal(err) + //} + //if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + // t.Fatal(diff) + //} // Seal the cluster cluster.EnsureCoresSealed(t) diff --git a/vault/testing.go b/vault/testing.go index 91dc60d6538c..0c73b1f29ae5 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -816,6 +816,12 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) { } } +func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) { + if err := core.UnsealWithStoredKeys(context.Background()); err != nil { + t.Fatal(err) + } +} + func (c *TestCluster) EnsureCoresSealed(t testing.T) { t.Helper() if err := c.ensureCoresSealed(); err != nil { From 0e68260cdaa702ab3f4cd3bae35b39b28ad32453 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 15:06:30 -0400 Subject: [PATCH 16/86] runTransit works with raft --- .../seal_migration/seal_migration_test.go | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 5988a1a0b5a2..c4161bbc7cb2 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -302,13 +302,16 @@ func runTransit( client := leader.Client client.SetToken(rootToken) - // Even though we are using autounseal, we have to unseal explicitly - // because we are using SkipInit. + // Unseal. Even though we are using autounseal, we have to unseal + // explicitly because we are using SkipInit. 
if storage.IsRaft { + provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) + testhelpers.SetRaftAddressProviders(t, cluster, provider) + for _, core := range cluster.Cores { cluster.UnsealCoreWithStoredKeys(t, core) } - //time.Sleep(15 * time.Second) + time.Sleep(15 * time.Second) if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) @@ -322,14 +325,14 @@ func runTransit( testhelpers.DebugCores(t, cluster) - //// Read the secret - //secret, err := client.Logical().Read("secret/foo") - //if err != nil { - // t.Fatal(err) - //} - //if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - // t.Fatal(diff) - //} + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } // Seal the cluster cluster.EnsureCoresSealed(t) From ee20dbab9f7caa21794e4c3fcd91d39830e8e2d4 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 14 May 2020 15:25:58 -0400 Subject: [PATCH 17/86] cleanup tests --- .../seal_migration/seal_migration_test.go | 68 ++++++++++--------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index c4161bbc7cb2..aed0d280a445 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -32,35 +32,35 @@ func testVariousBackends(t *testing.T, tf testFunc) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - //t.Run("inmem", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("inmem") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeInmemBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 51000) - //}) - - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) + t.Run("inmem", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("inmem") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeInmemBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 51000) + }) + + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) t.Run("raft", func(t *testing.T) { t.Parallel() @@ -187,6 +187,9 @@ func runShamir( for _, core := range cluster.Cores { cluster.UnsealCore(t, core) } + + // This is apparently necessary for the raft cluster to get itself + // situated. 
time.Sleep(15 * time.Second) if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { @@ -280,7 +283,7 @@ func runTransit( // Start the cluster var conf = vault.CoreConfig{ - Logger: logger.Named("runShamir"), + Logger: logger.Named("runTransit"), Seal: transitSeal, } var opts = vault.TestClusterOptions{ @@ -311,6 +314,9 @@ func runTransit( for _, core := range cluster.Cores { cluster.UnsealCoreWithStoredKeys(t, core) } + + // This is apparently necessary for the raft cluster to get itself + // situated. time.Sleep(15 * time.Second) if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { @@ -323,8 +329,6 @@ func runTransit( } testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - testhelpers.DebugCores(t, cluster) - // Read the secret secret, err := client.Logical().Read("secret/foo") if err != nil { From 926a957ffb530a1edef567a5ccf1b4335536940c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 11:36:02 -0400 Subject: [PATCH 18/86] TestSealMigration_TransitToShamir_Pre14 --- .../seal_migration/seal_migration_test.go | 246 ++++++++++++++++-- 1 file changed, 218 insertions(+), 28 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index aed0d280a445..7ea4b4e4ea2e 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -1,6 +1,8 @@ package seal_migration import ( + "context" + "encoding/base64" "fmt" "testing" "time" @@ -8,6 +10,8 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers" sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" "github.com/hashicorp/vault/helper/testhelpers/teststorage" @@ -16,19 +20,15 @@ import ( "github.com/hashicorp/vault/vault" ) -const numTestCores = 5 - -func TestShamir(t *testing.T) { - testVariousBackends(t, testShamir) -} - -func TestTransit(t *testing.T) { - testVariousBackends(t, testTransit) -} +const ( + numTestCores = 5 + keyShares = 3 + keyThreshold = 3 +) type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) -func testVariousBackends(t *testing.T, tf testFunc) { +func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) @@ -62,36 +62,226 @@ func testVariousBackends(t *testing.T, tf testFunc) { tf(t, logger, storage, 53000) }) - t.Run("raft", func(t *testing.T) { - t.Parallel() + if includeRaft { + t.Run("raft", func(t *testing.T) { + t.Parallel() - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - defer cleanup() - tf(t, logger, storage, 54000) - }) -} + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + defer cleanup() + tf(t, logger, storage, 54000) + }) -func testShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { + } +} - rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) - runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +//func TestShamir(t *testing.T) { +// testVariousBackends(t, testShamir, true) +//} +// +//func testShamir( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, 
basePort int) { +// +// rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) +// runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +//} +// +//func TestTransit(t *testing.T) { +// testVariousBackends(t, testTransit, true) +//} +// +//func testTransit( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// // Create the transit server. +// tss := sealhelper.NewTransitSealServer(t) +// defer tss.Cleanup() +// tss.MakeKey(t, "transit-seal-key") +// +// rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) +// //println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) +// runTransit(t, logger, storage, basePort, rootToken, transitSeal) +//} + +//--------------------------------------------------------- + +// TODO skip enterprise --> merge in to double check + +func TestSealMigration_TransitToShamir_Pre14(t *testing.T) { + testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) } -func testTransit( +func testSealMigrationTransitToShamir_Pre14( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) { // Create the transit server. tss := sealhelper.NewTransitSealServer(t) - defer tss.Cleanup() + defer func() { + if tss != nil { + tss.Cleanup() + } + }() tss.MakeKey(t, "transit-seal-key") - rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - //println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) - runTransit(t, logger, storage, basePort, rootToken, transitSeal) + // Initialize the backend with transit. + rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + + // Migrate the backend from transit to shamir + migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) + + // Now that migration is done, we can nuke the transit server, since we + // can unseal without it. + tss.Cleanup() + tss = nil + + // Run the backend with shamir. Note that the recovery keys are now the + // barrier keys. + runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) +} + +// migrateFromTransitToShamir migrates the backend from transit to shamir +func migrateFromTransitToShamir_Pre14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + tss *sealhelper.TransitSealServer, transitSeal vault.Seal, + rootToken string, recoveryKeys [][]byte) { + + var baseClusterPort = basePort + 10 + + var conf = vault.CoreConfig{ + Logger: logger.Named("migrateFromTransitToShamir"), + // N.B. Providing an UnwrapSeal puts us in migration mode. This is the + // equivalent of doing the following in HCL: + // seal "transit" { + // // ... + // disabled = "true" + // } + UnwrapSeal: transitSeal, + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + SkipInit: true, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + client.SetToken(rootToken) + + // Attempt to unseal while the transit server is unreachable. Although + // we're unsealing using the recovery keys, this is still an + // autounseal, so it should fail. 
+ tss.EnsureCoresSealed(t) + unsealMigrate(t, client, recoveryKeys, false) + tss.UnsealCores(t) + testhelpers.WaitForActiveNode(t, tss.TestCluster) + + // Unseal and migrate to Shamir. Although we're unsealing using the + // recovery keys, this is still an autounseal. + //for _, node := range cluster.Cores { + // nclient := node.Client + // nclient.SetToken(rootToken) + // unsealMigrate(t, nclient, recoveryKeys, true) + //} + unsealMigrate(t, client, recoveryKeys, true) + testhelpers.WaitForActiveNode(t, cluster) + + // Wait for migration to finish. Sadly there is no callback, and the + // test will fail later on if we don't do this. TODO Maybe we should add a + // timeout loop of some kind here? + time.Sleep(10 * time.Second) + + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Make sure the seal configs were updated correctly. + b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) + if err != nil { + t.Fatal(err) + } + verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) + if r != nil { + t.Fatalf("expected nil recovery config, got: %#v", r) + } + + cluster.EnsureCoresSealed(t) +} + +func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { + + for i, key := range keys { + + // Try to unseal with missing "migrate" parameter + _, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ + Key: base64.StdEncoding.EncodeToString(key), + }) + if err == nil { + t.Fatal("expected error due to lack of migrate parameter") + } + + // Unseal with "migrate" parameter + resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ + Key: base64.StdEncoding.EncodeToString(key), + Migrate: true, + }) + + if i < keyThreshold-1 { + // Not enough keys have been provided yet. + if err != nil { + t.Fatal(err) + } + } else { + if transitServerAvailable { + // The transit server is running. + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Sealed { + t.Fatalf("expected unsealed state; got %#v", resp) + } + } else { + // The transit server is stopped. + if err == nil { + t.Fatal("expected error due to transit server being stopped.") + } + } + break + } + } +} + +// verifyBarrierConfig verifies that a barrier configuration is correct. +func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) { + t.Helper() + if cfg.Type != sealType { + t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType) + } + if cfg.SecretShares != shares { + t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares) + } + if cfg.SecretThreshold != threshold { + t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold) + } + if cfg.StoredShares != stored { + t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored) + } } // initializeShamir initializes a brand new backend storage with Shamir. 
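The migration tests above wait a fixed 10 seconds after unsealMigrate because, as the comments note, there is no callback for seal-migration completion. A polling loop is one possible alternative; the sketch below is only illustrative, assumes the api package's SealStatusResponse exposes a Migration flag in sys/seal-status output, and uses a hypothetical helper name (awaitSealMigration) that does not appear anywhere in this patch series.

    package seal_migration

    import (
    	"testing"
    	"time"

    	"github.com/hashicorp/vault/api"
    )

    // awaitSealMigration polls sys/seal-status until the node reports that it
    // is unsealed and no longer in seal-migration mode, or until the timeout
    // expires.
    func awaitSealMigration(t *testing.T, client *api.Client, timeout time.Duration) {
    	t.Helper()

    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		status, err := client.Sys().SealStatus()
    		if err != nil {
    			t.Fatal(err)
    		}
    		// Assumption: SealStatusResponse.Migration reports whether the
    		// node is still unsealing in migration mode. Once the node is
    		// unsealed and migration mode has cleared, the new seal config
    		// should have been written back to storage.
    		if !status.Sealed && !status.Migration {
    			return
    		}
    		time.Sleep(time.Second)
    	}
    	t.Fatal("timeout waiting for seal migration to complete")
    }

Such a helper could stand in for the time.Sleep(10 * time.Second) calls that follow unsealMigrate, at the cost of relying on seal-status output rather than an explicit completion signal.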
From 6781c7efd119a8ef834d6776eafc900c8d745569 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 12:21:15 -0400 Subject: [PATCH 19/86] TestSealMigration_ShamirToTransit_Pre14 --- .../seal_migration/seal_migration_test.go | 146 +++++++++++++----- 1 file changed, 105 insertions(+), 41 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 7ea4b4e4ea2e..6e092dba04c6 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -71,45 +71,14 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { defer cleanup() tf(t, logger, storage, 54000) }) - } } -//func TestShamir(t *testing.T) { -// testVariousBackends(t, testShamir, true) -//} -// -//func testShamir( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) -// runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -//} -// -//func TestTransit(t *testing.T) { -// testVariousBackends(t, testTransit, true) -//} -// -//func testTransit( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// // Create the transit server. -// tss := sealhelper.NewTransitSealServer(t) -// defer tss.Cleanup() -// tss.MakeKey(t, "transit-seal-key") -// -// rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) -// //println("rootToken, recoveryKeys, transitSeal", rootToken, recoveryKeys, transitSeal) -// runTransit(t, logger, storage, basePort, rootToken, transitSeal) -//} - -//--------------------------------------------------------- - // TODO skip enterprise --> merge in to double check func TestSealMigration_TransitToShamir_Pre14(t *testing.T) { + // Note that we do not test integrated raft storage since this is + // a pre-1.4 test. testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) } @@ -142,7 +111,6 @@ func testSealMigrationTransitToShamir_Pre14( runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) } -// migrateFromTransitToShamir migrates the backend from transit to shamir func migrateFromTransitToShamir_Pre14( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, @@ -163,6 +131,7 @@ func migrateFromTransitToShamir_Pre14( } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, SkipInit: true, @@ -189,17 +158,11 @@ func migrateFromTransitToShamir_Pre14( // Unseal and migrate to Shamir. Although we're unsealing using the // recovery keys, this is still an autounseal. - //for _, node := range cluster.Cores { - // nclient := node.Client - // nclient.SetToken(rootToken) - // unsealMigrate(t, nclient, recoveryKeys, true) - //} unsealMigrate(t, client, recoveryKeys, true) testhelpers.WaitForActiveNode(t, cluster) // Wait for migration to finish. Sadly there is no callback, and the - // test will fail later on if we don't do this. TODO Maybe we should add a - // timeout loop of some kind here? + // test will fail later on if we don't do this. 
time.Sleep(10 * time.Second) // Read the secret @@ -224,6 +187,107 @@ func migrateFromTransitToShamir_Pre14( cluster.EnsureCoresSealed(t) } +func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) { + // Note that we do not test integrated raft storage since this is + // a pre-1.4 test. + testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) +} + +func testSealMigrationShamirToTransit_Pre14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + // Initialize the backend using shamir + rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) + + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer func() { + tss.EnsureCoresSealed(t) + tss.Cleanup() + }() + tss.MakeKey(t, "transit-seal-key") + + // Migrate the backend from transit to shamir + transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys) + + // Run the backend with transit. + runTransit(t, logger, storage, basePort, rootToken, transitSeal) +} + +func migrateFromShamirToTransit_Pre14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + tss *sealhelper.TransitSealServer, rootToken string, keys [][]byte, +) vault.Seal { + + var baseClusterPort = basePort + 10 + + var transitSeal vault.Seal + + var conf = vault.CoreConfig{ + Logger: logger.Named("migrateFromShamirToTransit"), + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + SkipInit: true, + // N.B. Providing a transit seal puts us in migration mode. + SealFunc: func() vault.Seal { + // Each core will create its own transit seal here. Later + // on it won't matter which one of these we end up using, since + // they were all created from the same transit key. + transitSeal = tss.MakeSeal(t, "transit-seal-key") + return transitSeal + }, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + client.SetToken(rootToken) + + // Unseal with the recovery keys + cluster.RecoveryKeys = keys + for _, core := range cluster.Cores { + cluster.UnsealCore(t, core) + } + testhelpers.WaitForActiveNode(t, cluster) + + // Wait for migration to finish. Sadly there is no callback, and the + // test will fail later on if we don't do this. + time.Sleep(10 * time.Second) + + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Make sure the seal configs were updated correctly. 
+ b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) + if err != nil { + t.Fatal(err) + } + verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) + verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) + + cluster.EnsureCoresSealed(t) + + return transitSeal +} + func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { for i, key := range keys { From 3939bf3d73c0f1881f0988ec707c077f9f888e3d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 13:37:41 -0400 Subject: [PATCH 20/86] split for pre-1.4 testing --- .../seal_migration_pre14_test.go | 134 ++++++++++++++++++ .../seal_migration/seal_migration_test.go | 118 +-------------- 2 files changed, 138 insertions(+), 114 deletions(-) create mode 100644 vault/external_tests/seal_migration/seal_migration_pre14_test.go diff --git a/vault/external_tests/seal_migration/seal_migration_pre14_test.go b/vault/external_tests/seal_migration/seal_migration_pre14_test.go new file mode 100644 index 000000000000..1d3f7b9d08bb --- /dev/null +++ b/vault/external_tests/seal_migration/seal_migration_pre14_test.go @@ -0,0 +1,134 @@ +// +build !enterprise + +package seal_migration + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/go-test/deep" + + "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + "github.com/hashicorp/vault/helper/testhelpers" + sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/vault" +) + +// TestSealMigration_TransitToShamir_Pre14 tests transit-to-shamir seal +// migration, using the pre-1.4 method of bring down the whole cluster to do +// the migration. +func TestSealMigration_TransitToShamir_Pre14(t *testing.T) { + // Note that we do not test integrated raft storage since this is + // a pre-1.4 test. + testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) +} + +func testSealMigrationTransitToShamir_Pre14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer func() { + if tss != nil { + tss.Cleanup() + } + }() + tss.MakeKey(t, "transit-seal-key") + + // Initialize the backend with transit. + rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + + // Migrate the backend from transit to shamir + migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) + + // Now that migration is done, we can nuke the transit server, since we + // can unseal without it. + tss.Cleanup() + tss = nil + + // Run the backend with shamir. Note that the recovery keys are now the + // barrier keys. + runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) +} + +func migrateFromTransitToShamir_Pre14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + tss *sealhelper.TransitSealServer, transitSeal vault.Seal, + rootToken string, recoveryKeys [][]byte) { + + var baseClusterPort = basePort + 10 + + var conf = vault.CoreConfig{ + Logger: logger.Named("migrateFromTransitToShamir"), + // N.B. Providing an UnwrapSeal puts us in migration mode. This is the + // equivalent of doing the following in HCL: + // seal "transit" { + // // ... 
+ // disabled = "true" + // } + UnwrapSeal: transitSeal, + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + SkipInit: true, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + client.SetToken(rootToken) + + // Attempt to unseal while the transit server is unreachable. Although + // we're unsealing using the recovery keys, this is still an + // autounseal, so it should fail. + tss.EnsureCoresSealed(t) + unsealMigrate(t, client, recoveryKeys, false) + tss.UnsealCores(t) + testhelpers.WaitForActiveNode(t, tss.TestCluster) + + // Unseal and migrate to Shamir. Although we're unsealing using the + // recovery keys, this is still an autounseal. + unsealMigrate(t, client, recoveryKeys, true) + testhelpers.WaitForActiveNode(t, cluster) + + // Wait for migration to finish. Sadly there is no callback, and the + // test will fail later on if we don't do this. + time.Sleep(10 * time.Second) + + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Make sure the seal configs were updated correctly. + b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) + if err != nil { + t.Fatal(err) + } + verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) + if r != nil { + t.Fatalf("expected nil recovery config, got: %#v", r) + } + + cluster.EnsureCoresSealed(t) +} diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 6e092dba04c6..45b6e7046fe8 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -74,122 +74,12 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { } } -// TODO skip enterprise --> merge in to double check - -func TestSealMigration_TransitToShamir_Pre14(t *testing.T) { - // Note that we do not test integrated raft storage since this is - // a pre-1.4 test. - testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) -} - -func testSealMigrationTransitToShamir_Pre14( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Create the transit server. - tss := sealhelper.NewTransitSealServer(t) - defer func() { - if tss != nil { - tss.Cleanup() - } - }() - tss.MakeKey(t, "transit-seal-key") - - // Initialize the backend with transit. - rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - - // Migrate the backend from transit to shamir - migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) - - // Now that migration is done, we can nuke the transit server, since we - // can unseal without it. - tss.Cleanup() - tss = nil - - // Run the backend with shamir. Note that the recovery keys are now the - // barrier keys. 
- runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) -} - -func migrateFromTransitToShamir_Pre14( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int, - tss *sealhelper.TransitSealServer, transitSeal vault.Seal, - rootToken string, recoveryKeys [][]byte) { - - var baseClusterPort = basePort + 10 - - var conf = vault.CoreConfig{ - Logger: logger.Named("migrateFromTransitToShamir"), - // N.B. Providing an UnwrapSeal puts us in migration mode. This is the - // equivalent of doing the following in HCL: - // seal "transit" { - // // ... - // disabled = "true" - // } - UnwrapSeal: transitSeal, - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - SkipInit: true, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - client.SetToken(rootToken) - - // Attempt to unseal while the transit server is unreachable. Although - // we're unsealing using the recovery keys, this is still an - // autounseal, so it should fail. - tss.EnsureCoresSealed(t) - unsealMigrate(t, client, recoveryKeys, false) - tss.UnsealCores(t) - testhelpers.WaitForActiveNode(t, tss.TestCluster) - - // Unseal and migrate to Shamir. Although we're unsealing using the - // recovery keys, this is still an autounseal. - unsealMigrate(t, client, recoveryKeys, true) - testhelpers.WaitForActiveNode(t, cluster) - - // Wait for migration to finish. Sadly there is no callback, and the - // test will fail later on if we don't do this. - time.Sleep(10 * time.Second) - - // Read the secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } - - // Make sure the seal configs were updated correctly. - b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) - if err != nil { - t.Fatal(err) - } - verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) - if r != nil { - t.Fatalf("expected nil recovery config, got: %#v", r) - } - - cluster.EnsureCoresSealed(t) -} - +// TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal +// migration, using the pre-1.4 method of bring down the whole cluster to do +// the migration. func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) { // Note that we do not test integrated raft storage since this is - // a pre-1.4 test. + // a pre-1.4 test. 
testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) } From f7da8135838afd2544221b1caabfc2531bb1f851 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 13:56:54 -0400 Subject: [PATCH 21/86] add simple tests for transit and shamir --- .../seal_migration_pre14_test.go | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/vault/external_tests/seal_migration/seal_migration_pre14_test.go b/vault/external_tests/seal_migration/seal_migration_pre14_test.go index 1d3f7b9d08bb..c029ecdf07e7 100644 --- a/vault/external_tests/seal_migration/seal_migration_pre14_test.go +++ b/vault/external_tests/seal_migration/seal_migration_pre14_test.go @@ -132,3 +132,32 @@ func migrateFromTransitToShamir_Pre14( cluster.EnsureCoresSealed(t) } + +func TestShamir(t *testing.T) { + testVariousBackends(t, testShamir, true) +} + +func testShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) + runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +} + +func TestTransit(t *testing.T) { + testVariousBackends(t, testTransit, true) +} + +func testTransit( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer tss.Cleanup() + tss.MakeKey(t, "transit-seal-key") + + rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + runTransit(t, logger, storage, basePort, rootToken, transitSeal) +} From ccc79c930734f1ddeb977ea96294e8e2d3a9ec6d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 14:26:58 -0400 Subject: [PATCH 22/86] fix typo in test suite --- .../seal_migration_pre14_test.go | 29 ----------------- .../seal_migration/seal_migration_test.go | 31 ++++++++++++++++++- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/vault/external_tests/seal_migration/seal_migration_pre14_test.go b/vault/external_tests/seal_migration/seal_migration_pre14_test.go index c029ecdf07e7..1d3f7b9d08bb 100644 --- a/vault/external_tests/seal_migration/seal_migration_pre14_test.go +++ b/vault/external_tests/seal_migration/seal_migration_pre14_test.go @@ -132,32 +132,3 @@ func migrateFromTransitToShamir_Pre14( cluster.EnsureCoresSealed(t) } - -func TestShamir(t *testing.T) { - testVariousBackends(t, testShamir, true) -} - -func testShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) - runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -} - -func TestTransit(t *testing.T) { - testVariousBackends(t, testTransit, true) -} - -func testTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Create the transit server. 
- tss := sealhelper.NewTransitSealServer(t) - defer tss.Cleanup() - tss.MakeKey(t, "transit-seal-key") - - rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - runTransit(t, logger, storage, basePort, rootToken, transitSeal) -} diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 45b6e7046fe8..5c5851324fd7 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -80,7 +80,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) { // Note that we do not test integrated raft storage since this is // a pre-1.4 test. - testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) + testVariousBackends(t, testSealMigrationShamirToTransit_Pre14, false) } func testSealMigrationShamirToTransit_Pre14( @@ -485,3 +485,32 @@ func runTransit( // Seal the cluster cluster.EnsureCoresSealed(t) } + +func TestShamir(t *testing.T) { + testVariousBackends(t, testShamir, true) +} + +func testShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) + runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +} + +func TestTransit(t *testing.T) { + testVariousBackends(t, testTransit, true) +} + +func testTransit( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer tss.Cleanup() + tss.MakeKey(t, "transit-seal-key") + + rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + runTransit(t, logger, storage, basePort, rootToken, transitSeal) +} From e830135895597b0d251a50c640ba94860cb94f4b Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 14:39:31 -0400 Subject: [PATCH 23/86] debug wrapper type --- vault/core.go | 1 + .../seal_migration/seal_migration_test.go | 60 +++++++++---------- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/vault/core.go b/vault/core.go index 942a4d45c9a7..c99e0e26a544 100644 --- a/vault/core.go +++ b/vault/core.go @@ -1293,6 +1293,7 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover c.migrationInfo.shamirCombinedKey = make([]byte, len(recoveredKey)) copy(c.migrationInfo.shamirCombinedKey, recoveredKey) if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedShamirMaster { + fmt.Printf("(c *Core) unsealPart zzz %T\n", seal.GetAccess().Wrapper) err = seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveredKey) if err != nil { return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 5c5851324fd7..7aadddf19d99 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -42,36 +42,36 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 51000) }) - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, 
storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) - - if includeRaft { - t.Run("raft", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - defer cleanup() - tf(t, logger, storage, 54000) - }) - } + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) + + //if includeRaft { + // t.Run("raft", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("raft") + // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + // defer cleanup() + // tf(t, logger, storage, 54000) + // }) + //} } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal From 9dd793f1f2def27f05a8b95a2e01f35f074c857c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 14:50:17 -0400 Subject: [PATCH 24/86] test debug --- vault/core.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vault/core.go b/vault/core.go index c99e0e26a544..29d3bcf52259 100644 --- a/vault/core.go +++ b/vault/core.go @@ -1293,7 +1293,8 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover c.migrationInfo.shamirCombinedKey = make([]byte, len(recoveredKey)) copy(c.migrationInfo.shamirCombinedKey, recoveredKey) if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedShamirMaster { - fmt.Printf("(c *Core) unsealPart zzz %T\n", seal.GetAccess().Wrapper) + fmt.Printf("test-debug (c *Core) unsealPart zzz %T\n", seal.GetAccess().Wrapper) + fmt.Printf("test-debug (c *Core) unsealPart zzz type %v\n", seal.GetAccess().Wrapper.Type()) err = seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveredKey) if err != nil { return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err) From 23aad624ce9cd67140395b161816075e73f8895e Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 15:39:43 -0400 Subject: [PATCH 25/86] test-debug --- vault/core.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vault/core.go b/vault/core.go index 29d3bcf52259..d0d71a708df1 100644 --- a/vault/core.go +++ b/vault/core.go @@ -2338,6 +2338,9 @@ func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { c.stateLock.Lock() defer c.stateLock.Unlock() + + fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) + c.unwrapSeal = unwrapSeal if c.unwrapSeal != nil { c.unwrapSeal.SetCore(c) From 784102e1eb19286bbe7ee47334b38f4fe2c00bf0 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 15:45:31 -0400 Subject: [PATCH 26/86] refactor core migration --- command/server_util.go | 110 +++++++++++++++++++ vault/core.go | 239 ++++++++++++++++++++++------------------- 2 files 
changed, 239 insertions(+), 110 deletions(-) diff --git a/command/server_util.go b/command/server_util.go index dd95e72a9437..5291ba7b7fb2 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -1,8 +1,15 @@ package command import ( + "context" + "errors" + "fmt" + + log "github.com/hashicorp/go-hclog" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/vault" + vaultseal "github.com/hashicorp/vault/vault/seal" ) var ( @@ -11,3 +18,106 @@ var ( func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) { } + +func AdjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error { + existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background()) + if err != nil { + return fmt.Errorf("Error checking for existing seal: %s", err) + } + + // If we don't have an existing config or if it's the deprecated auto seal + // which needs an upgrade, skip out + if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { + return nil + } + + if unwrapSeal == nil { + // We have the same barrier type and the unwrap seal is nil so we're not + // migrating from same to same, IOW we assume it's not a migration + if existBarrierSealConfig.Type == barrierSeal.BarrierType() { + return nil + } + + // If we're not coming from Shamir, and the existing type doesn't match + // the barrier type, we need both the migration seal and the new seal + if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir { + return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) + } + } else { + if unwrapSeal.BarrierType() == wrapping.Shamir { + return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") + } + } + + if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { + return errors.New(`Recovery seal configuration not found for existing seal`) + } + + var migrationSeal vault.Seal + var newSeal vault.Seal + + // Determine the migrationSeal. This is either going to be an instance of + // shamir or the unwrapSeal. + switch existBarrierSealConfig.Type { + case wrapping.Shamir: + // The value reflected in config is what we're going to + migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ + Logger: logger.Named("shamir"), + }), + }) + + default: + // If we're not coming from Shamir we expect the previous seal to be + // in the config and disabled. + migrationSeal = unwrapSeal + } + + // newSeal will be the barrierSeal + newSeal = barrierSeal + + if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { + return errors.New("Migrating between same seal types is currently not supported") + } + + if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() { + // In this case our migration seal is set so we are using it + // (potentially) for unwrapping. Set it on core for that purpose then + // exit. + core.SetSealsForMigration(nil, nil, unwrapSeal) + return nil + } + + // Set the appropriate barrier and recovery configs. 
+ switch { + case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): + // Migrating from auto->auto, copy the configs over + newSeal.SetCachedBarrierConfig(existBarrierSealConfig) + newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) + case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): + // Migrating from auto->shamir, clone auto's recovery config and set + // stored keys to 1. + newSealConfig := existRecoverySealConfig.Clone() + newSealConfig.StoredShares = 1 + newSeal.SetCachedBarrierConfig(newSealConfig) + case newSeal != nil && newSeal.RecoveryKeySupported(): + // Migrating from shamir->auto, set a new barrier config and set + // recovery config to a clone of shamir's barrier config with stored + // keys set to 0. + newBarrierSealConfig := &vault.SealConfig{ + Type: newSeal.BarrierType(), + SecretShares: 1, + SecretThreshold: 1, + StoredShares: 1, + } + newSeal.SetCachedBarrierConfig(newBarrierSealConfig) + + newRecoveryConfig := existBarrierSealConfig.Clone() + newRecoveryConfig.StoredShares = 0 + newSeal.SetCachedRecoveryConfig(newRecoveryConfig) + } + + core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal) + + return nil +} diff --git a/vault/core.go b/vault/core.go index d0d71a708df1..322128598c4e 100644 --- a/vault/core.go +++ b/vault/core.go @@ -943,10 +943,10 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.clusterListener.Store((*cluster.Listener)(nil)) - err = c.adjustForSealMigration(conf.UnwrapSeal) - if err != nil { - return nil, err - } + //err = c.adjustForSealMigration(conf.UnwrapSeal) + //if err != nil { + // return nil, err + //} return c, nil } @@ -2232,115 +2232,134 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } -func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { - existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) - if err != nil { - return fmt.Errorf("Error checking for existing seal: %s", err) - } - - // If we don't have an existing config or if it's the deprecated auto seal - // which needs an upgrade, skip out - if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { - return nil - } - - if unwrapSeal == nil { - // We have the same barrier type and the unwrap seal is nil so we're not - // migrating from same to same, IOW we assume it's not a migration - if existBarrierSealConfig.Type == c.seal.BarrierType() { - return nil - } - - // If we're not coming from Shamir, and the existing type doesn't match - // the barrier type, we need both the migration seal and the new seal - if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { - return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) - } - } else { - if unwrapSeal.BarrierType() == wrapping.Shamir { - return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") - } - } - - if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { - return errors.New(`Recovery seal configuration not found for existing seal`) - } - - var migrationSeal Seal - var newSeal Seal - - // Determine the migrationSeal. This is either going to be an instance of - // shamir or the unwrapSeal. 
- switch existBarrierSealConfig.Type { - case wrapping.Shamir: - // The value reflected in config is what we're going to - migrationSeal = NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ - Logger: c.logger.Named("shamir"), - }), - }) - - default: - // If we're not coming from Shamir we expect the previous seal to be - // in the config and disabled. - migrationSeal = unwrapSeal - } - - // newSeal will be the barrierSeal - newSeal = c.seal - - if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { - return errors.New("Migrating between same seal types is currently not supported") - } - - if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { - // In this case our migration seal is set so we are using it - // (potentially) for unwrapping. Set it on core for that purpose then - // exit. - c.setSealsForMigration(nil, nil, unwrapSeal) - return nil - } - - // Set the appropriate barrier and recovery configs. - switch { - case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): - // Migrating from auto->auto, copy the configs over - newSeal.SetCachedBarrierConfig(existBarrierSealConfig) - newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) - case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): - // Migrating from auto->shamir, clone auto's recovery config and set - // stored keys to 1. - newSealConfig := existRecoverySealConfig.Clone() - newSealConfig.StoredShares = 1 - newSeal.SetCachedBarrierConfig(newSealConfig) - case newSeal != nil && newSeal.RecoveryKeySupported(): - // Migrating from shamir->auto, set a new barrier config and set - // recovery config to a clone of shamir's barrier config with stored - // keys set to 0. 
- newBarrierSealConfig := &SealConfig{ - Type: newSeal.BarrierType(), - SecretShares: 1, - SecretThreshold: 1, - StoredShares: 1, - } - newSeal.SetCachedBarrierConfig(newBarrierSealConfig) - - newRecoveryConfig := existBarrierSealConfig.Clone() - newRecoveryConfig.StoredShares = 0 - newSeal.SetCachedRecoveryConfig(newRecoveryConfig) - } - - c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) - - return nil -} +//func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { +// existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) +// if err != nil { +// return fmt.Errorf("Error checking for existing seal: %s", err) +// } +// +// // If we don't have an existing config or if it's the deprecated auto seal +// // which needs an upgrade, skip out +// if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { +// return nil +// } +// +// if unwrapSeal == nil { +// // We have the same barrier type and the unwrap seal is nil so we're not +// // migrating from same to same, IOW we assume it's not a migration +// if existBarrierSealConfig.Type == c.seal.BarrierType() { +// return nil +// } +// +// // If we're not coming from Shamir, and the existing type doesn't match +// // the barrier type, we need both the migration seal and the new seal +// if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { +// return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) +// } +// } else { +// if unwrapSeal.BarrierType() == wrapping.Shamir { +// return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") +// } +// } +// +// if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { +// return errors.New(`Recovery seal configuration not found for existing seal`) +// } +// +// var migrationSeal Seal +// var newSeal Seal +// +// // Determine the migrationSeal. This is either going to be an instance of +// // shamir or the unwrapSeal. +// switch existBarrierSealConfig.Type { +// case wrapping.Shamir: +// // The value reflected in config is what we're going to +// migrationSeal = NewDefaultSeal(&vaultseal.Access{ +// Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ +// Logger: c.logger.Named("shamir"), +// }), +// }) +// +// default: +// // If we're not coming from Shamir we expect the previous seal to be +// // in the config and disabled. +// migrationSeal = unwrapSeal +// } +// +// // newSeal will be the barrierSeal +// newSeal = c.seal +// +// if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { +// return errors.New("Migrating between same seal types is currently not supported") +// } +// +// if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { +// // In this case our migration seal is set so we are using it +// // (potentially) for unwrapping. Set it on core for that purpose then +// // exit. +// c.setSealsForMigration(nil, nil, unwrapSeal) +// return nil +// } +// +// // Set the appropriate barrier and recovery configs. 
+// switch { +// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): +// // Migrating from auto->auto, copy the configs over +// newSeal.SetCachedBarrierConfig(existBarrierSealConfig) +// newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) +// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): +// // Migrating from auto->shamir, clone auto's recovery config and set +// // stored keys to 1. +// newSealConfig := existRecoverySealConfig.Clone() +// newSealConfig.StoredShares = 1 +// newSeal.SetCachedBarrierConfig(newSealConfig) +// case newSeal != nil && newSeal.RecoveryKeySupported(): +// // Migrating from shamir->auto, set a new barrier config and set +// // recovery config to a clone of shamir's barrier config with stored +// // keys set to 0. +// newBarrierSealConfig := &SealConfig{ +// Type: newSeal.BarrierType(), +// SecretShares: 1, +// SecretThreshold: 1, +// StoredShares: 1, +// } +// newSeal.SetCachedBarrierConfig(newBarrierSealConfig) +// +// newRecoveryConfig := existBarrierSealConfig.Clone() +// newRecoveryConfig.StoredShares = 0 +// newSeal.SetCachedRecoveryConfig(newRecoveryConfig) +// } +// +// c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) +// +// return nil +//} -func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { +//func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { +// c.stateLock.Lock() +// defer c.stateLock.Unlock() +// +// fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) +// +// c.unwrapSeal = unwrapSeal +// if c.unwrapSeal != nil { +// c.unwrapSeal.SetCore(c) +// } +// if newSeal != nil && migrationSeal != nil { +// c.migrationInfo = &migrationInformation{ +// seal: migrationSeal, +// } +// c.migrationInfo.seal.SetCore(c) +// c.seal = newSeal +// c.seal.SetCore(c) +// c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) +// c.initSealsForMigration() +// } +//} + +func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { c.stateLock.Lock() defer c.stateLock.Unlock() - - fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) - c.unwrapSeal = unwrapSeal if c.unwrapSeal != nil { c.unwrapSeal.SetCore(c) From 4ec9dc15d40da3c7aad8acc917b131a9aaa6917c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 16:04:29 -0400 Subject: [PATCH 27/86] Revert "refactor core migration" This reverts commit a776452d32a9dca7a51e3df4a76b9234d8c0c7ce. 
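
For orientation: the reverted patch above had moved the migration-adjustment logic into command/server_util.go as AdjustCoreForSealMigration, and this revert restores it to vault/core.go; the following patches (28-31) then redo the refactor inside the vault package. The standalone sketch below condenses the seal-selection decision that keeps being relocated in this series. It uses simplified stand-in types and a hypothetical chooseSeals helper rather than the real vault.Seal interface, and it omits the barrier/recovery config handling, so it is a summary under those assumptions, not the actual implementation.

package sealnotes

import "errors"

// seal is a minimal stand-in for vault.Seal; only the barrier type matters here.
type seal struct{ barrierType string }

func (s *seal) BarrierType() string { return s.barrierType }

// chooseSeals summarizes the decision made by the adjustForSealMigration
// variants in this series: given the seal type recorded in storage
// (existType), the seal built from the current config (barrier), and an
// optional "disabled" seal stanza (unwrap), pick the migration source and
// destination seals.
func chooseSeals(existType string, barrier, unwrap *seal) (migration, newSeal *seal, err error) {
	const shamir = "shamir"

	if unwrap == nil {
		// No "disabled" seal stanza configured.
		if existType == barrier.BarrierType() {
			// Stored type matches the configured type: not a migration.
			return nil, nil, nil
		}
		if existType != shamir && barrier.BarrierType() != shamir {
			return nil, nil, errors.New(`auto-seal to auto-seal migration needs a "disabled" seal stanza`)
		}
	} else if unwrap.BarrierType() == shamir {
		return nil, nil, errors.New("shamir seals cannot be set disabled")
	}

	if existType == shamir {
		// shamir -> auto: migrate away from a freshly constructed shamir seal.
		migration = &seal{barrierType: shamir}
	} else {
		// auto -> shamir or auto -> auto: the old seal comes from the disabled stanza.
		migration = unwrap
	}
	// The destination is always the seal built from the current config.
	newSeal = barrier

	if migration != nil && migration.BarrierType() == newSeal.BarrierType() {
		return nil, nil, errors.New("migrating between same seal types is currently not supported")
	}
	return migration, newSeal, nil
}
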
--- command/server_util.go | 110 ------------------- vault/core.go | 239 +++++++++++++++++++---------------------- 2 files changed, 110 insertions(+), 239 deletions(-) diff --git a/command/server_util.go b/command/server_util.go index 5291ba7b7fb2..dd95e72a9437 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -1,15 +1,8 @@ package command import ( - "context" - "errors" - "fmt" - - log "github.com/hashicorp/go-hclog" - aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/vault" - vaultseal "github.com/hashicorp/vault/vault/seal" ) var ( @@ -18,106 +11,3 @@ var ( func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) { } - -func AdjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error { - existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background()) - if err != nil { - return fmt.Errorf("Error checking for existing seal: %s", err) - } - - // If we don't have an existing config or if it's the deprecated auto seal - // which needs an upgrade, skip out - if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { - return nil - } - - if unwrapSeal == nil { - // We have the same barrier type and the unwrap seal is nil so we're not - // migrating from same to same, IOW we assume it's not a migration - if existBarrierSealConfig.Type == barrierSeal.BarrierType() { - return nil - } - - // If we're not coming from Shamir, and the existing type doesn't match - // the barrier type, we need both the migration seal and the new seal - if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir { - return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) - } - } else { - if unwrapSeal.BarrierType() == wrapping.Shamir { - return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") - } - } - - if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { - return errors.New(`Recovery seal configuration not found for existing seal`) - } - - var migrationSeal vault.Seal - var newSeal vault.Seal - - // Determine the migrationSeal. This is either going to be an instance of - // shamir or the unwrapSeal. - switch existBarrierSealConfig.Type { - case wrapping.Shamir: - // The value reflected in config is what we're going to - migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ - Logger: logger.Named("shamir"), - }), - }) - - default: - // If we're not coming from Shamir we expect the previous seal to be - // in the config and disabled. - migrationSeal = unwrapSeal - } - - // newSeal will be the barrierSeal - newSeal = barrierSeal - - if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { - return errors.New("Migrating between same seal types is currently not supported") - } - - if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() { - // In this case our migration seal is set so we are using it - // (potentially) for unwrapping. Set it on core for that purpose then - // exit. - core.SetSealsForMigration(nil, nil, unwrapSeal) - return nil - } - - // Set the appropriate barrier and recovery configs. 
- switch { - case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): - // Migrating from auto->auto, copy the configs over - newSeal.SetCachedBarrierConfig(existBarrierSealConfig) - newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) - case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): - // Migrating from auto->shamir, clone auto's recovery config and set - // stored keys to 1. - newSealConfig := existRecoverySealConfig.Clone() - newSealConfig.StoredShares = 1 - newSeal.SetCachedBarrierConfig(newSealConfig) - case newSeal != nil && newSeal.RecoveryKeySupported(): - // Migrating from shamir->auto, set a new barrier config and set - // recovery config to a clone of shamir's barrier config with stored - // keys set to 0. - newBarrierSealConfig := &vault.SealConfig{ - Type: newSeal.BarrierType(), - SecretShares: 1, - SecretThreshold: 1, - StoredShares: 1, - } - newSeal.SetCachedBarrierConfig(newBarrierSealConfig) - - newRecoveryConfig := existBarrierSealConfig.Clone() - newRecoveryConfig.StoredShares = 0 - newSeal.SetCachedRecoveryConfig(newRecoveryConfig) - } - - core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal) - - return nil -} diff --git a/vault/core.go b/vault/core.go index 322128598c4e..d0d71a708df1 100644 --- a/vault/core.go +++ b/vault/core.go @@ -943,10 +943,10 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.clusterListener.Store((*cluster.Listener)(nil)) - //err = c.adjustForSealMigration(conf.UnwrapSeal) - //if err != nil { - // return nil, err - //} + err = c.adjustForSealMigration(conf.UnwrapSeal) + if err != nil { + return nil, err + } return c, nil } @@ -2232,134 +2232,115 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } -//func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { -// existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) -// if err != nil { -// return fmt.Errorf("Error checking for existing seal: %s", err) -// } -// -// // If we don't have an existing config or if it's the deprecated auto seal -// // which needs an upgrade, skip out -// if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { -// return nil -// } -// -// if unwrapSeal == nil { -// // We have the same barrier type and the unwrap seal is nil so we're not -// // migrating from same to same, IOW we assume it's not a migration -// if existBarrierSealConfig.Type == c.seal.BarrierType() { -// return nil -// } -// -// // If we're not coming from Shamir, and the existing type doesn't match -// // the barrier type, we need both the migration seal and the new seal -// if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { -// return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) -// } -// } else { -// if unwrapSeal.BarrierType() == wrapping.Shamir { -// return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") -// } -// } -// -// if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { -// return errors.New(`Recovery seal configuration not found for existing seal`) -// } -// -// var migrationSeal Seal -// var newSeal Seal -// -// // Determine the migrationSeal. This is either going to be an instance of -// // shamir or the unwrapSeal. 
-// switch existBarrierSealConfig.Type { -// case wrapping.Shamir: -// // The value reflected in config is what we're going to -// migrationSeal = NewDefaultSeal(&vaultseal.Access{ -// Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ -// Logger: c.logger.Named("shamir"), -// }), -// }) -// -// default: -// // If we're not coming from Shamir we expect the previous seal to be -// // in the config and disabled. -// migrationSeal = unwrapSeal -// } -// -// // newSeal will be the barrierSeal -// newSeal = c.seal -// -// if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { -// return errors.New("Migrating between same seal types is currently not supported") -// } -// -// if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { -// // In this case our migration seal is set so we are using it -// // (potentially) for unwrapping. Set it on core for that purpose then -// // exit. -// c.setSealsForMigration(nil, nil, unwrapSeal) -// return nil -// } -// -// // Set the appropriate barrier and recovery configs. -// switch { -// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): -// // Migrating from auto->auto, copy the configs over -// newSeal.SetCachedBarrierConfig(existBarrierSealConfig) -// newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) -// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): -// // Migrating from auto->shamir, clone auto's recovery config and set -// // stored keys to 1. -// newSealConfig := existRecoverySealConfig.Clone() -// newSealConfig.StoredShares = 1 -// newSeal.SetCachedBarrierConfig(newSealConfig) -// case newSeal != nil && newSeal.RecoveryKeySupported(): -// // Migrating from shamir->auto, set a new barrier config and set -// // recovery config to a clone of shamir's barrier config with stored -// // keys set to 0. 
-// newBarrierSealConfig := &SealConfig{ -// Type: newSeal.BarrierType(), -// SecretShares: 1, -// SecretThreshold: 1, -// StoredShares: 1, -// } -// newSeal.SetCachedBarrierConfig(newBarrierSealConfig) -// -// newRecoveryConfig := existBarrierSealConfig.Clone() -// newRecoveryConfig.StoredShares = 0 -// newSeal.SetCachedRecoveryConfig(newRecoveryConfig) -// } -// -// c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) -// -// return nil -//} +func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { + existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) + if err != nil { + return fmt.Errorf("Error checking for existing seal: %s", err) + } -//func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { -// c.stateLock.Lock() -// defer c.stateLock.Unlock() -// -// fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) -// -// c.unwrapSeal = unwrapSeal -// if c.unwrapSeal != nil { -// c.unwrapSeal.SetCore(c) -// } -// if newSeal != nil && migrationSeal != nil { -// c.migrationInfo = &migrationInformation{ -// seal: migrationSeal, -// } -// c.migrationInfo.seal.SetCore(c) -// c.seal = newSeal -// c.seal.SetCore(c) -// c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) -// c.initSealsForMigration() -// } -//} - -func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { + // If we don't have an existing config or if it's the deprecated auto seal + // which needs an upgrade, skip out + if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { + return nil + } + + if unwrapSeal == nil { + // We have the same barrier type and the unwrap seal is nil so we're not + // migrating from same to same, IOW we assume it's not a migration + if existBarrierSealConfig.Type == c.seal.BarrierType() { + return nil + } + + // If we're not coming from Shamir, and the existing type doesn't match + // the barrier type, we need both the migration seal and the new seal + if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { + return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) + } + } else { + if unwrapSeal.BarrierType() == wrapping.Shamir { + return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") + } + } + + if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { + return errors.New(`Recovery seal configuration not found for existing seal`) + } + + var migrationSeal Seal + var newSeal Seal + + // Determine the migrationSeal. This is either going to be an instance of + // shamir or the unwrapSeal. + switch existBarrierSealConfig.Type { + case wrapping.Shamir: + // The value reflected in config is what we're going to + migrationSeal = NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("shamir"), + }), + }) + + default: + // If we're not coming from Shamir we expect the previous seal to be + // in the config and disabled. 
+ migrationSeal = unwrapSeal + } + + // newSeal will be the barrierSeal + newSeal = c.seal + + if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { + return errors.New("Migrating between same seal types is currently not supported") + } + + if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { + // In this case our migration seal is set so we are using it + // (potentially) for unwrapping. Set it on core for that purpose then + // exit. + c.setSealsForMigration(nil, nil, unwrapSeal) + return nil + } + + // Set the appropriate barrier and recovery configs. + switch { + case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): + // Migrating from auto->auto, copy the configs over + newSeal.SetCachedBarrierConfig(existBarrierSealConfig) + newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) + case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): + // Migrating from auto->shamir, clone auto's recovery config and set + // stored keys to 1. + newSealConfig := existRecoverySealConfig.Clone() + newSealConfig.StoredShares = 1 + newSeal.SetCachedBarrierConfig(newSealConfig) + case newSeal != nil && newSeal.RecoveryKeySupported(): + // Migrating from shamir->auto, set a new barrier config and set + // recovery config to a clone of shamir's barrier config with stored + // keys set to 0. + newBarrierSealConfig := &SealConfig{ + Type: newSeal.BarrierType(), + SecretShares: 1, + SecretThreshold: 1, + StoredShares: 1, + } + newSeal.SetCachedBarrierConfig(newBarrierSealConfig) + + newRecoveryConfig := existBarrierSealConfig.Clone() + newRecoveryConfig.StoredShares = 0 + newSeal.SetCachedRecoveryConfig(newRecoveryConfig) + } + + c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) + + return nil +} + +func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { c.stateLock.Lock() defer c.stateLock.Unlock() + + fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) + c.unwrapSeal = unwrapSeal if c.unwrapSeal != nil { c.unwrapSeal.SetCore(c) From e5f0d0fc5fb9cee9a778c0345f5faf7b9e8acfc4 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 16:05:44 -0400 Subject: [PATCH 28/86] begin refactor of adjustForSealMigration --- vault/core.go | 248 +++++++++++++++++++++++++------------------------- 1 file changed, 124 insertions(+), 124 deletions(-) diff --git a/vault/core.go b/vault/core.go index d0d71a708df1..af54af17a5de 100644 --- a/vault/core.go +++ b/vault/core.go @@ -2232,130 +2232,130 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } -func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { - existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) - if err != nil { - return fmt.Errorf("Error checking for existing seal: %s", err) - } - - // If we don't have an existing config or if it's the deprecated auto seal - // which needs an upgrade, skip out - if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { - return nil - } - - if unwrapSeal == nil { - // We have the same barrier type and the unwrap seal is nil so we're not - // migrating from same to same, IOW we assume it's not a migration - if existBarrierSealConfig.Type == c.seal.BarrierType() { - return nil - } - - // If we're not coming from Shamir, 
and the existing type doesn't match - // the barrier type, we need both the migration seal and the new seal - if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { - return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) - } - } else { - if unwrapSeal.BarrierType() == wrapping.Shamir { - return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") - } - } - - if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { - return errors.New(`Recovery seal configuration not found for existing seal`) - } - - var migrationSeal Seal - var newSeal Seal - - // Determine the migrationSeal. This is either going to be an instance of - // shamir or the unwrapSeal. - switch existBarrierSealConfig.Type { - case wrapping.Shamir: - // The value reflected in config is what we're going to - migrationSeal = NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ - Logger: c.logger.Named("shamir"), - }), - }) - - default: - // If we're not coming from Shamir we expect the previous seal to be - // in the config and disabled. - migrationSeal = unwrapSeal - } - - // newSeal will be the barrierSeal - newSeal = c.seal - - if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { - return errors.New("Migrating between same seal types is currently not supported") - } - - if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { - // In this case our migration seal is set so we are using it - // (potentially) for unwrapping. Set it on core for that purpose then - // exit. - c.setSealsForMigration(nil, nil, unwrapSeal) - return nil - } - - // Set the appropriate barrier and recovery configs. - switch { - case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): - // Migrating from auto->auto, copy the configs over - newSeal.SetCachedBarrierConfig(existBarrierSealConfig) - newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) - case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): - // Migrating from auto->shamir, clone auto's recovery config and set - // stored keys to 1. - newSealConfig := existRecoverySealConfig.Clone() - newSealConfig.StoredShares = 1 - newSeal.SetCachedBarrierConfig(newSealConfig) - case newSeal != nil && newSeal.RecoveryKeySupported(): - // Migrating from shamir->auto, set a new barrier config and set - // recovery config to a clone of shamir's barrier config with stored - // keys set to 0. 
- newBarrierSealConfig := &SealConfig{ - Type: newSeal.BarrierType(), - SecretShares: 1, - SecretThreshold: 1, - StoredShares: 1, - } - newSeal.SetCachedBarrierConfig(newBarrierSealConfig) - - newRecoveryConfig := existBarrierSealConfig.Clone() - newRecoveryConfig.StoredShares = 0 - newSeal.SetCachedRecoveryConfig(newRecoveryConfig) - } - - c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) - - return nil -} - -func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { - c.stateLock.Lock() - defer c.stateLock.Unlock() - - fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) - - c.unwrapSeal = unwrapSeal - if c.unwrapSeal != nil { - c.unwrapSeal.SetCore(c) - } - if newSeal != nil && migrationSeal != nil { - c.migrationInfo = &migrationInformation{ - seal: migrationSeal, - } - c.migrationInfo.seal.SetCore(c) - c.seal = newSeal - c.seal.SetCore(c) - c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) - c.initSealsForMigration() - } -} +//func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { +// existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) +// if err != nil { +// return fmt.Errorf("Error checking for existing seal: %s", err) +// } +// +// // If we don't have an existing config or if it's the deprecated auto seal +// // which needs an upgrade, skip out +// if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { +// return nil +// } +// +// if unwrapSeal == nil { +// // We have the same barrier type and the unwrap seal is nil so we're not +// // migrating from same to same, IOW we assume it's not a migration +// if existBarrierSealConfig.Type == c.seal.BarrierType() { +// return nil +// } +// +// // If we're not coming from Shamir, and the existing type doesn't match +// // the barrier type, we need both the migration seal and the new seal +// if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { +// return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) +// } +// } else { +// if unwrapSeal.BarrierType() == wrapping.Shamir { +// return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") +// } +// } +// +// if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { +// return errors.New(`Recovery seal configuration not found for existing seal`) +// } +// +// var migrationSeal Seal +// var newSeal Seal +// +// // Determine the migrationSeal. This is either going to be an instance of +// // shamir or the unwrapSeal. +// switch existBarrierSealConfig.Type { +// case wrapping.Shamir: +// // The value reflected in config is what we're going to +// migrationSeal = NewDefaultSeal(&vaultseal.Access{ +// Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ +// Logger: c.logger.Named("shamir"), +// }), +// }) +// +// default: +// // If we're not coming from Shamir we expect the previous seal to be +// // in the config and disabled. 
+// migrationSeal = unwrapSeal +// } +// +// // newSeal will be the barrierSeal +// newSeal = c.seal +// +// if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { +// return errors.New("Migrating between same seal types is currently not supported") +// } +// +// if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { +// // In this case our migration seal is set so we are using it +// // (potentially) for unwrapping. Set it on core for that purpose then +// // exit. +// c.setSealsForMigration(nil, nil, unwrapSeal) +// return nil +// } +// +// // Set the appropriate barrier and recovery configs. +// switch { +// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): +// // Migrating from auto->auto, copy the configs over +// newSeal.SetCachedBarrierConfig(existBarrierSealConfig) +// newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) +// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): +// // Migrating from auto->shamir, clone auto's recovery config and set +// // stored keys to 1. +// newSealConfig := existRecoverySealConfig.Clone() +// newSealConfig.StoredShares = 1 +// newSeal.SetCachedBarrierConfig(newSealConfig) +// case newSeal != nil && newSeal.RecoveryKeySupported(): +// // Migrating from shamir->auto, set a new barrier config and set +// // recovery config to a clone of shamir's barrier config with stored +// // keys set to 0. +// newBarrierSealConfig := &SealConfig{ +// Type: newSeal.BarrierType(), +// SecretShares: 1, +// SecretThreshold: 1, +// StoredShares: 1, +// } +// newSeal.SetCachedBarrierConfig(newBarrierSealConfig) +// +// newRecoveryConfig := existBarrierSealConfig.Clone() +// newRecoveryConfig.StoredShares = 0 +// newSeal.SetCachedRecoveryConfig(newRecoveryConfig) +// } +// +// c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) +// +// return nil +//} +// +//func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { +// c.stateLock.Lock() +// defer c.stateLock.Unlock() +// +// fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) +// +// c.unwrapSeal = unwrapSeal +// if c.unwrapSeal != nil { +// c.unwrapSeal.SetCore(c) +// } +// if newSeal != nil && migrationSeal != nil { +// c.migrationInfo = &migrationInformation{ +// seal: migrationSeal, +// } +// c.migrationInfo.seal.SetCore(c) +// c.seal = newSeal +// c.seal.SetCore(c) +// c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) +// c.initSealsForMigration() +// } +//} // unsealKeyToMasterKey takes a key provided by the user, either a recovery key // if using an autoseal or an unseal key with Shamir. 
It returns a nil error From 559ddccb4777b6bb4c4e9a851e8e915b933bf7ed Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 16:16:37 -0400 Subject: [PATCH 29/86] fix bug in adjustForSealMigration --- vault/core.go | 124 +++++++++++++++++- .../seal_migration/seal_migration_test.go | 7 +- 2 files changed, 125 insertions(+), 6 deletions(-) diff --git a/vault/core.go b/vault/core.go index af54af17a5de..d2460d60082c 100644 --- a/vault/core.go +++ b/vault/core.go @@ -943,7 +943,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.clusterListener.Store((*cluster.Listener)(nil)) - err = c.adjustForSealMigration(conf.UnwrapSeal) + err = adjustForSealMigration(c.logger, c, c.seal, conf.UnwrapSeal) if err != nil { return nil, err } @@ -2232,6 +2232,128 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } +func adjustForSealMigration(logger log.Logger, core *Core, barrierSeal, unwrapSeal Seal) error { + existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background()) + if err != nil { + return fmt.Errorf("Error checking for existing seal: %s", err) + } + + // If we don't have an existing config or if it's the deprecated auto seal + // which needs an upgrade, skip out + if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { + return nil + } + + if unwrapSeal == nil { + // We have the same barrier type and the unwrap seal is nil so we're not + // migrating from same to same, IOW we assume it's not a migration + if existBarrierSealConfig.Type == barrierSeal.BarrierType() { + return nil + } + + // If we're not coming from Shamir, and the existing type doesn't match + // the barrier type, we need both the migration seal and the new seal + if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir { + return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) + } + } else { + if unwrapSeal.BarrierType() == wrapping.Shamir { + return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") + } + } + + if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { + return errors.New(`Recovery seal configuration not found for existing seal`) + } + + var migrationSeal Seal + var newSeal Seal + + // Determine the migrationSeal. This is either going to be an instance of + // shamir or the unwrapSeal. + switch existBarrierSealConfig.Type { + case wrapping.Shamir: + // The value reflected in config is what we're going to + migrationSeal = NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ + Logger: logger.Named("shamir"), + }), + }) + + default: + // If we're not coming from Shamir we expect the previous seal to be + // in the config and disabled. + migrationSeal = unwrapSeal + } + + // newSeal will be the barrierSeal + newSeal = barrierSeal + + if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { + return errors.New("Migrating between same seal types is currently not supported") + } + + if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() { + // In this case our migration seal is set so we are using it + // (potentially) for unwrapping. Set it on core for that purpose then + // exit. 
+ core.SetSealsForMigration(nil, nil, unwrapSeal) + return nil + } + + // Set the appropriate barrier and recovery configs. + switch { + case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): + // Migrating from auto->auto, copy the configs over + newSeal.SetCachedBarrierConfig(existBarrierSealConfig) + newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) + case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): + // Migrating from auto->shamir, clone auto's recovery config and set + // stored keys to 1. + newSealConfig := existRecoverySealConfig.Clone() + newSealConfig.StoredShares = 1 + newSeal.SetCachedBarrierConfig(newSealConfig) + case newSeal != nil && newSeal.RecoveryKeySupported(): + // Migrating from shamir->auto, set a new barrier config and set + // recovery config to a clone of shamir's barrier config with stored + // keys set to 0. + newBarrierSealConfig := &SealConfig{ + Type: newSeal.BarrierType(), + SecretShares: 1, + SecretThreshold: 1, + StoredShares: 1, + } + newSeal.SetCachedBarrierConfig(newBarrierSealConfig) + + newRecoveryConfig := existBarrierSealConfig.Clone() + newRecoveryConfig.StoredShares = 0 + newSeal.SetCachedRecoveryConfig(newRecoveryConfig) + } + + core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal) + + return nil +} + +func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { + c.stateLock.Lock() + defer c.stateLock.Unlock() + c.unwrapSeal = unwrapSeal + if c.unwrapSeal != nil { + c.unwrapSeal.SetCore(c) + } + if newSeal != nil && migrationSeal != nil { + c.migrationInfo = &migrationInformation{ + seal: migrationSeal, + } + c.migrationInfo.seal.SetCore(c) + c.seal = newSeal + c.seal.SetCore(c) + c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) + c.initSealsForMigration() + } +} + //func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { // existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) // if err != nil { diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 7aadddf19d99..95ee62fbfbf6 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -363,7 +363,8 @@ func initializeTransit( storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) { - var transitSeal vault.Seal + //var transitSeal vault.Seal + transitSeal := tss.MakeSeal(t, "transit-seal-key") var baseClusterPort = basePort + 10 @@ -377,10 +378,6 @@ func initializeTransit( BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, SealFunc: func() vault.Seal { - // Each core will create its own transit seal here. Later - // on it won't matter which one of these we end up using, since - // they were all created from the same transit key. 
- transitSeal = tss.MakeSeal(t, "transit-seal-key") return transitSeal }, } From 1293273df3d9daad45ad90ddb351372e87a4402d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 16:41:15 -0400 Subject: [PATCH 30/86] clean up tests --- vault/core.go | 2 - .../seal_migration/seal_migration_test.go | 67 ++++++++++--------- 2 files changed, 35 insertions(+), 34 deletions(-) diff --git a/vault/core.go b/vault/core.go index d2460d60082c..67a00d98825b 100644 --- a/vault/core.go +++ b/vault/core.go @@ -1293,8 +1293,6 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover c.migrationInfo.shamirCombinedKey = make([]byte, len(recoveredKey)) copy(c.migrationInfo.shamirCombinedKey, recoveredKey) if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedShamirMaster { - fmt.Printf("test-debug (c *Core) unsealPart zzz %T\n", seal.GetAccess().Wrapper) - fmt.Printf("test-debug (c *Core) unsealPart zzz type %v\n", seal.GetAccess().Wrapper.Type()) err = seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveredKey) if err != nil { return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err) diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 95ee62fbfbf6..5c5851324fd7 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -42,36 +42,36 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 51000) }) - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) - - //if includeRaft { - // t.Run("raft", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("raft") - // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - // defer cleanup() - // tf(t, logger, storage, 54000) - // }) - //} + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) + + if includeRaft { + t.Run("raft", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + defer cleanup() + tf(t, logger, storage, 54000) + }) + } } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -363,8 +363,7 @@ func initializeTransit( storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) { - //var transitSeal vault.Seal - transitSeal := tss.MakeSeal(t, "transit-seal-key") + var transitSeal vault.Seal var baseClusterPort = 
basePort + 10 @@ -378,6 +377,10 @@ func initializeTransit( BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, SealFunc: func() vault.Seal { + // Each core will create its own transit seal here. Later + // on it won't matter which one of these we end up using, since + // they were all created from the same transit key. + transitSeal = tss.MakeSeal(t, "transit-seal-key") return transitSeal }, } From f7f49eea5045d132b1f073947047c4b1a9e3356e Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 26 May 2020 17:00:15 -0400 Subject: [PATCH 31/86] clean up core refactoring --- vault/core.go | 142 ++---------------- .../seal_migration/seal_migration_test.go | 20 ++- 2 files changed, 22 insertions(+), 140 deletions(-) diff --git a/vault/core.go b/vault/core.go index 67a00d98825b..888c547ba3f2 100644 --- a/vault/core.go +++ b/vault/core.go @@ -943,7 +943,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.clusterListener.Store((*cluster.Listener)(nil)) - err = adjustForSealMigration(c.logger, c, c.seal, conf.UnwrapSeal) + err = c.adjustForSealMigration(conf.UnwrapSeal) if err != nil { return nil, err } @@ -2230,8 +2230,11 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi return barrierConf, recoveryConf, nil } -func adjustForSealMigration(logger log.Logger, core *Core, barrierSeal, unwrapSeal Seal) error { - existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background()) +func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { + + barrierSeal := c.seal + + existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) if err != nil { return fmt.Errorf("Error checking for existing seal: %s", err) } @@ -2274,7 +2277,7 @@ func adjustForSealMigration(logger log.Logger, core *Core, barrierSeal, unwrapSe // The value reflected in config is what we're going to migrationSeal = NewDefaultSeal(&vaultseal.Access{ Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ - Logger: logger.Named("shamir"), + Logger: c.logger.Named("shamir"), }), }) @@ -2295,7 +2298,7 @@ func adjustForSealMigration(logger log.Logger, core *Core, barrierSeal, unwrapSe // In this case our migration seal is set so we are using it // (potentially) for unwrapping. Set it on core for that purpose then // exit. 
- core.SetSealsForMigration(nil, nil, unwrapSeal) + c.setSealsForMigration(nil, nil, unwrapSeal) return nil } @@ -2328,12 +2331,12 @@ func adjustForSealMigration(logger log.Logger, core *Core, barrierSeal, unwrapSe newSeal.SetCachedRecoveryConfig(newRecoveryConfig) } - core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal) + c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) return nil } -func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { +func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { c.stateLock.Lock() defer c.stateLock.Unlock() c.unwrapSeal = unwrapSeal @@ -2352,131 +2355,6 @@ func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { } } -//func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { -// existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background()) -// if err != nil { -// return fmt.Errorf("Error checking for existing seal: %s", err) -// } -// -// // If we don't have an existing config or if it's the deprecated auto seal -// // which needs an upgrade, skip out -// if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { -// return nil -// } -// -// if unwrapSeal == nil { -// // We have the same barrier type and the unwrap seal is nil so we're not -// // migrating from same to same, IOW we assume it's not a migration -// if existBarrierSealConfig.Type == c.seal.BarrierType() { -// return nil -// } -// -// // If we're not coming from Shamir, and the existing type doesn't match -// // the barrier type, we need both the migration seal and the new seal -// if existBarrierSealConfig.Type != wrapping.Shamir && c.seal.BarrierType() != wrapping.Shamir { -// return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) -// } -// } else { -// if unwrapSeal.BarrierType() == wrapping.Shamir { -// return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") -// } -// } -// -// if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { -// return errors.New(`Recovery seal configuration not found for existing seal`) -// } -// -// var migrationSeal Seal -// var newSeal Seal -// -// // Determine the migrationSeal. This is either going to be an instance of -// // shamir or the unwrapSeal. -// switch existBarrierSealConfig.Type { -// case wrapping.Shamir: -// // The value reflected in config is what we're going to -// migrationSeal = NewDefaultSeal(&vaultseal.Access{ -// Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ -// Logger: c.logger.Named("shamir"), -// }), -// }) -// -// default: -// // If we're not coming from Shamir we expect the previous seal to be -// // in the config and disabled. -// migrationSeal = unwrapSeal -// } -// -// // newSeal will be the barrierSeal -// newSeal = c.seal -// -// if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() { -// return errors.New("Migrating between same seal types is currently not supported") -// } -// -// if unwrapSeal != nil && existBarrierSealConfig.Type == c.seal.BarrierType() { -// // In this case our migration seal is set so we are using it -// // (potentially) for unwrapping. Set it on core for that purpose then -// // exit. -// c.setSealsForMigration(nil, nil, unwrapSeal) -// return nil -// } -// -// // Set the appropriate barrier and recovery configs. 
-// switch { -// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): -// // Migrating from auto->auto, copy the configs over -// newSeal.SetCachedBarrierConfig(existBarrierSealConfig) -// newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) -// case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported(): -// // Migrating from auto->shamir, clone auto's recovery config and set -// // stored keys to 1. -// newSealConfig := existRecoverySealConfig.Clone() -// newSealConfig.StoredShares = 1 -// newSeal.SetCachedBarrierConfig(newSealConfig) -// case newSeal != nil && newSeal.RecoveryKeySupported(): -// // Migrating from shamir->auto, set a new barrier config and set -// // recovery config to a clone of shamir's barrier config with stored -// // keys set to 0. -// newBarrierSealConfig := &SealConfig{ -// Type: newSeal.BarrierType(), -// SecretShares: 1, -// SecretThreshold: 1, -// StoredShares: 1, -// } -// newSeal.SetCachedBarrierConfig(newBarrierSealConfig) -// -// newRecoveryConfig := existBarrierSealConfig.Clone() -// newRecoveryConfig.StoredShares = 0 -// newSeal.SetCachedRecoveryConfig(newRecoveryConfig) -// } -// -// c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal) -// -// return nil -//} -// -//func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { -// c.stateLock.Lock() -// defer c.stateLock.Unlock() -// -// fmt.Printf("test-debug (c *Core) setSealsForMigration %T %T %T\n", migrationSeal, newSeal, unwrapSeal) -// -// c.unwrapSeal = unwrapSeal -// if c.unwrapSeal != nil { -// c.unwrapSeal.SetCore(c) -// } -// if newSeal != nil && migrationSeal != nil { -// c.migrationInfo = &migrationInformation{ -// seal: migrationSeal, -// } -// c.migrationInfo.seal.SetCore(c) -// c.seal = newSeal -// c.seal.SetCore(c) -// c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType()) -// c.initSealsForMigration() -// } -//} - // unsealKeyToMasterKey takes a key provided by the user, either a recovery key // if using an autoseal or an unseal key with Shamir. It returns a nil error // if the key is valid and an error otherwise. It also returns the master key diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go index 5c5851324fd7..faa78e32d112 100644 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ b/vault/external_tests/seal_migration/seal_migration_test.go @@ -52,15 +52,15 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 52000) }) - t.Run("consul", func(t *testing.T) { - t.Parallel() + //t.Run("consul", func(t *testing.T) { + // t.Parallel() - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) if includeRaft { t.Run("raft", func(t *testing.T) { @@ -486,6 +486,8 @@ func runTransit( cluster.EnsureCoresSealed(t) } +// TestShamir is a temporary test that exercises the reusable raft storage. 
+// It will be replaced once we do the post-1.4 migration testing.
 func TestShamir(t *testing.T) {
 	testVariousBackends(t, testShamir, true)
 }
@@ -498,6 +500,8 @@ func testShamir(
 	runShamir(t, logger, storage, basePort, rootToken, barrierKeys)
 }
 
+// TestTransit is a temporary test that exercises the reusable raft storage.
+// It will be replaced once we do the post-1.4 migration testing.
 func TestTransit(t *testing.T) {
 	testVariousBackends(t, testTransit, true)
 }

From e2dd3d615bc3805d1104e517522f1ea543288070 Mon Sep 17 00:00:00 2001
From: Mike Jarmy
Date: Tue, 26 May 2020 18:28:13 -0400
Subject: [PATCH 32/86] fix bug in shamir->transit migration

---
 .../seal_migration_pre14_test.go              |  2 +-
 .../seal_migration/seal_migration_test.go     | 29 +++++++++----------
 2 files changed, 14 insertions(+), 17 deletions(-)

diff --git a/vault/external_tests/seal_migration/seal_migration_pre14_test.go b/vault/external_tests/seal_migration/seal_migration_pre14_test.go
index 1d3f7b9d08bb..f8af83a2dd5d 100644
--- a/vault/external_tests/seal_migration/seal_migration_pre14_test.go
+++ b/vault/external_tests/seal_migration/seal_migration_pre14_test.go
@@ -52,7 +52,7 @@ func testSealMigrationTransitToShamir_Pre14(
 	tss.Cleanup()
 	tss = nil
 
-	// Run the backend with shamir. Note that the recovery keys are now the
+	// Run the backend with shamir.  Note that the recovery keys are now the
 	// barrier keys.
 	runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
 }
diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go
index faa78e32d112..4dd49eec3ec8 100644
--- a/vault/external_tests/seal_migration/seal_migration_test.go
+++ b/vault/external_tests/seal_migration/seal_migration_test.go
@@ -52,15 +52,15 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) {
 		tf(t, logger, storage, 52000)
 	})
 
-	//t.Run("consul", func(t *testing.T) {
-	//	t.Parallel()
+	t.Run("consul", func(t *testing.T) {
+		t.Parallel()
 
-	//	logger := logger.Named("consul")
-	//	storage, cleanup := teststorage.MakeReusableStorage(
-	//		t, logger, teststorage.MakeConsulBackend(t, logger))
-	//	defer cleanup()
-	//	tf(t, logger, storage, 53000)
-	//})
+		logger := logger.Named("consul")
+		storage, cleanup := teststorage.MakeReusableStorage(
+			t, logger, teststorage.MakeConsulBackend(t, logger))
+		defer cleanup()
+		tf(t, logger, storage, 53000)
+	})
 
 	if includeRaft {
 		t.Run("raft", func(t *testing.T) {
@@ -98,7 +98,8 @@ func testSealMigrationShamirToTransit_Pre14(
 	}()
 	tss.MakeKey(t, "transit-seal-key")
 
-	// Migrate the backend from transit to shamir
+	// Migrate the backend from shamir to transit.  Note that the barrier keys
+	// are now the recovery keys.
 	transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)
 
 	// Run the backend with transit.
@@ -108,7 +109,7 @@ func testSealMigrationShamirToTransit_Pre14( func migrateFromShamirToTransit_Pre14( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, - tss *sealhelper.TransitSealServer, rootToken string, keys [][]byte, + tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte, ) vault.Seal { var baseClusterPort = basePort + 10 @@ -145,12 +146,8 @@ func migrateFromShamirToTransit_Pre14( client := leader.Client client.SetToken(rootToken) - // Unseal with the recovery keys - cluster.RecoveryKeys = keys - for _, core := range cluster.Cores { - cluster.UnsealCore(t, core) - } - testhelpers.WaitForActiveNode(t, cluster) + // Unseal and migrate to Transit. + unsealMigrate(t, client, recoveryKeys, true) // Wait for migration to finish. Sadly there is no callback, and the // test will fail later on if we don't do this. From 8c44144900bc9941596d986f9ab9b66a5fbf4323 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 8 Jun 2020 14:10:58 -0400 Subject: [PATCH 33/86] remove unnecessary lock from setSealsForMigration() --- vault/core.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/vault/core.go b/vault/core.go index 888c547ba3f2..98c0241d9dca 100644 --- a/vault/core.go +++ b/vault/core.go @@ -2337,8 +2337,6 @@ func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { } func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.unwrapSeal = unwrapSeal if c.unwrapSeal != nil { c.unwrapSeal.SetCore(c) From 149bd7f7413e6bf026ec6c1de4244d9717af4a17 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 11 Jun 2020 09:01:24 -0400 Subject: [PATCH 34/86] rename sealmigration test package --- .../seal_migration_pre14_test.go | 134 ----- .../seal_migration/seal_migration_test.go | 517 ------------------ .../sealmigration/seal_migration_test.go | 24 +- 3 files changed, 12 insertions(+), 663 deletions(-) delete mode 100644 vault/external_tests/seal_migration/seal_migration_pre14_test.go delete mode 100644 vault/external_tests/seal_migration/seal_migration_test.go diff --git a/vault/external_tests/seal_migration/seal_migration_pre14_test.go b/vault/external_tests/seal_migration/seal_migration_pre14_test.go deleted file mode 100644 index f8af83a2dd5d..000000000000 --- a/vault/external_tests/seal_migration/seal_migration_pre14_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// +build !enterprise - -package seal_migration - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/go-test/deep" - - "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping" - "github.com/hashicorp/vault/helper/testhelpers" - sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/vault" -) - -// TestSealMigration_TransitToShamir_Pre14 tests transit-to-shamir seal -// migration, using the pre-1.4 method of bring down the whole cluster to do -// the migration. -func TestSealMigration_TransitToShamir_Pre14(t *testing.T) { - // Note that we do not test integrated raft storage since this is - // a pre-1.4 test. - testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) -} - -func testSealMigrationTransitToShamir_Pre14( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Create the transit server. 
- tss := sealhelper.NewTransitSealServer(t) - defer func() { - if tss != nil { - tss.Cleanup() - } - }() - tss.MakeKey(t, "transit-seal-key") - - // Initialize the backend with transit. - rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - - // Migrate the backend from transit to shamir - migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) - - // Now that migration is done, we can nuke the transit server, since we - // can unseal without it. - tss.Cleanup() - tss = nil - - // Run the backend with shamir. Note that the recovery keys are now the - // barrier keys. - runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) -} - -func migrateFromTransitToShamir_Pre14( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int, - tss *sealhelper.TransitSealServer, transitSeal vault.Seal, - rootToken string, recoveryKeys [][]byte) { - - var baseClusterPort = basePort + 10 - - var conf = vault.CoreConfig{ - Logger: logger.Named("migrateFromTransitToShamir"), - // N.B. Providing an UnwrapSeal puts us in migration mode. This is the - // equivalent of doing the following in HCL: - // seal "transit" { - // // ... - // disabled = "true" - // } - UnwrapSeal: transitSeal, - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - SkipInit: true, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - client.SetToken(rootToken) - - // Attempt to unseal while the transit server is unreachable. Although - // we're unsealing using the recovery keys, this is still an - // autounseal, so it should fail. - tss.EnsureCoresSealed(t) - unsealMigrate(t, client, recoveryKeys, false) - tss.UnsealCores(t) - testhelpers.WaitForActiveNode(t, tss.TestCluster) - - // Unseal and migrate to Shamir. Although we're unsealing using the - // recovery keys, this is still an autounseal. - unsealMigrate(t, client, recoveryKeys, true) - testhelpers.WaitForActiveNode(t, cluster) - - // Wait for migration to finish. Sadly there is no callback, and the - // test will fail later on if we don't do this. - time.Sleep(10 * time.Second) - - // Read the secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } - - // Make sure the seal configs were updated correctly. 
- b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) - if err != nil { - t.Fatal(err) - } - verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) - if r != nil { - t.Fatalf("expected nil recovery config, got: %#v", r) - } - - cluster.EnsureCoresSealed(t) -} diff --git a/vault/external_tests/seal_migration/seal_migration_test.go b/vault/external_tests/seal_migration/seal_migration_test.go deleted file mode 100644 index 4dd49eec3ec8..000000000000 --- a/vault/external_tests/seal_migration/seal_migration_test.go +++ /dev/null @@ -1,517 +0,0 @@ -package seal_migration - -import ( - "context" - "encoding/base64" - "fmt" - "testing" - "time" - - "github.com/go-test/deep" - - "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers" - sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/vault" -) - -const ( - numTestCores = 5 - keyShares = 3 - keyThreshold = 3 -) - -type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) - -func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { - - logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - - t.Run("inmem", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("inmem") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeInmemBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 51000) - }) - - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) - - if includeRaft { - t.Run("raft", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - defer cleanup() - tf(t, logger, storage, 54000) - }) - } -} - -// TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal -// migration, using the pre-1.4 method of bring down the whole cluster to do -// the migration. -func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) { - // Note that we do not test integrated raft storage since this is - // a pre-1.4 test. - testVariousBackends(t, testSealMigrationShamirToTransit_Pre14, false) -} - -func testSealMigrationShamirToTransit_Pre14( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Initialize the backend using shamir - rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) - - // Create the transit server. - tss := sealhelper.NewTransitSealServer(t) - defer func() { - tss.EnsureCoresSealed(t) - tss.Cleanup() - }() - tss.MakeKey(t, "transit-seal-key") - - // Migrate the backend from shamir to transit. Note that the barrier keys - // are now the recovery keys. 
- transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys) - - // Run the backend with transit. - runTransit(t, logger, storage, basePort, rootToken, transitSeal) -} - -func migrateFromShamirToTransit_Pre14( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int, - tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte, -) vault.Seal { - - var baseClusterPort = basePort + 10 - - var transitSeal vault.Seal - - var conf = vault.CoreConfig{ - Logger: logger.Named("migrateFromShamirToTransit"), - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - SkipInit: true, - // N.B. Providing a transit seal puts us in migration mode. - SealFunc: func() vault.Seal { - // Each core will create its own transit seal here. Later - // on it won't matter which one of these we end up using, since - // they were all created from the same transit key. - transitSeal = tss.MakeSeal(t, "transit-seal-key") - return transitSeal - }, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - client.SetToken(rootToken) - - // Unseal and migrate to Transit. - unsealMigrate(t, client, recoveryKeys, true) - - // Wait for migration to finish. Sadly there is no callback, and the - // test will fail later on if we don't do this. - time.Sleep(10 * time.Second) - - // Read the secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } - - // Make sure the seal configs were updated correctly. - b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) - if err != nil { - t.Fatal(err) - } - verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) - verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) - - cluster.EnsureCoresSealed(t) - - return transitSeal -} - -func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { - - for i, key := range keys { - - // Try to unseal with missing "migrate" parameter - _, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ - Key: base64.StdEncoding.EncodeToString(key), - }) - if err == nil { - t.Fatal("expected error due to lack of migrate parameter") - } - - // Unseal with "migrate" parameter - resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ - Key: base64.StdEncoding.EncodeToString(key), - Migrate: true, - }) - - if i < keyThreshold-1 { - // Not enough keys have been provided yet. - if err != nil { - t.Fatal(err) - } - } else { - if transitServerAvailable { - // The transit server is running. - if err != nil { - t.Fatal(err) - } - if resp == nil || resp.Sealed { - t.Fatalf("expected unsealed state; got %#v", resp) - } - } else { - // The transit server is stopped. - if err == nil { - t.Fatal("expected error due to transit server being stopped.") - } - } - break - } - } -} - -// verifyBarrierConfig verifies that a barrier configuration is correct. 
-func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) { - t.Helper() - if cfg.Type != sealType { - t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType) - } - if cfg.SecretShares != shares { - t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares) - } - if cfg.SecretThreshold != threshold { - t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold) - } - if cfg.StoredShares != stored { - t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored) - } -} - -// initializeShamir initializes a brand new backend storage with Shamir. -func initializeShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) (string, [][]byte) { - - var baseClusterPort = basePort + 10 - - // Start the cluster - var conf = vault.CoreConfig{ - Logger: logger.Named("initializeShamir"), - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - - // Unseal - if storage.IsRaft { - testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } else { - cluster.UnsealCores(t) - } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - - // Write a secret that we will read back out later. - _, err := client.Logical().Write( - "secret/foo", - map[string]interface{}{"zork": "quux"}) - if err != nil { - t.Fatal(err) - } - - // Seal the cluster - cluster.EnsureCoresSealed(t) - - return cluster.RootToken, cluster.BarrierKeys -} - -// runShamir uses a pre-populated backend storage with Shamir. -func runShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int, - rootToken string, barrierKeys [][]byte) { - - var baseClusterPort = basePort + 10 - - // Start the cluster - var conf = vault.CoreConfig{ - Logger: logger.Named("runShamir"), - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - SkipInit: true, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - client.SetToken(rootToken) - - // Unseal - cluster.BarrierKeys = barrierKeys - if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) - testhelpers.SetRaftAddressProviders(t, cluster, provider) - - for _, core := range cluster.Cores { - cluster.UnsealCore(t, core) - } - - // This is apparently necessary for the raft cluster to get itself - // situated. 
- time.Sleep(15 * time.Second) - - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } else { - cluster.UnsealCores(t) - } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - - // Read the secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } - - // Seal the cluster - cluster.EnsureCoresSealed(t) -} - -// initializeTransit initializes a brand new backend storage with Transit. -func initializeTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int, - tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) { - - var transitSeal vault.Seal - - var baseClusterPort = basePort + 10 - - // Start the cluster - var conf = vault.CoreConfig{ - Logger: logger.Named("initializeTransit"), - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - SealFunc: func() vault.Seal { - // Each core will create its own transit seal here. Later - // on it won't matter which one of these we end up using, since - // they were all created from the same transit key. - transitSeal = tss.MakeSeal(t, "transit-seal-key") - return transitSeal - }, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - - // Join raft - if storage.IsRaft { - testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - - // Write a secret that we will read back out later. - _, err := client.Logical().Write( - "secret/foo", - map[string]interface{}{"zork": "quux"}) - if err != nil { - t.Fatal(err) - } - - // Seal the cluster - cluster.EnsureCoresSealed(t) - - return cluster.RootToken, cluster.RecoveryKeys, transitSeal -} - -func runTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int, - rootToken string, transitSeal vault.Seal) { - - var baseClusterPort = basePort + 10 - - // Start the cluster - var conf = vault.CoreConfig{ - Logger: logger.Named("runTransit"), - Seal: transitSeal, - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - SkipInit: true, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - client.SetToken(rootToken) - - // Unseal. Even though we are using autounseal, we have to unseal - // explicitly because we are using SkipInit. - if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) - testhelpers.SetRaftAddressProviders(t, cluster, provider) - - for _, core := range cluster.Cores { - cluster.UnsealCoreWithStoredKeys(t, core) - } - - // This is apparently necessary for the raft cluster to get itself - // situated. 
- time.Sleep(15 * time.Second) - - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } else { - if err := cluster.UnsealCoresWithError(true); err != nil { - t.Fatal(err) - } - } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - - // Read the secret - secret, err := client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } - - // Seal the cluster - cluster.EnsureCoresSealed(t) -} - -// TestShamir is a temporary test that exercises the reusable raft storage. -// It will be replace once we do the post-1.4 migration testing. -func TestShamir(t *testing.T) { - testVariousBackends(t, testShamir, true) -} - -func testShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) - runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -} - -// TestTransit is a temporary test that exercises the reusable raft storage. -// It will be replace once we do the post-1.4 migration testing. -func TestTransit(t *testing.T) { - testVariousBackends(t, testTransit, true) -} - -func testTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Create the transit server. - tss := sealhelper.NewTransitSealServer(t) - defer tss.Cleanup() - tss.MakeKey(t, "transit-seal-key") - - rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - runTransit(t, logger, storage, basePort, rootToken, transitSeal) -} diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 78506b3023ea..3c9cb53f3649 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -39,7 +39,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeInmemBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 20000) + tf(t, logger, storage, 51000) }) t.Run("file", func(t *testing.T) { @@ -49,7 +49,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeFileBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 20020) + tf(t, logger, storage, 52000) }) t.Run("consul", func(t *testing.T) { @@ -59,7 +59,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeConsulBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 20040) + tf(t, logger, storage, 53000) }) if includeRaft { @@ -69,7 +69,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logger.Named("raft") storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) defer cleanup() - tf(t, logger, storage, 20060) + tf(t, logger, storage, 54000) }) } } @@ -266,13 +266,13 @@ func initializeShamir( // Unseal if storage.IsRaft { testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - 
testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Write a secret that we will read back out later. _, err := client.Logical().Write( @@ -333,13 +333,13 @@ func runShamir( // situated. time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Read the secret secret, err := client.Logical().Read("secret/foo") @@ -395,11 +395,11 @@ func initializeTransit( // Join raft if storage.IsRaft { testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } } - testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Write a secret that we will read back out later. _, err := client.Logical().Write( @@ -460,7 +460,7 @@ func runTransit( // situated. time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } } else { @@ -468,7 +468,7 @@ func runTransit( t.Fatal(err) } } - testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) // Read the secret secret, err := client.Logical().Read("secret/foo") From f645ab6e4512e0874308a8db4d137b7e78318340 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 11 Jun 2020 10:40:16 -0400 Subject: [PATCH 35/86] use ephemeral ports below 30000 --- vault/external_tests/sealmigration/seal_migration_test.go | 8 ++++---- vault/testing.go | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 3c9cb53f3649..6ca96124e133 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -39,7 +39,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeInmemBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 51000) + tf(t, logger, storage, 20000) }) t.Run("file", func(t *testing.T) { @@ -49,7 +49,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeFileBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 52000) + tf(t, logger, storage, 20020) }) t.Run("consul", func(t *testing.T) { @@ -59,7 +59,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeConsulBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 53000) + tf(t, logger, storage, 20040) }) if includeRaft { @@ -69,7 +69,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logger.Named("raft") storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) defer 
cleanup() - tf(t, logger, storage, 54000) + tf(t, logger, storage, 20060) }) } } diff --git a/vault/testing.go b/vault/testing.go index 0c73b1f29ae5..b09c5b4503bd 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -26,6 +26,8 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/armon/go-metrics" hclog "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog" From 0cfb125f33f7b1d16ac952d40f27ebc25928e416 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 27 May 2020 11:54:51 -0400 Subject: [PATCH 36/86] stub out test that brings individual nodes up and down --- .../sealmigration/seal_migration_test.go | 114 +++++++++++++----- 1 file changed, 84 insertions(+), 30 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 6ca96124e133..e5889edfa486 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -42,36 +42,36 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 20000) }) - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 20020) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 20040) - }) - - if includeRaft { - t.Run("raft", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - defer cleanup() - tf(t, logger, storage, 20060) - }) - } + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) + + //if includeRaft { + // t.Run("raft", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("raft") + // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + // defer cleanup() + // tf(t, logger, storage, 54000) + // }) + //} } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -515,3 +515,57 @@ func testTransit( rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) runTransit(t, logger, storage, basePort, rootToken, transitSeal) } + +func TestFoo(t *testing.T) { + testVariousBackends(t, testFoo, true) +} + +func testFoo( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + var baseClusterPort = basePort + 10 + + // Start the cluster + var conf = vault.CoreConfig{ + Logger: logger.Named("foo"), + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + } + 
storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + + // Unseal + if storage.IsRaft { + testhelpers.RaftClusterJoinNodes(t, cluster) + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + t.Fatal(err) + } + } else { + cluster.UnsealCores(t) + } + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + + // Write a secret that we will read back out later. + _, err := client.Logical().Write( + "secret/foo", + map[string]interface{}{"zork": "quux"}) + if err != nil { + t.Fatal(err) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) +} From 28e5d78552fca9bd4135955ed784c86c836e0711 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 27 May 2020 16:27:32 -0400 Subject: [PATCH 37/86] refactor NewTestCluster --- .../sealmigration/seal_migration_test.go | 3 +- vault/testing.go | 196 ++++++++++-------- 2 files changed, 114 insertions(+), 85 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index e5889edfa486..7dbbe473f1d2 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -517,7 +517,8 @@ func testTransit( } func TestFoo(t *testing.T) { - testVariousBackends(t, testFoo, true) + println("asdfasdfadsfadsfadfa") + //testVariousBackends(t, testFoo, true) } func testFoo( diff --git a/vault/testing.go b/vault/testing.go index b09c5b4503bd..ee0b8ecb30dc 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1080,16 +1080,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te numCores = opts.NumCores } - var disablePR1103 bool - if opts != nil && opts.PR1103Disabled { - disablePR1103 = true - } - - var firstCoreNumber int - if opts != nil { - firstCoreNumber = opts.FirstCoreNumber - } - certIPs := []net.IP{ net.IPv6loopback, net.ParseIP("127.0.0.1"), @@ -1453,87 +1443,22 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te t.Fatalf("err: %v", err) } + // Create cores cleanupFuncs := []func(){} cores := []*Core{} coreConfigs := []*CoreConfig{} - for i := 0; i < numCores; i++ { - localConfig := *coreConfig - localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port) - - // if opts.SealFunc is provided, use that to generate a seal for the config instead - if opts != nil && opts.SealFunc != nil { - localConfig.Seal = opts.SealFunc() - } - - if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) { - localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", i)) - } - if opts != nil && opts.PhysicalFactory != nil { - physBundle := opts.PhysicalFactory(t, i, localConfig.Logger) - switch { - case physBundle == nil && coreConfig.Physical != nil: - case physBundle == nil && coreConfig.Physical == nil: - t.Fatal("PhysicalFactory produced no physical and none in CoreConfig") - case physBundle != nil: - testCluster.Logger.Info("created physical backend", "instance", i) - coreConfig.Physical = physBundle.Backend - localConfig.Physical = physBundle.Backend - base.Physical = physBundle.Backend - haBackend := physBundle.HABackend - if haBackend == nil { - if ha, ok := physBundle.Backend.(physical.HABackend); ok { - haBackend = ha - } - } - coreConfig.HAPhysical = haBackend - localConfig.HAPhysical = haBackend - 
if physBundle.Cleanup != nil { - cleanupFuncs = append(cleanupFuncs, physBundle.Cleanup) - } - } - } - - if opts != nil && opts.ClusterLayers != nil { - localConfig.ClusterNetworkLayer = opts.ClusterLayers.Layers()[i] - } - switch { - case localConfig.LicensingConfig != nil: - if pubKey != nil { - localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey.(ed25519.PublicKey)) - } - default: - localConfig.LicensingConfig = testGetLicensingConfig(pubKey) - } - - if localConfig.MetricsHelper == nil { - inm := metrics.NewInmemSink(10*time.Second, time.Minute) - metrics.DefaultInmemSignal(inm) - localConfig.MetricsHelper = metricsutil.NewMetricsHelper(inm, false) - } + for i := 0; i < numCores; i++ { + port := listeners[i][0].Address.Port + cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, port, pubKey) - c, err := NewCore(&localConfig) - if err != nil { - t.Fatalf("err: %v", err) - } - c.coreNumber = firstCoreNumber + i - c.PR1103disabled = disablePR1103 + cleanupFuncs = append(cleanupFuncs, cleanup) cores = append(cores, c) coreConfigs = append(coreConfigs, &localConfig) - if opts != nil && opts.HandlerFunc != nil { - props := opts.DefaultHandlerProperties - props.Core = c - if props.ListenerConfig != nil && props.ListenerConfig.MaxRequestDuration == 0 { - props.ListenerConfig.MaxRequestDuration = DefaultMaxRequestDuration - } - handlers[i] = opts.HandlerFunc(&props) - servers[i].Handler = handlers[i] - } - // Set this in case the Seal was manually set before the core was - // created - if localConfig.Seal != nil { - localConfig.Seal.SetCore(c) + if handler != nil { + handlers[i] = handler + servers[i].Handler = handlers[i] } } @@ -1777,7 +1702,9 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te testCluster.CleanupFunc = func() { for _, c := range cleanupFuncs { - c() + if c != nil { + c() + } } if l, ok := testCluster.Logger.(*TestLogger); ok { if t.Failed() { @@ -1798,6 +1725,107 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te return &testCluster } +func (cluster *TestCluster) newCore( + t testing.T, + idx int, + coreConfig *CoreConfig, + opts *TestClusterOptions, + port int, + pubKey interface{}, +) (func(), *Core, CoreConfig, http.Handler) { + + localConfig := *coreConfig + cleanupFunc := func() {} + var handler http.Handler + + var disablePR1103 bool + if opts != nil && opts.PR1103Disabled { + disablePR1103 = true + } + + var firstCoreNumber int + if opts != nil { + firstCoreNumber = opts.FirstCoreNumber + } + + localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", port) + + // if opts.SealFunc is provided, use that to generate a seal for the config instead + if opts != nil && opts.SealFunc != nil { + localConfig.Seal = opts.SealFunc() + } + + if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) { + localConfig.Logger = cluster.Logger.Named(fmt.Sprintf("core%d", idx)) + } + if opts != nil && opts.PhysicalFactory != nil { + physBundle := opts.PhysicalFactory(t, idx, localConfig.Logger) + switch { + case physBundle == nil && coreConfig.Physical != nil: + case physBundle == nil && coreConfig.Physical == nil: + t.Fatal("PhysicalFactory produced no physical and none in CoreConfig") + case physBundle != nil: + cluster.Logger.Info("created physical backend", "instance", idx) + coreConfig.Physical = physBundle.Backend + localConfig.Physical = physBundle.Backend + //base.Physical = physBundle.Backend + haBackend := 
physBundle.HABackend + if haBackend == nil { + if ha, ok := physBundle.Backend.(physical.HABackend); ok { + haBackend = ha + } + } + coreConfig.HAPhysical = haBackend + localConfig.HAPhysical = haBackend + if physBundle.Cleanup != nil { + cleanupFunc = physBundle.Cleanup + } + } + } + + if opts != nil && opts.ClusterLayers != nil { + localConfig.ClusterNetworkLayer = opts.ClusterLayers.Layers()[idx] + } + + switch { + case localConfig.LicensingConfig != nil: + if pubKey != nil { + localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey.(ed25519.PublicKey)) + } + default: + localConfig.LicensingConfig = testGetLicensingConfig(pubKey) + } + + if localConfig.MetricsHelper == nil { + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + metrics.DefaultInmemSignal(inm) + localConfig.MetricsHelper = metricsutil.NewMetricsHelper(inm, false) + } + + c, err := NewCore(&localConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + c.coreNumber = firstCoreNumber + idx + c.PR1103disabled = disablePR1103 + if opts != nil && opts.HandlerFunc != nil { + props := opts.DefaultHandlerProperties + props.Core = c + if props.ListenerConfig != nil && props.ListenerConfig.MaxRequestDuration == 0 { + props.ListenerConfig.MaxRequestDuration = DefaultMaxRequestDuration + } + handler = opts.HandlerFunc(&props) + } + + // Set this in case the Seal was manually set before the core was + // created + if localConfig.Seal != nil { + localConfig.Seal.SetCore(c) + } + + return cleanupFunc, c, localConfig, handler +} + func NewMockBuiltinRegistry() *mockBuiltinRegistry { return &mockBuiltinRegistry{ forTesting: map[string]consts.PluginType{ From 3d00a7088f4743dc53c7294d79969ef7dac0be8c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 27 May 2020 17:04:40 -0400 Subject: [PATCH 38/86] pass listeners into newCore() --- vault/testing.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index ee0b8ecb30dc..f55d4b8cff2c 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1449,8 +1449,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te coreConfigs := []*CoreConfig{} for i := 0; i < numCores; i++ { - port := listeners[i][0].Address.Port - cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, port, pubKey) + cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], pubKey) cleanupFuncs = append(cleanupFuncs, cleanup) cores = append(cores, c) @@ -1730,7 +1729,7 @@ func (cluster *TestCluster) newCore( idx int, coreConfig *CoreConfig, opts *TestClusterOptions, - port int, + listeners []*TestListener, pubKey interface{}, ) (func(), *Core, CoreConfig, http.Handler) { @@ -1748,7 +1747,7 @@ func (cluster *TestCluster) newCore( firstCoreNumber = opts.FirstCoreNumber } - localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", port) + localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[0].Address.Port) // if opts.SealFunc is provided, use that to generate a seal for the config instead if opts != nil && opts.SealFunc != nil { From d40b963bd2e0b73dfd88071f14dde77ca125e55c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 27 May 2020 17:23:20 -0400 Subject: [PATCH 39/86] simplify cluster address setup --- vault/testing.go | 75 +++++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index 
f55d4b8cff2c..ddc6b41cbc10 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1093,14 +1093,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te certIPs = append(certIPs, baseAddr.IP) } - baseClusterListenPort := 0 - if opts != nil && opts.BaseClusterListenPort != 0 { - if opts.BaseListenAddress == "" { - t.Fatal("BaseListenAddress is not specified") - } - baseClusterListenPort = opts.BaseClusterListenPort - } - var testCluster TestCluster switch { @@ -1461,30 +1453,9 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te } } - // // Clustering setup - // - clusterAddrGen := func(lns []*TestListener, port int) []*net.TCPAddr { - ret := make([]*net.TCPAddr, len(lns)) - for i, ln := range lns { - ret[i] = &net.TCPAddr{ - IP: ln.Address.IP, - Port: port, - } - } - return ret - } - for i := 0; i < numCores; i++ { - if coreConfigs[i].ClusterAddr != "" { - port := 0 - if baseClusterListenPort != 0 { - port = baseClusterListenPort + i - } - cores[i].Logger().Info("assigning cluster listener for test core", "core", i, "port", port) - cores[i].SetClusterListenerAddrs(clusterAddrGen(listeners[i], port)) - cores[i].SetClusterHandler(handlers[i]) - } + setupClusterAddress(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i]) } if opts == nil || !opts.SkipInit { @@ -1725,12 +1696,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te } func (cluster *TestCluster) newCore( - t testing.T, - idx int, - coreConfig *CoreConfig, - opts *TestClusterOptions, - listeners []*TestListener, - pubKey interface{}, + t testing.T, idx int, coreConfig *CoreConfig, + opts *TestClusterOptions, listeners []*TestListener, pubKey interface{}, ) (func(), *Core, CoreConfig, http.Handler) { localConfig := *coreConfig @@ -1825,6 +1792,42 @@ func (cluster *TestCluster) newCore( return cleanupFunc, c, localConfig, handler } +func setupClusterAddress( + t testing.T, idx int, core *Core, coreConfig *CoreConfig, + opts *TestClusterOptions, listeners []*TestListener, handler http.Handler) { + + if coreConfig.ClusterAddr == "" { + return + } + + clusterAddrGen := func(lns []*TestListener, port int) []*net.TCPAddr { + ret := make([]*net.TCPAddr, len(lns)) + for i, ln := range lns { + ret[i] = &net.TCPAddr{ + IP: ln.Address.IP, + Port: port, + } + } + return ret + } + + baseClusterListenPort := 0 + if opts != nil && opts.BaseClusterListenPort != 0 { + if opts.BaseListenAddress == "" { + t.Fatal("BaseListenAddress is not specified") + } + baseClusterListenPort = opts.BaseClusterListenPort + } + + port := 0 + if baseClusterListenPort != 0 { + port = baseClusterListenPort + idx + } + core.Logger().Info("assigning cluster listener for test core", "core", idx, "port", port) + core.SetClusterListenerAddrs(clusterAddrGen(listeners, port)) + core.SetClusterHandler(handler) +} + func NewMockBuiltinRegistry() *mockBuiltinRegistry { return &mockBuiltinRegistry{ forTesting: map[string]consts.PluginType{ From 59a3b2f2d64eff2bc03c603e988097c7469834f0 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 28 May 2020 08:09:23 -0400 Subject: [PATCH 40/86] simplify extra test core setup --- vault/testing.go | 4 +++- vault/testing_util.go | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index ddc6b41cbc10..b36f2d3f0073 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1668,7 +1668,9 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te testCluster.Cores 
= ret - testExtraClusterCoresTestSetup(t, priKey, testCluster.Cores) + for _, tcc := range testCluster.Cores { + testExtraTestCoreSetup(t, priKey, tcc) + } testCluster.CleanupFunc = func() { for _, c := range cleanupFuncs { diff --git a/vault/testing_util.go b/vault/testing_util.go index 26c7cde057cc..0d1887298f74 100644 --- a/vault/testing_util.go +++ b/vault/testing_util.go @@ -6,9 +6,9 @@ import ( testing "github.com/mitchellh/go-testing-interface" ) -func testGenerateCoreKeys() (interface{}, interface{}, error) { return nil, nil, nil } -func testGetLicensingConfig(interface{}) *LicensingConfig { return &LicensingConfig{} } -func testExtraClusterCoresTestSetup(testing.T, interface{}, []*TestClusterCore) {} +func testGenerateCoreKeys() (interface{}, interface{}, error) { return nil, nil, nil } +func testGetLicensingConfig(interface{}) *LicensingConfig { return &LicensingConfig{} } +func testExtraTestCoreSetup(testing.T, interface{}, *TestClusterCore) {} func testAdjustTestCore(_ *CoreConfig, tcc *TestClusterCore) { tcc.UnderlyingStorage = tcc.physical } From 2fa7116acdcf78b512d51e0a7b7cff7fd0881709 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 28 May 2020 08:49:37 -0400 Subject: [PATCH 41/86] refactor TestCluster for readability --- vault/testing.go | 314 +++++++++++++++++++++++++---------------------- 1 file changed, 164 insertions(+), 150 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index b36f2d3f0073..f901fd0b383a 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1455,155 +1455,12 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te // Clustering setup for i := 0; i < numCores; i++ { - setupClusterAddress(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i]) + testCluster.setupClusterListener(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i]) } + // Initialize cores if opts == nil || !opts.SkipInit { - bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, cores[0], handlers[0]) - barrierKeys, _ := copystructure.Copy(bKeys) - testCluster.BarrierKeys = barrierKeys.([][]byte) - recoveryKeys, _ := copystructure.Copy(rKeys) - testCluster.RecoveryKeys = recoveryKeys.([][]byte) - testCluster.RootToken = root - - // Write root token and barrier keys - err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - for i, key := range testCluster.BarrierKeys { - buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) - if i < len(testCluster.BarrierKeys)-1 { - buf.WriteRune('\n') - } - } - err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755) - if err != nil { - t.Fatal(err) - } - for i, key := range testCluster.RecoveryKeys { - buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) - if i < len(testCluster.RecoveryKeys)-1 { - buf.WriteRune('\n') - } - } - err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "recovery_keys"), buf.Bytes(), 0755) - if err != nil { - t.Fatal(err) - } - - // Unseal first core - for _, key := range bKeys { - if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil { - t.Fatalf("unseal err: %s", err) - } - } - - ctx := context.Background() - - // If stored keys is supported, the above will no no-op, so trigger auto-unseal - // using stored keys to try to unseal - if err := cores[0].UnsealWithStoredKeys(ctx); err != nil { - t.Fatal(err) - } - - // Verify unsealed - if cores[0].Sealed() { - t.Fatal("should not be sealed") - } - - 
TestWaitActive(t, cores[0]) - - // Existing tests rely on this; we can make a toggle to disable it - // later if we want - kvReq := &logical.Request{ - Operation: logical.UpdateOperation, - ClientToken: testCluster.RootToken, - Path: "sys/mounts/secret", - Data: map[string]interface{}{ - "type": "kv", - "path": "secret/", - "description": "key/value secret storage", - "options": map[string]string{ - "version": "1", - }, - }, - } - resp, err := cores[0].HandleRequest(namespace.RootContext(ctx), kvReq) - if err != nil { - t.Fatal(err) - } - if resp.IsError() { - t.Fatal(err) - } - - cfg, err := cores[0].seal.BarrierConfig(ctx) - if err != nil { - t.Fatal(err) - } - - // Unseal other cores unless otherwise specified - if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 { - for i := 1; i < numCores; i++ { - cores[i].seal.SetCachedBarrierConfig(cfg) - for _, key := range bKeys { - if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil { - t.Fatalf("unseal err: %s", err) - } - } - - // If stored keys is supported, the above will no no-op, so trigger auto-unseal - // using stored keys - if err := cores[i].UnsealWithStoredKeys(ctx); err != nil { - t.Fatal(err) - } - } - - // Let them come fully up to standby - time.Sleep(2 * time.Second) - - // Ensure cluster connection info is populated. - // Other cores should not come up as leaders. - for i := 1; i < numCores; i++ { - isLeader, _, _, err := cores[i].Leader() - if err != nil { - t.Fatal(err) - } - if isLeader { - t.Fatalf("core[%d] should not be leader", i) - } - } - } - - // - // Set test cluster core(s) and test cluster - // - cluster, err := cores[0].Cluster(context.Background()) - if err != nil { - t.Fatal(err) - } - testCluster.ID = cluster.ID - - if addAuditBackend { - // Enable auditing. 
- auditReq := &logical.Request{ - Operation: logical.UpdateOperation, - ClientToken: testCluster.RootToken, - Path: "sys/audit/noop", - Data: map[string]interface{}{ - "type": "noop", - }, - } - resp, err = cores[0].HandleRequest(namespace.RootContext(ctx), auditReq) - if err != nil { - t.Fatal(err) - } - - if resp.IsError() { - t.Fatal(err) - } - } + testCluster.initCores(t, opts, cores, handlers, addAuditBackend) } getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { @@ -1697,7 +1554,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te return &testCluster } -func (cluster *TestCluster) newCore( +func (testCluster *TestCluster) newCore( t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey interface{}, ) (func(), *Core, CoreConfig, http.Handler) { @@ -1724,7 +1581,7 @@ func (cluster *TestCluster) newCore( } if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) { - localConfig.Logger = cluster.Logger.Named(fmt.Sprintf("core%d", idx)) + localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", idx)) } if opts != nil && opts.PhysicalFactory != nil { physBundle := opts.PhysicalFactory(t, idx, localConfig.Logger) @@ -1733,7 +1590,7 @@ func (cluster *TestCluster) newCore( case physBundle == nil && coreConfig.Physical == nil: t.Fatal("PhysicalFactory produced no physical and none in CoreConfig") case physBundle != nil: - cluster.Logger.Info("created physical backend", "instance", idx) + testCluster.Logger.Info("created physical backend", "instance", idx) coreConfig.Physical = physBundle.Backend localConfig.Physical = physBundle.Backend //base.Physical = physBundle.Backend @@ -1794,7 +1651,7 @@ func (cluster *TestCluster) newCore( return cleanupFunc, c, localConfig, handler } -func setupClusterAddress( +func (testCluster *TestCluster) setupClusterListener( t testing.T, idx int, core *Core, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, handler http.Handler) { @@ -1830,6 +1687,163 @@ func setupClusterAddress( core.SetClusterHandler(handler) } +func (testCluster *TestCluster) initCores( + t testing.T, + opts *TestClusterOptions, + cores []*Core, + handlers []http.Handler, + addAuditBackend bool, +) { + + bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, cores[0], handlers[0]) + barrierKeys, _ := copystructure.Copy(bKeys) + testCluster.BarrierKeys = barrierKeys.([][]byte) + recoveryKeys, _ := copystructure.Copy(rKeys) + testCluster.RecoveryKeys = recoveryKeys.([][]byte) + testCluster.RootToken = root + + // Write root token and barrier keys + err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + for i, key := range testCluster.BarrierKeys { + buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) + if i < len(testCluster.BarrierKeys)-1 { + buf.WriteRune('\n') + } + } + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755) + if err != nil { + t.Fatal(err) + } + for i, key := range testCluster.RecoveryKeys { + buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) + if i < len(testCluster.RecoveryKeys)-1 { + buf.WriteRune('\n') + } + } + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "recovery_keys"), buf.Bytes(), 0755) + if err != nil { + t.Fatal(err) + } + + // Unseal first core + for _, key := range bKeys { + if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil { + 
t.Fatalf("unseal err: %s", err) + } + } + + ctx := context.Background() + + // If stored keys is supported, the above will no no-op, so trigger auto-unseal + // using stored keys to try to unseal + if err := cores[0].UnsealWithStoredKeys(ctx); err != nil { + t.Fatal(err) + } + + // Verify unsealed + if cores[0].Sealed() { + t.Fatal("should not be sealed") + } + + TestWaitActive(t, cores[0]) + + // Existing tests rely on this; we can make a toggle to disable it + // later if we want + kvReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "sys/mounts/secret", + Data: map[string]interface{}{ + "type": "kv", + "path": "secret/", + "description": "key/value secret storage", + "options": map[string]string{ + "version": "1", + }, + }, + } + resp, err := cores[0].HandleRequest(namespace.RootContext(ctx), kvReq) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatal(err) + } + + cfg, err := cores[0].seal.BarrierConfig(ctx) + if err != nil { + t.Fatal(err) + } + + // Unseal other cores unless otherwise specified + numCores := len(cores) + if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 { + for i := 1; i < numCores; i++ { + cores[i].seal.SetCachedBarrierConfig(cfg) + for _, key := range bKeys { + if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + + // If stored keys is supported, the above will no no-op, so trigger auto-unseal + // using stored keys + if err := cores[i].UnsealWithStoredKeys(ctx); err != nil { + t.Fatal(err) + } + } + + // Let them come fully up to standby + time.Sleep(2 * time.Second) + + // Ensure cluster connection info is populated. + // Other cores should not come up as leaders. + for i := 1; i < numCores; i++ { + isLeader, _, _, err := cores[i].Leader() + if err != nil { + t.Fatal(err) + } + if isLeader { + t.Fatalf("core[%d] should not be leader", i) + } + } + } + + // + // Set test cluster core(s) and test cluster + // + cluster, err := cores[0].Cluster(context.Background()) + if err != nil { + t.Fatal(err) + } + testCluster.ID = cluster.ID + + if addAuditBackend { + // Enable auditing. 
+ auditReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "sys/audit/noop", + Data: map[string]interface{}{ + "type": "noop", + }, + } + resp, err = cores[0].HandleRequest(namespace.RootContext(ctx), auditReq) + if err != nil { + t.Fatal(err) + } + + if resp.IsError() { + t.Fatal(err) + } + } + +} + func NewMockBuiltinRegistry() *mockBuiltinRegistry { return &mockBuiltinRegistry{ forTesting: map[string]consts.PluginType{ From 9676aba7c6c39e3b156b92bd1b09071ea894dbdc Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 28 May 2020 08:59:56 -0400 Subject: [PATCH 42/86] refactor TestCluster for readability --- vault/testing.go | 74 ++++++++++++++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 31 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index f901fd0b383a..fbbd264cebfd 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1463,38 +1463,14 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te testCluster.initCores(t, opts, cores, handlers, addAuditBackend) } - getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { - transport := cleanhttp.DefaultPooledTransport() - transport.TLSClientConfig = tlsConfig.Clone() - if err := http2.ConfigureTransport(transport); err != nil { - t.Fatal(err) - } - client := &http.Client{ - Transport: transport, - CheckRedirect: func(*http.Request, []*http.Request) error { - // This can of course be overridden per-test by using its own client - return fmt.Errorf("redirects not allowed in these tests") - }, - } - config := api.DefaultConfig() - if config.Error != nil { - t.Fatal(config.Error) - } - config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) - config.HttpClient = client - config.MaxRetries = 0 - apiClient, err := api.NewClient(config) - if err != nil { - t.Fatal(err) - } - if opts == nil || !opts.SkipInit { - apiClient.SetToken(testCluster.RootToken) - } - return apiClient - } - + // Create TestClusterCores var ret []*TestClusterCore for i := 0; i < numCores; i++ { + + client := testCluster.getAPIClient( + t, opts == nil || !opts.SkipInit, + listeners[i][0].Address.Port, tlsConfigs[i]) + tcc := &TestClusterCore{ Core: cores[i], CoreConfig: coreConfigs[i], @@ -1507,7 +1483,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te Handler: handlers[i], Server: servers[i], TLSConfig: tlsConfigs[i], - Client: getAPIClient(listeners[i][0].Address.Port, tlsConfigs[i]), + Client: client, Barrier: cores[i].barrier, NodeID: fmt.Sprintf("core-%d", i), UnderlyingRawStorage: coreConfigs[i].Physical, @@ -1529,6 +1505,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te testExtraTestCoreSetup(t, priKey, tcc) } + // Cleanup testCluster.CleanupFunc = func() { for _, c := range cleanupFuncs { if c != nil { @@ -1543,6 +1520,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te } } } + + // Setup if opts != nil { if opts.SetupFunc != nil { testCluster.SetupFunc = func() { @@ -1554,6 +1533,39 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te return &testCluster } +func (testCluster *TestCluster) getAPIClient( + t testing.T, init bool, + port int, tlsConfig *tls.Config) *api.Client { + + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + client := &http.Client{ + Transport: 
transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + t.Fatal(config.Error) + } + config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) + config.HttpClient = client + config.MaxRetries = 0 + apiClient, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + if init { + apiClient.SetToken(testCluster.RootToken) + } + return apiClient +} + func (testCluster *TestCluster) newCore( t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey interface{}, From bf755da12e94a9340a7df19f29d91f5cb5588a01 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 28 May 2020 09:11:34 -0400 Subject: [PATCH 43/86] refactor TestCluster for readability --- vault/testing.go | 70 +++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index fbbd264cebfd..4a74b9f72678 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1467,9 +1467,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te var ret []*TestClusterCore for i := 0; i < numCores; i++ { - client := testCluster.getAPIClient( - t, opts == nil || !opts.SkipInit, - listeners[i][0].Address.Port, tlsConfigs[i]) + client := testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i]) tcc := &TestClusterCore{ Core: cores[i], @@ -1533,39 +1531,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te return &testCluster } -func (testCluster *TestCluster) getAPIClient( - t testing.T, init bool, - port int, tlsConfig *tls.Config) *api.Client { - - transport := cleanhttp.DefaultPooledTransport() - transport.TLSClientConfig = tlsConfig.Clone() - if err := http2.ConfigureTransport(transport); err != nil { - t.Fatal(err) - } - client := &http.Client{ - Transport: transport, - CheckRedirect: func(*http.Request, []*http.Request) error { - // This can of course be overridden per-test by using its own client - return fmt.Errorf("redirects not allowed in these tests") - }, - } - config := api.DefaultConfig() - if config.Error != nil { - t.Fatal(config.Error) - } - config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) - config.HttpClient = client - config.MaxRetries = 0 - apiClient, err := api.NewClient(config) - if err != nil { - t.Fatal(err) - } - if init { - apiClient.SetToken(testCluster.RootToken) - } - return apiClient -} - func (testCluster *TestCluster) newCore( t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey interface{}, @@ -1856,6 +1821,39 @@ func (testCluster *TestCluster) initCores( } +func (testCluster *TestCluster) getAPIClient( + t testing.T, opts *TestClusterOptions, + port int, tlsConfig *tls.Config) *api.Client { + + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + t.Fatal(config.Error) + } + config.Address = 
fmt.Sprintf("https://127.0.0.1:%d", port) + config.HttpClient = client + config.MaxRetries = 0 + apiClient, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + if opts == nil || !opts.SkipInit { + apiClient.SetToken(testCluster.RootToken) + } + return apiClient +} + func NewMockBuiltinRegistry() *mockBuiltinRegistry { return &mockBuiltinRegistry{ forTesting: map[string]consts.PluginType{ From 6b313a7b0f0ad8873ccbf242ef2f285e609a027f Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 28 May 2020 10:05:51 -0400 Subject: [PATCH 44/86] add shutdown func to TestCore --- vault/testing.go | 48 ++++++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index 4a74b9f72678..ce356f416ebe 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -838,6 +838,32 @@ func (c *TestClusterCore) Seal(t testing.T) { } } +func (c *TestClusterCore) Shutdown() error { + if c.Listeners != nil { + for _, ln := range c.Listeners { + ln.Close() + } + } + if c.licensingStopCh != nil { + close(c.licensingStopCh) + c.licensingStopCh = nil + } + + if err := c.Shutdown(); err != nil { + return err + } + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + return errors.New("timeout waiting for core to seal") + } + if c.Sealed() { + return nil + } + time.Sleep(250 * time.Millisecond) + } +} + func CleanupClusters(clusters []*TestCluster) { wg := &sync.WaitGroup{} for _, cluster := range clusters { @@ -857,7 +883,6 @@ func (c *TestCluster) Cleanup() { core.CoreConfig.Logger.SetLevel(log.Error) } - // Close listeners wg := &sync.WaitGroup{} for _, core := range c.Cores { wg.Add(1) @@ -865,29 +890,8 @@ func (c *TestCluster) Cleanup() { go func() { defer wg.Done() - if lc.Listeners != nil { - for _, ln := range lc.Listeners { - ln.Close() - } - } - if lc.licensingStopCh != nil { - close(lc.licensingStopCh) - lc.licensingStopCh = nil - } - if err := lc.Shutdown(); err != nil { lc.Logger().Error("error during shutdown; abandoning sealing", "error", err) - } else { - timeout := time.Now().Add(60 * time.Second) - for { - if time.Now().After(timeout) { - lc.Logger().Error("timeout waiting for core to seal") - } - if lc.Sealed() { - break - } - time.Sleep(250 * time.Millisecond) - } } }() } From 31eb3a7422855058103ca9a1c0bd3311cb5b1d34 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 28 May 2020 10:35:26 -0400 Subject: [PATCH 45/86] add cleanup func to TestCore --- vault/testing.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index ce356f416ebe..dba2e40bad66 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -838,7 +838,14 @@ func (c *TestClusterCore) Seal(t testing.T) { } } -func (c *TestClusterCore) Shutdown() error { +func (c *TestClusterCore) Cleanup(t testing.T) { + t.Helper() + if err := c.cleanup(); err != nil { + t.Fatal(err) + } +} + +func (c *TestClusterCore) cleanup() error { if c.Listeners != nil { for _, ln := range c.Listeners { ln.Close() @@ -890,8 +897,8 @@ func (c *TestCluster) Cleanup() { go func() { defer wg.Done() - if err := lc.Shutdown(); err != nil { - lc.Logger().Error("error during shutdown; abandoning sealing", "error", err) + if err := lc.cleanup(); err != nil { + lc.Logger().Error("error during cleanup", "error", err) } }() } From c211fe8c4a78f42abc6aac14ebf96fb7f3691364 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 09:26:31 -0400 Subject: [PATCH 46/86] create 
RestartCore --- .../sealmigration/seal_migration_test.go | 21 +- vault/testing.go | 213 ++++++++++++++++-- 2 files changed, 217 insertions(+), 17 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 7dbbe473f1d2..738e3ced1a21 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -517,8 +517,7 @@ func testTransit( } func TestFoo(t *testing.T) { - println("asdfasdfadsfadsfadfa") - //testVariousBackends(t, testFoo, true) + testVariousBackends(t, testFoo, true) } func testFoo( @@ -567,6 +566,24 @@ func testFoo( t.Fatal(err) } + println("--------------------------------------------------") + println("StopCore") + cluster.StopCore(t, 1) + + println("--------------------------------------------------") + println("RestartCore") + cluster.RestartCore(t, 1, &opts) + + println("--------------------------------------------------") + println("Sleep") + time.Sleep(10 * time.Second) + + println("--------------------------------------------------") + println("Seal") + // Seal the cluster cluster.EnsureCoresSealed(t) + + println("--------------------------------------------------") + println("exit") } diff --git a/vault/testing.go b/vault/testing.go index dba2e40bad66..e7632a16be82 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -726,6 +726,10 @@ type TestCluster struct { Logger log.Logger CleanupFunc func() SetupFunc func() + + cleanupFuncs []func() + base *CoreConfig + priKey interface{} } func (c *TestCluster) Start() { @@ -736,6 +740,7 @@ func (c *TestCluster) Start() { go core.Server.Serve(ln) } } + core.isRunning = true } if c.SetupFunc != nil { c.SetupFunc() @@ -838,18 +843,15 @@ func (c *TestClusterCore) Seal(t testing.T) { } } -func (c *TestClusterCore) Cleanup(t testing.T) { - t.Helper() - if err := c.cleanup(); err != nil { - t.Fatal(err) - } -} +func (c *TestClusterCore) stop() error { + + c.Logger().Info("stopping vault test core") -func (c *TestClusterCore) cleanup() error { if c.Listeners != nil { for _, ln := range c.Listeners { ln.Close() } + c.Logger().Info("listeners successfully shut down") } if c.licensingStopCh != nil { close(c.licensingStopCh) @@ -865,10 +867,14 @@ func (c *TestClusterCore) cleanup() error { return errors.New("timeout waiting for core to seal") } if c.Sealed() { - return nil + break } time.Sleep(250 * time.Millisecond) } + + c.Logger().Info("vault test core stopped") + c.isRunning = false + return nil } func CleanupClusters(clusters []*TestCluster) { @@ -897,7 +903,7 @@ func (c *TestCluster) Cleanup() { go func() { defer wg.Done() - if err := lc.cleanup(); err != nil { + if err := lc.stop(); err != nil { lc.Logger().Error("error during cleanup", "error", err) } }() @@ -964,6 +970,8 @@ type TestClusterCore struct { UnderlyingRawStorage physical.Backend Barrier SecurityBarrier NodeID string + + isRunning bool } type PhysicalBackendBundle struct { @@ -1105,6 +1113,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te } var testCluster TestCluster + testCluster.base = base switch { case opts != nil && opts.Logger != nil: @@ -1445,16 +1454,17 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te if err != nil { t.Fatalf("err: %v", err) } + testCluster.priKey = priKey // Create cores - cleanupFuncs := []func(){} + testCluster.cleanupFuncs = []func(){} cores := []*Core{} coreConfigs := []*CoreConfig{} for i := 0; i < numCores; 
i++ { cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], pubKey) - cleanupFuncs = append(cleanupFuncs, cleanup) + testCluster.cleanupFuncs = append(testCluster.cleanupFuncs, cleanup) cores = append(cores, c) coreConfigs = append(coreConfigs, &localConfig) @@ -1516,10 +1526,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te // Cleanup testCluster.CleanupFunc = func() { - for _, c := range cleanupFuncs { - if c != nil { - c() - } + for _, c := range testCluster.cleanupFuncs { + c() } if l, ok := testCluster.Logger.(*TestLogger); ok { if t.Failed() { @@ -1542,6 +1550,181 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te return &testCluster } +// StopCore performs an orderly shutdown of a core. +func (cluster *TestCluster) StopCore(t testing.T, idx int) { + t.Helper() + + if idx < 0 || idx > len(cluster.Cores) { + t.Fatalf("invalid core index %d", idx) + } + tcc := cluster.Cores[idx] + tcc.Logger().Info("stopping core", "core", idx) + + if !tcc.isRunning { + t.Fatalf("core is already stopped") + } + + // Stop listeners and call Shutdown() + if err := tcc.stop(); err != nil { + t.Fatal(err) + } + + // Run cleanup + cluster.cleanupFuncs[idx]() +} + +// Restart a TestClusterCore that was stopped, by replacing the +// underlying Core. +func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterOptions) { + t.Helper() + + if idx < 0 || idx > len(cluster.Cores) { + t.Fatalf("invalid core index %d", idx) + } + tcc := cluster.Cores[idx] + tcc.Logger().Info("restarting core", "core", idx) + + if tcc.isRunning { + t.Fatalf("cannot restart a running core") + } + + //------------------------------------ + + // Create a new Core + newCore, err := NewCore(tcc.CoreConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + newCore.coreNumber = tcc.Core.coreNumber + newCore.PR1103disabled = tcc.Core.PR1103disabled + + cluster.setupClusterListener( + t, idx, newCore, tcc.CoreConfig, + opts, tcc.Listeners, tcc.Handler) + + testAdjustTestCore(cluster.base, tcc) + testExtraTestCoreSetup(t, cluster.priKey, tcc) + + tcc.Core = newCore + + //------------------------------------ + + // start listeners + for _, ln := range tcc.Listeners { + tcc.Logger().Info("starting listener for core", "port", ln.Address.Port) + go tcc.Server.Serve(ln) + } + + tcc.isRunning = true +} + +//type TestClusterCore struct { +// *Core +// CoreConfig *CoreConfig +// Client *api.Client +// Handler http.Handler +// Listeners []*TestListener +// ReloadFuncs *map[string][]reloadutil.ReloadFunc +// ReloadFuncsLock *sync.RWMutex +// Server *http.Server +// ServerCert *x509.Certificate +// ServerCertBytes []byte +// ServerCertPEM []byte +// ServerKey *ecdsa.PrivateKey +// ServerKeyPEM []byte +// TLSConfig *tls.Config +// UnderlyingStorage physical.Backend +// UnderlyingRawStorage physical.Backend +// Barrier SecurityBarrier +// NodeID string +// +// isRunning bool +//} + +// // Create cores +// cleanupFuncs := []func(){} +// cores := []*Core{} +// coreConfigs := []*CoreConfig{} +// +// for i := 0; i < numCores; i++ { +// cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], pubKey) +// +// cleanupFuncs = append(cleanupFuncs, cleanup) +// cores = append(cores, c) +// coreConfigs = append(coreConfigs, &localConfig) +// +// if handler != nil { +// handlers[i] = handler +// servers[i].Handler = handlers[i] +// } +// } +// +// // Clustering setup +// for i := 0; i < numCores; i++ { 
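For orientation, the StopCore/RestartCore pair added above is what the later seal-migration tests lean on to bounce individual nodes. A minimal usage sketch, assuming a plain Shamir-sealed cluster from NewTestCluster and that the restarted node is re-unsealed with the cluster's barrier keys (migration-mode restarts instead go through unsealMigrate):

	// Bounce follower idx and bring it back up.
	cluster.StopCore(t, idx)          // close listeners, shut the Core down, run its cleanup func
	cluster.RestartCore(t, idx, opts) // build a fresh Core on the same address and start serving

	// A freshly built Core comes back sealed; feed it the barrier keys again.
	for _, key := range cluster.BarrierKeys {
		if _, err := cluster.Cores[idx].Unseal(TestKeyCopy(key)); err != nil {
			t.Fatal(err)
		}
	}
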
+// testCluster.setupClusterListener(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i]) +// } +// +// // Initialize cores +// if opts == nil || !opts.SkipInit { +// testCluster.initCores(t, opts, cores, handlers, addAuditBackend) +// } +// +// // Create TestClusterCores +// var ret []*TestClusterCore +// for i := 0; i < numCores; i++ { +// +// client := testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i]) +// +// tcc := &TestClusterCore{ +// Core: cores[i], +// CoreConfig: coreConfigs[i], +// ServerKey: certInfoSlice[i].key, +// ServerKeyPEM: certInfoSlice[i].keyPEM, +// ServerCert: certInfoSlice[i].cert, +// ServerCertBytes: certInfoSlice[i].certBytes, +// ServerCertPEM: certInfoSlice[i].certPEM, +// Listeners: listeners[i], +// Handler: handlers[i], +// Server: servers[i], +// TLSConfig: tlsConfigs[i], +// Client: client, +// Barrier: cores[i].barrier, +// NodeID: fmt.Sprintf("core-%d", i), +// UnderlyingRawStorage: coreConfigs[i].Physical, +// } +// tcc.ReloadFuncs = &cores[i].reloadFuncs +// tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock +// tcc.ReloadFuncsLock.Lock() +// (*tcc.ReloadFuncs)["listener|tcp"] = []reloadutil.ReloadFunc{certGetters[i].Reload} +// tcc.ReloadFuncsLock.Unlock() +// +// testAdjustTestCore(base, tcc) +// +// ret = append(ret, tcc) +// } +// +// testCluster.Cores = ret +// +// for _, tcc := range testCluster.Cores { +// testExtraTestCoreSetup(t, priKey, tcc) +// } +// +// // Cleanup +// testCluster.CleanupFunc = func() { +// for _, c := range cleanupFuncs { +// if c != nil { +// c() +// } +// } +// if l, ok := testCluster.Logger.(*TestLogger); ok { +// if t.Failed() { +// _ = l.File.Close() +// } else { +// _ = os.Remove(l.Path) +// } +// } +// } + func (testCluster *TestCluster) newCore( t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey interface{}, From 91eb9c1469250f0d5d7fbe7e44b541daf0bd1b73 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 11:57:46 -0400 Subject: [PATCH 47/86] stub out TestSealMigration_ShamirToTransit_Post14 --- .../sealmigration/seal_migration_test.go | 182 +++++++++++++----- vault/testing.go | 44 ++++- 2 files changed, 175 insertions(+), 51 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 738e3ced1a21..7b4623cad52d 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -88,7 +88,9 @@ func testSealMigrationShamirToTransit_Pre14( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) + cluster := initializeShamir(t, logger, storage, basePort) + rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys + cluster.EnsureCoresSealed(t) // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -127,9 +129,6 @@ func migrateFromShamirToTransit_Pre14( SkipInit: true, // N.B. Providing a transit seal puts us in migration mode. SealFunc: func() vault.Seal { - // Each core will create its own transit seal here. Later - // on it won't matter which one of these we end up using, since - // they were all created from the same transit key. 
transitSeal = tss.MakeSeal(t, "transit-seal-key") return transitSeal }, @@ -175,6 +174,107 @@ func migrateFromShamirToTransit_Pre14( return transitSeal } +// TestSealMigration_ShamirToTransit_Post14 tests shamir-to-transit seal +// migration, using the post-1.4 method of bring individual nodes in the cluster +// to do the migration. +func TestSealMigration_ShamirToTransit_Post14(t *testing.T) { + testVariousBackends(t, testSealMigrationShamirToTransit_Post14, true) +} + +func testSealMigrationShamirToTransit_Post14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + var baseClusterPort = basePort + 10 + + //---------------------------------------------------- + // Initialize Shamir + + // Start the cluster + var conf = vault.CoreConfig{ + Logger: logger.Named("initializeShamir"), + } + var opts = vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: numTestCores, + BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), + BaseClusterListenPort: baseClusterPort, + } + storage.Setup(&conf, &opts) + cluster := vault.NewTestCluster(t, &conf, &opts) + cluster.Start() + defer func() { + storage.Cleanup(t, cluster) + cluster.Cleanup() + }() + + leader := cluster.Cores[0] + client := leader.Client + + // Unseal + if storage.IsRaft { + testhelpers.RaftClusterJoinNodes(t, cluster) + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + t.Fatal(err) + } + } else { + cluster.UnsealCores(t) + } + testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + + // Write a secret that we will read back out later. + _, err := client.Logical().Write( + "secret/foo", + map[string]interface{}{"zork": "quux"}) + if err != nil { + t.Fatal(err) + } + + rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys + + //---------------------------------------------------- + // Migrate to Transit + + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer func() { + tss.EnsureCoresSealed(t) + tss.Cleanup() + }() + tss.MakeKey(t, "transit-seal-key") + + // N.B. Providing a transit seal puts us in migration mode. + var transitSeal vault.Seal + opts.SealFunc = func() vault.Seal { + println("asd;lfkjasdl;fkjal;skfjl;askdjfl;adksjf;lakdsjf;ladksf") + transitSeal = tss.MakeSeal(t, "transit-seal-key") + return transitSeal + } + + println("--------------------------------------------------") + + // Unseal and migrate to Transit. + for i := 1; i < 2; i++ { + cluster.StopCore(t, i) + cluster.RestartCore(t, i, &opts) + + println("-------------------") + time.Sleep(15 * time.Second) + + // Note that the barrier keys are being used as recovery keys + client := cluster.Cores[i].Client + client.SetToken(rootToken) + unsealMigrate(t, client, barrierKeys, true) + } + + // done + println("--------------------------------------------------") + cluster.EnsureCoresSealed(t) + + //---------------------------------------------------- + // Run Transit +} + func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { for i, key := range keys { @@ -238,7 +338,7 @@ func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, s // initializeShamir initializes a brand new backend storage with Shamir. 
func initializeShamir( t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) (string, [][]byte) { + storage teststorage.ReusableStorage, basePort int) *vault.TestCluster { var baseClusterPort = basePort + 10 @@ -282,10 +382,7 @@ func initializeShamir( t.Fatal(err) } - // Seal the cluster - cluster.EnsureCoresSealed(t) - - return cluster.RootToken, cluster.BarrierKeys + return cluster } // runShamir uses a pre-populated backend storage with Shamir. @@ -374,9 +471,6 @@ func initializeTransit( BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), BaseClusterListenPort: baseClusterPort, SealFunc: func() vault.Seal { - // Each core will create its own transit seal here. Later - // on it won't matter which one of these we end up using, since - // they were all created from the same transit key. transitSeal = tss.MakeSeal(t, "transit-seal-key") return transitSeal }, @@ -483,38 +577,38 @@ func runTransit( cluster.EnsureCoresSealed(t) } -// TestShamir is a temporary test that exercises the reusable raft storage. -// It will be replace once we do the post-1.4 migration testing. -func TestShamir(t *testing.T) { - testVariousBackends(t, testShamir, true) -} - -func testShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) - runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -} - -// TestTransit is a temporary test that exercises the reusable raft storage. -// It will be replace once we do the post-1.4 migration testing. -func TestTransit(t *testing.T) { - testVariousBackends(t, testTransit, true) -} - -func testTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Create the transit server. - tss := sealhelper.NewTransitSealServer(t) - defer tss.Cleanup() - tss.MakeKey(t, "transit-seal-key") - - rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - runTransit(t, logger, storage, basePort, rootToken, transitSeal) -} +//// TestShamir is a temporary test that exercises the reusable raft storage. +//// It will be replace once we do the post-1.4 migration testing. +//func TestShamir(t *testing.T) { +// testVariousBackends(t, testShamir, true) +//} +// +//func testShamir( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) +// runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +//} +// +//// TestTransit is a temporary test that exercises the reusable raft storage. +//// It will be replace once we do the post-1.4 migration testing. +//func TestTransit(t *testing.T) { +// testVariousBackends(t, testTransit, true) +//} +// +//func testTransit( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// // Create the transit server. 
+// tss := sealhelper.NewTransitSealServer(t) +// defer tss.Cleanup() +// tss.MakeKey(t, "transit-seal-key") +// +// rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) +// runTransit(t, logger, storage, basePort, rootToken, transitSeal) +//} func TestFoo(t *testing.T) { testVariousBackends(t, testFoo, true) diff --git a/vault/testing.go b/vault/testing.go index e7632a16be82..e389c7cb7938 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -729,6 +729,7 @@ type TestCluster struct { cleanupFuncs []func() base *CoreConfig + pubKey interface{} priKey interface{} } @@ -1454,6 +1455,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te if err != nil { t.Fatalf("err: %v", err) } + testCluster.pubKey = pubKey testCluster.priKey = priKey // Create cores @@ -1591,22 +1593,26 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO //------------------------------------ // Create a new Core - newCore, err := NewCore(tcc.CoreConfig) - if err != nil { - t.Fatalf("err: %v", err) + cleanup, newCore, localConfig, handler := cluster.newCore( + t, idx, tcc.CoreConfig, opts, tcc.Listeners, cluster.pubKey) + if handler != nil { + tcc.Handler = handler + tcc.Server.Handler = handler } - newCore.coreNumber = tcc.Core.coreNumber - newCore.PR1103disabled = tcc.Core.PR1103disabled + + cluster.cleanupFuncs[idx] = cleanup + tcc.Core = newCore + tcc.CoreConfig = &localConfig cluster.setupClusterListener( t, idx, newCore, tcc.CoreConfig, opts, tcc.Listeners, tcc.Handler) + tcc.Client = cluster.getAPIClient(t, opts, tcc.Listeners[0].Address.Port, tcc.TLSConfig) + testAdjustTestCore(cluster.base, tcc) testExtraTestCoreSetup(t, cluster.priKey, tcc) - tcc.Core = newCore - //------------------------------------ // start listeners @@ -1618,6 +1624,30 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO tcc.isRunning = true } +//type TestCluster struct { +// BarrierKeys [][]byte +// RecoveryKeys [][]byte +// CACert *x509.Certificate +// CACertBytes []byte +// CACertPEM []byte +// CACertPEMFile string +// CAKey *ecdsa.PrivateKey +// CAKeyPEM []byte +// Cores []*TestClusterCore +// ID string +// RootToken string +// RootCAs *x509.CertPool +// TempDir string +// ClientAuthRequired bool +// Logger log.Logger +// CleanupFunc func() +// SetupFunc func() +// +// cleanupFuncs []func() +// base *CoreConfig +// priKey interface{} +//} + //type TestClusterCore struct { // *Core // CoreConfig *CoreConfig From 0e1d20b57e88772114f182f2e00fb2c397e552a4 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 12:28:43 -0400 Subject: [PATCH 48/86] refactor address handling in NewTestCluster --- vault/testing.go | 43 +++++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index e389c7cb7938..81fab8489fb7 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1111,6 +1111,11 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te t.Fatal("could not parse given base IP") } certIPs = append(certIPs, baseAddr.IP) + } else { + baseAddr = &net.TCPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, + } } var testCluster TestCluster @@ -1264,29 +1269,39 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te // // Listener setup // - ports := make([]int, numCores) - if baseAddr != nil { - for i := 0; i < numCores; i++ { - ports[i] = baseAddr.Port + i - } - } else { - baseAddr = 
&net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 0, - } - } - + //ports := make([]int, numCores) + //if baseAddr != nil { + // for i := 0; i < numCores; i++ { + // ports[i] = baseAddr.Port + i + // } + //} else { + // baseAddr = &net.TCPAddr{ + // IP: net.ParseIP("127.0.0.1"), + // Port: 0, + // } + //} + + addresses := []*net.TCPAddr{} listeners := [][]*TestListener{} servers := []*http.Server{} handlers := []http.Handler{} tlsConfigs := []*tls.Config{} certGetters := []*reloadutil.CertificateGetter{} for i := 0; i < numCores; i++ { - baseAddr.Port = ports[i] - ln, err := net.ListenTCP("tcp", baseAddr) + + addr := &net.TCPAddr{ + IP: baseAddr.IP, + Port: 0, + } + if baseAddr.Port != 0 { + addr.Port = baseAddr.Port + i + } + ln, err := net.ListenTCP("tcp", addr) if err != nil { t.Fatal(err) } + addresses = append(addresses, addr) + certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0755) From 3738d76fed02fb2dfcf69d4752c1873a963be051 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 12:43:38 -0400 Subject: [PATCH 49/86] fix listener setup in newCore() --- vault/testing.go | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index 81fab8489fb7..83041bc90b2d 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -957,6 +957,7 @@ type TestClusterCore struct { CoreConfig *CoreConfig Client *api.Client Handler http.Handler + Address *net.TCPAddr Listeners []*TestListener ReloadFuncs *map[string][]reloadutil.ReloadFunc ReloadFuncsLock *sync.RWMutex @@ -1269,18 +1270,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te // // Listener setup // - //ports := make([]int, numCores) - //if baseAddr != nil { - // for i := 0; i < numCores; i++ { - // ports[i] = baseAddr.Port + i - // } - //} else { - // baseAddr = &net.TCPAddr{ - // IP: net.ParseIP("127.0.0.1"), - // Port: 0, - // } - //} - addresses := []*net.TCPAddr{} listeners := [][]*TestListener{} servers := []*http.Server{} @@ -1296,6 +1285,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te if baseAddr.Port != 0 { addr.Port = baseAddr.Port + i } + ln, err := net.ListenTCP("tcp", addr) if err != nil { t.Fatal(err) @@ -1515,6 +1505,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te ServerCert: certInfoSlice[i].cert, ServerCertBytes: certInfoSlice[i].certBytes, ServerCertPEM: certInfoSlice[i].certPEM, + Address: addresses[i], Listeners: listeners[i], Handler: handlers[i], Server: servers[i], @@ -1607,12 +1598,29 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO //------------------------------------ + // Set up listeners + ln, err := net.ListenTCP("tcp", tcc.Address) + if err != nil { + t.Fatal(err) + } + tcc.Listeners = []*TestListener{&TestListener{ + Listener: tls.NewListener(ln, tcc.TLSConfig), + Address: ln.Addr().(*net.TCPAddr), + }, + } + + tcc.Handler = http.NewServeMux() + tcc.Server = &http.Server{ + Handler: tcc.Handler, + ErrorLog: cluster.Logger.StandardLogger(nil), + } + // Create a new Core - cleanup, newCore, localConfig, handler := cluster.newCore( + cleanup, newCore, localConfig, coreHandler := cluster.newCore( t, idx, tcc.CoreConfig, opts, 
tcc.Listeners, cluster.pubKey) - if handler != nil { - tcc.Handler = handler - tcc.Server.Handler = handler + if coreHandler != nil { + tcc.Handler = coreHandler + tcc.Server.Handler = coreHandler } cluster.cleanupFuncs[idx] = cleanup From 7e3b1cf8778ef6ba01220092f4ffcd41dd0d8504 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 13:51:10 -0400 Subject: [PATCH 50/86] work on post-1.4 migration testing --- .../sealmigration/seal_migration_test.go | 218 ++++++++++++------ vault/testing.go | 28 ++- 2 files changed, 163 insertions(+), 83 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 7b4623cad52d..bc876f62c0be 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -88,7 +88,7 @@ func testSealMigrationShamirToTransit_Pre14( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - cluster := initializeShamir(t, logger, storage, basePort) + cluster, _ := initializeShamir(t, logger, storage, basePort) rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys cluster.EnsureCoresSealed(t) @@ -185,55 +185,8 @@ func testSealMigrationShamirToTransit_Post14( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) { - var baseClusterPort = basePort + 10 - - //---------------------------------------------------- - // Initialize Shamir - - // Start the cluster - var conf = vault.CoreConfig{ - Logger: logger.Named("initializeShamir"), - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - - // Unseal - if storage.IsRaft { - testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } else { - cluster.UnsealCores(t) - } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - - // Write a secret that we will read back out later. - _, err := client.Logical().Write( - "secret/foo", - map[string]interface{}{"zork": "quux"}) - if err != nil { - t.Fatal(err) - } - - rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys - - //---------------------------------------------------- - // Migrate to Transit + // Initialize the backend using shamir + cluster, opts := initializeShamir(t, logger, storage, basePort) // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -243,38 +196,167 @@ func testSealMigrationShamirToTransit_Post14( }() tss.MakeKey(t, "transit-seal-key") + // Migrate the backend from shamir to transit. Note that the barrier keys + // are now the recovery keys. + _ = migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) + //transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) + cluster.EnsureCoresSealed(t) + + //// Run the backend with transit. 
+ //runTransit(t, logger, storage, basePort, rootToken, transitSeal) +} + +func migrateFromShamirToTransit_Post14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, + tss *sealhelper.TransitSealServer, + cluster *vault.TestCluster, opts *vault.TestClusterOptions, +) vault.Seal { + // N.B. Providing a transit seal puts us in migration mode. var transitSeal vault.Seal opts.SealFunc = func() vault.Seal { - println("asd;lfkjasdl;fkjal;skfjl;askdjfl;adksjf;lakdsjf;ladksf") transitSeal = tss.MakeSeal(t, "transit-seal-key") return transitSeal } println("--------------------------------------------------") - // Unseal and migrate to Transit. - for i := 1; i < 2; i++ { - cluster.StopCore(t, i) - cluster.RestartCore(t, i, &opts) + // Restart each follower with the new config, and migrate to Transit. + //rootToken, recoveryKeys := cluster.RootToken, cluster.BarrierKeys + for i := 1; i < numTestCores; i++ { - println("-------------------") - time.Sleep(15 * time.Second) + fmt.Printf("------------------- aaa %d\n", i) + cluster.StopCore(t, i) + cluster.RestartCore(t, i, opts) + time.Sleep(5 * time.Second) - // Note that the barrier keys are being used as recovery keys - client := cluster.Cores[i].Client - client.SetToken(rootToken) - unsealMigrate(t, client, barrierKeys, true) + //// Note that the barrier keys are being used as recovery keys + //fmt.Printf("------------------- bbb %d\n", i) + //client := cluster.Cores[i].Client + //client.SetToken(rootToken) + //unsealMigrate(t, client, recoveryKeys, true) + //time.Sleep(10 * time.Second) } - // done - println("--------------------------------------------------") - cluster.EnsureCoresSealed(t) + //// Wait for migration to finish. Sadly there is no callback, and the + //// test will fail later on if we don't do this. + //time.Sleep(10 * time.Second) - //---------------------------------------------------- - // Run Transit + //// Read the secret + //secret, err := client.Logical().Read("secret/foo") + //if err != nil { + // t.Fatal(err) + //} + //if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + // t.Fatal(diff) + //} + + //// Make sure the seal configs were updated correctly. 
+ //b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) + //if err != nil { + // t.Fatal(err) + //} + //verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) + //verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) + + return transitSeal } +//func testSealMigrationShamirToTransit_Post14( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// var baseClusterPort = basePort + 10 +// +// //---------------------------------------------------- +// // Initialize Shamir +// +// // Start the cluster +// var conf = vault.CoreConfig{ +// Logger: logger.Named("initializeShamir"), +// } +// var opts = vault.TestClusterOptions{ +// HandlerFunc: vaulthttp.Handler, +// NumCores: numTestCores, +// BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), +// BaseClusterListenPort: baseClusterPort, +// } +// storage.Setup(&conf, &opts) +// cluster := vault.NewTestCluster(t, &conf, &opts) +// cluster.Start() +// defer func() { +// storage.Cleanup(t, cluster) +// cluster.Cleanup() +// }() +// +// leader := cluster.Cores[0] +// client := leader.Client +// +// // Unseal +// if storage.IsRaft { +// testhelpers.RaftClusterJoinNodes(t, cluster) +// if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { +// t.Fatal(err) +// } +// } else { +// cluster.UnsealCores(t) +// } +// testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) +// +// // Write a secret that we will read back out later. +// _, err := client.Logical().Write( +// "secret/foo", +// map[string]interface{}{"zork": "quux"}) +// if err != nil { +// t.Fatal(err) +// } +// +// rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys +// +// //---------------------------------------------------- +// // Migrate to Transit +// +// // Create the transit server. +// tss := sealhelper.NewTransitSealServer(t) +// defer func() { +// tss.EnsureCoresSealed(t) +// tss.Cleanup() +// }() +// tss.MakeKey(t, "transit-seal-key") +// +// // N.B. Providing a transit seal puts us in migration mode. +// var transitSeal vault.Seal +// opts.SealFunc = func() vault.Seal { +// println("asd;lfkjasdl;fkjal;skfjl;askdjfl;adksjf;lakdsjf;ladksf") +// transitSeal = tss.MakeSeal(t, "transit-seal-key") +// return transitSeal +// } +// +// println("--------------------------------------------------") +// +// // Unseal and migrate to Transit. +// for i := 1; i < numTestCores; i++ { +// println("-------------------") +// +// cluster.StopCore(t, i) +// cluster.RestartCore(t, i, &opts) +// time.Sleep(5 * time.Second) +// +// // Note that the barrier keys are being used as recovery keys +// client := cluster.Cores[i].Client +// client.SetToken(rootToken) +// unsealMigrate(t, client, barrierKeys, true) +// } +// +// // done +// println("--------------------------------------------------") +// cluster.EnsureCoresSealed(t) +// +// //---------------------------------------------------- +// // Run Transit +//} + func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { for i, key := range keys { @@ -338,7 +420,7 @@ func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, s // initializeShamir initializes a brand new backend storage with Shamir. 
func initializeShamir( t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) *vault.TestCluster { + storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions) { var baseClusterPort = basePort + 10 @@ -382,7 +464,7 @@ func initializeShamir( t.Fatal(err) } - return cluster + return cluster, &opts } // runShamir uses a pre-populated backend storage with Shamir. diff --git a/vault/testing.go b/vault/testing.go index 83041bc90b2d..7a960997b1ec 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -741,7 +741,8 @@ func (c *TestCluster) Start() { go core.Server.Serve(ln) } } - core.isRunning = true + + //core.isRunning = true } if c.SetupFunc != nil { c.SetupFunc() @@ -874,7 +875,7 @@ func (c *TestClusterCore) stop() error { } c.Logger().Info("vault test core stopped") - c.isRunning = false + //c.isRunning = false return nil } @@ -973,7 +974,7 @@ type TestClusterCore struct { Barrier SecurityBarrier NodeID string - isRunning bool + //isRunning bool } type PhysicalBackendBundle struct { @@ -1568,9 +1569,9 @@ func (cluster *TestCluster) StopCore(t testing.T, idx int) { tcc := cluster.Cores[idx] tcc.Logger().Info("stopping core", "core", idx) - if !tcc.isRunning { - t.Fatalf("core is already stopped") - } + //if !tcc.isRunning { + // t.Fatalf("core is already stopped") + //} // Stop listeners and call Shutdown() if err := tcc.stop(); err != nil { @@ -1592,11 +1593,9 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO tcc := cluster.Cores[idx] tcc.Logger().Info("restarting core", "core", idx) - if tcc.isRunning { - t.Fatalf("cannot restart a running core") - } - - //------------------------------------ + //if tcc.isRunning { + // t.Fatalf("cannot restart a running core") + //} // Set up listeners ln, err := net.ListenTCP("tcp", tcc.Address) @@ -1636,15 +1635,14 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO testAdjustTestCore(cluster.base, tcc) testExtraTestCoreSetup(t, cluster.priKey, tcc) - //------------------------------------ - - // start listeners + // Start listeners for _, ln := range tcc.Listeners { tcc.Logger().Info("starting listener for core", "port", ln.Address.Port) go tcc.Server.Serve(ln) } - tcc.isRunning = true + tcc.Logger().Info("restarted test core", "core", idx) + //tcc.isRunning = true } //type TestCluster struct { From 4481ac1b557204378c2fbbfa2626515957249aea Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 14:11:57 -0400 Subject: [PATCH 51/86] clean up pre-1.4 test --- .../sealmigration/seal_migration_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index bc876f62c0be..c07478e539ce 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -88,9 +88,10 @@ func testSealMigrationShamirToTransit_Pre14( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - cluster, _ := initializeShamir(t, logger, storage, basePort) + cluster, _, cleanup := initializeShamir(t, logger, storage, basePort) rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys cluster.EnsureCoresSealed(t) + cleanup() // Create the transit server. 
tss := sealhelper.NewTransitSealServer(t) @@ -186,7 +187,7 @@ func testSealMigrationShamirToTransit_Post14( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - cluster, opts := initializeShamir(t, logger, storage, basePort) + cluster, opts, cleanup := initializeShamir(t, logger, storage, basePort) // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -201,6 +202,7 @@ func testSealMigrationShamirToTransit_Post14( _ = migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) //transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) cluster.EnsureCoresSealed(t) + cleanup() //// Run the backend with transit. //runTransit(t, logger, storage, basePort, rootToken, transitSeal) @@ -420,7 +422,7 @@ func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, s // initializeShamir initializes a brand new backend storage with Shamir. func initializeShamir( t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions) { + storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions, func()) { var baseClusterPort = basePort + 10 @@ -437,10 +439,10 @@ func initializeShamir( storage.Setup(&conf, &opts) cluster := vault.NewTestCluster(t, &conf, &opts) cluster.Start() - defer func() { + cleanup := func() { storage.Cleanup(t, cluster) cluster.Cleanup() - }() + } leader := cluster.Cores[0] client := leader.Client @@ -464,7 +466,7 @@ func initializeShamir( t.Fatal(err) } - return cluster, &opts + return cluster, &opts, cleanup } // runShamir uses a pre-populated backend storage with Shamir. From 4f5eba6d091a0cbc2faa96c2cde7509c3c250ebf Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 29 May 2020 15:05:10 -0400 Subject: [PATCH 52/86] TestSealMigration_ShamirToTransit_Post14 works for non-raft --- .../sealmigration/seal_migration_test.go | 146 +++++++++++------- 1 file changed, 91 insertions(+), 55 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index c07478e539ce..8438c8769d6b 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -42,25 +42,25 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 20000) }) - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) //if 
includeRaft { // t.Run("raft", func(t *testing.T) { @@ -151,6 +151,7 @@ func migrateFromShamirToTransit_Pre14( // Wait for migration to finish. Sadly there is no callback, and the // test will fail later on if we don't do this. + // TODO -- actually there is a callback, we can monitor this and await time.Sleep(10 * time.Second) // Read the secret @@ -188,6 +189,7 @@ func testSealMigrationShamirToTransit_Post14( // Initialize the backend using shamir cluster, opts, cleanup := initializeShamir(t, logger, storage, basePort) + rootToken := cluster.RootToken // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -197,15 +199,13 @@ func testSealMigrationShamirToTransit_Post14( }() tss.MakeKey(t, "transit-seal-key") - // Migrate the backend from shamir to transit. Note that the barrier keys - // are now the recovery keys. - _ = migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) - //transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) + // Migrate the backend from shamir to transit. + transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) cluster.EnsureCoresSealed(t) cleanup() - //// Run the backend with transit. - //runTransit(t, logger, storage, basePort, rootToken, transitSeal) + // Run the backend with transit. + runTransit(t, logger, storage, basePort, rootToken, transitSeal) } func migrateFromShamirToTransit_Post14( @@ -222,49 +222,85 @@ func migrateFromShamirToTransit_Post14( return transitSeal } - println("--------------------------------------------------") - - // Restart each follower with the new config, and migrate to Transit. - //rootToken, recoveryKeys := cluster.RootToken, cluster.BarrierKeys + // Restart each follower with the new config, and migrate to Transit. + // Note that the barrier keys are being used as recovery keys. + rootToken, recoveryKeys := cluster.RootToken, cluster.BarrierKeys for i := 1; i < numTestCores; i++ { - - fmt.Printf("------------------- aaa %d\n", i) cluster.StopCore(t, i) cluster.RestartCore(t, i, opts) + + client := cluster.Cores[i].Client + client.SetToken(rootToken) + unsealMigrate(t, client, recoveryKeys, true) time.Sleep(5 * time.Second) + } - //// Note that the barrier keys are being used as recovery keys - //fmt.Printf("------------------- bbb %d\n", i) - //client := cluster.Cores[i].Client - //client.SetToken(rootToken) - //unsealMigrate(t, client, recoveryKeys, true) - //time.Sleep(10 * time.Second) + // Bring down the leader + cluster.StopCore(t, 0) + + // Wait for the followers to establish a new leader + leaderIdx, err := awaitLeader(t, cluster) + if err != nil { + t.Fatal(err) } + leader := cluster.Cores[leaderIdx] + client := leader.Client + client.SetToken(rootToken) - //// Wait for migration to finish. Sadly there is no callback, and the - //// test will fail later on if we don't do this. - //time.Sleep(10 * time.Second) + // Wait for migration to finish. Sadly there is no callback, and the + // test will fail later on if we don't do this. 
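The fixed sleep that follows could in principle become a bounded poll. A sketch only, assuming the api package's SealStatusResponse carries a Migration flag that stays true while a seal migration is still in flight, with client being the new leader's root-token client from just above:

	// Poll seal status until the node reports unsealed and no active migration.
	deadline := time.Now().Add(60 * time.Second)
	for time.Now().Before(deadline) {
		status, err := client.Sys().SealStatus()
		if err == nil && !status.Sealed && !status.Migration {
			break // node is unsealed and no migration is in progress
		}
		time.Sleep(500 * time.Millisecond)
	}
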
+ // TODO -- actually there is a callback, we can monitor this and await + time.Sleep(10 * time.Second) - //// Read the secret - //secret, err := client.Logical().Read("secret/foo") - //if err != nil { - // t.Fatal(err) - //} - //if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - // t.Fatal(diff) - //} + // Bring core 0 back up + // TODO -- make sure its not migrating + cluster.RestartCore(t, 0, opts) - //// Make sure the seal configs were updated correctly. - //b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) - //if err != nil { - // t.Fatal(err) - //} - //verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) - //verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) + // Read the secret + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Make sure the seal configs were updated correctly. + b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) + if err != nil { + t.Fatal(err) + } + verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) + verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) return transitSeal } +// awaitLeader waits for one of the followers to become leader. +func awaitLeader(t *testing.T, cluster *vault.TestCluster) (int, error) { + + timeout := time.Now().Add(30 * time.Second) + for { + if time.Now().After(timeout) { + break + } + + for i := 1; i < numTestCores; i++ { + isLeader, _, _, err := cluster.Cores[i].Leader() + if err != nil { + t.Fatal(err) + } + if isLeader { + return i, nil + } + } + + time.Sleep(time.Second) + } + + return 0, fmt.Errorf("timeout waiting leader") +} + //func testSealMigrationShamirToTransit_Post14( // t *testing.T, logger hclog.Logger, // storage teststorage.ReusableStorage, basePort int) { From debbbb198cf319dcdc9c5f92a604287b7ddd082c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 09:11:48 -0400 Subject: [PATCH 53/86] work on raft TestSealMigration_ShamirToTransit_Post14 --- helper/testhelpers/testhelpers.go | 5 + .../teststorage/teststorage_reusable.go | 13 +- .../sealmigration/seal_migration_test.go | 294 ++++-------------- 3 files changed, 67 insertions(+), 245 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index b9aff79f3b14..a705fd1408c2 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -531,6 +531,11 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r } } +// SetRaftAddressProvider sets a ServerAddressProvider on a node. +func SetRaftAddressProvider(t testing.T, core *vault.TestClusterCore, provider raftlib.ServerAddressProvider) { + core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider) +} + // VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. // the correct number of servers, having the correct NodeIDs, and exactly one // leader. diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index bb78a6d972ca..02cf117f2009 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -94,10 +94,7 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (Re // Close open files being used by raft. 
Cleanup: func(t testing.T, cluster *vault.TestCluster) { for _, core := range cluster.Cores { - raftStorage := core.UnderlyingRawStorage.(*raft.RaftBackend) - if err := raftStorage.Close(); err != nil { - t.Fatal(err) - } + CloseRaftStorage(t, core) } }, } @@ -111,6 +108,14 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (Re return storage, cleanup } +// CloseRaftStorage closes open files being used by raft. +func CloseRaftStorage(t testing.T, core *vault.TestClusterCore) { + raftStorage := core.UnderlyingRawStorage.(*raft.RaftBackend) + if err := raftStorage.Close(); err != nil { + t.Fatal(err) + } +} + func makeRaftDir(t testing.T) string { raftDir, err := ioutil.TempDir("", "vault-raft-") if err != nil { diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 8438c8769d6b..2d1d1a70fc47 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -32,46 +32,46 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - t.Run("inmem", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("inmem") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeInmemBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 20000) - }) - - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) - - //if includeRaft { - // t.Run("raft", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("raft") - // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - // defer cleanup() - // tf(t, logger, storage, 54000) - // }) - //} + //t.Run("inmem", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("inmem") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeInmemBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 51000) + //}) + + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) + + if includeRaft { + t.Run("raft", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + defer cleanup() + tf(t, logger, storage, 54000) + }) + } } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -200,7 +200,7 @@ func testSealMigrationShamirToTransit_Post14( tss.MakeKey(t, "transit-seal-key") // Migrate the backend from shamir to transit. 
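// Conceptually, the seal handed back by tss.MakeSeal wraps Vault's barrier
// key by round-tripping it through the transit server's encrypt/decrypt
// endpoints for the key created above. A rough illustration of that round
// trip against the plain transit API (transitWrapSketch is a hypothetical
// helper, not the seal's actual code path):
func transitWrapSketch(t *testing.T, tssClient *api.Client, plaintext []byte) string {
	t.Helper()

	secret, err := tssClient.Logical().Write("transit/encrypt/transit-seal-key",
		map[string]interface{}{
			"plaintext": base64.StdEncoding.EncodeToString(plaintext),
		})
	if err != nil {
		t.Fatal(err)
	}
	// The ciphertext is what gets persisted; decryption goes back through
	// transit/decrypt/transit-seal-key on unseal.
	return secret.Data["ciphertext"].(string)
}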
- transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, tss, cluster, opts) + transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) cluster.EnsureCoresSealed(t) cleanup() @@ -210,7 +210,7 @@ func testSealMigrationShamirToTransit_Post14( func migrateFromShamirToTransit_Post14( t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, + storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, cluster *vault.TestCluster, opts *vault.TestClusterOptions, ) vault.Seal { @@ -222,11 +222,18 @@ func migrateFromShamirToTransit_Post14( return transitSeal } + var baseClusterPort = basePort + 10 + provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) + // Restart each follower with the new config, and migrate to Transit. // Note that the barrier keys are being used as recovery keys. rootToken, recoveryKeys := cluster.RootToken, cluster.BarrierKeys for i := 1; i < numTestCores; i++ { cluster.StopCore(t, i) + if storage.IsRaft { + teststorage.CloseRaftStorage(t, cluster.Cores[i]) + testhelpers.SetRaftAddressProvider(t, cluster.Cores[i], provider) + } cluster.RestartCore(t, i, opts) client := cluster.Cores[i].Client @@ -237,6 +244,10 @@ func migrateFromShamirToTransit_Post14( // Bring down the leader cluster.StopCore(t, 0) + if storage.IsRaft { + teststorage.CloseRaftStorage(t, cluster.Cores[0]) + testhelpers.SetRaftAddressProvider(t, cluster.Cores[0], provider) + } // Wait for the followers to establish a new leader leaderIdx, err := awaitLeader(t, cluster) @@ -301,100 +312,6 @@ func awaitLeader(t *testing.T, cluster *vault.TestCluster) (int, error) { return 0, fmt.Errorf("timeout waiting leader") } -//func testSealMigrationShamirToTransit_Post14( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// var baseClusterPort = basePort + 10 -// -// //---------------------------------------------------- -// // Initialize Shamir -// -// // Start the cluster -// var conf = vault.CoreConfig{ -// Logger: logger.Named("initializeShamir"), -// } -// var opts = vault.TestClusterOptions{ -// HandlerFunc: vaulthttp.Handler, -// NumCores: numTestCores, -// BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), -// BaseClusterListenPort: baseClusterPort, -// } -// storage.Setup(&conf, &opts) -// cluster := vault.NewTestCluster(t, &conf, &opts) -// cluster.Start() -// defer func() { -// storage.Cleanup(t, cluster) -// cluster.Cleanup() -// }() -// -// leader := cluster.Cores[0] -// client := leader.Client -// -// // Unseal -// if storage.IsRaft { -// testhelpers.RaftClusterJoinNodes(t, cluster) -// if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { -// t.Fatal(err) -// } -// } else { -// cluster.UnsealCores(t) -// } -// testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) -// -// // Write a secret that we will read back out later. -// _, err := client.Logical().Write( -// "secret/foo", -// map[string]interface{}{"zork": "quux"}) -// if err != nil { -// t.Fatal(err) -// } -// -// rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys -// -// //---------------------------------------------------- -// // Migrate to Transit -// -// // Create the transit server. -// tss := sealhelper.NewTransitSealServer(t) -// defer func() { -// tss.EnsureCoresSealed(t) -// tss.Cleanup() -// }() -// tss.MakeKey(t, "transit-seal-key") -// -// // N.B. Providing a transit seal puts us in migration mode. 
-// var transitSeal vault.Seal -// opts.SealFunc = func() vault.Seal { -// println("asd;lfkjasdl;fkjal;skfjl;askdjfl;adksjf;lakdsjf;ladksf") -// transitSeal = tss.MakeSeal(t, "transit-seal-key") -// return transitSeal -// } -// -// println("--------------------------------------------------") -// -// // Unseal and migrate to Transit. -// for i := 1; i < numTestCores; i++ { -// println("-------------------") -// -// cluster.StopCore(t, i) -// cluster.RestartCore(t, i, &opts) -// time.Sleep(5 * time.Second) -// -// // Note that the barrier keys are being used as recovery keys -// client := cluster.Cores[i].Client -// client.SetToken(rootToken) -// unsealMigrate(t, client, barrierKeys, true) -// } -// -// // done -// println("--------------------------------------------------") -// cluster.EnsureCoresSealed(t) -// -// //---------------------------------------------------- -// // Run Transit -//} - func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { for i, key := range keys { @@ -696,108 +613,3 @@ func runTransit( // Seal the cluster cluster.EnsureCoresSealed(t) } - -//// TestShamir is a temporary test that exercises the reusable raft storage. -//// It will be replace once we do the post-1.4 migration testing. -//func TestShamir(t *testing.T) { -// testVariousBackends(t, testShamir, true) -//} -// -//func testShamir( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort) -// runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -//} -// -//// TestTransit is a temporary test that exercises the reusable raft storage. -//// It will be replace once we do the post-1.4 migration testing. -//func TestTransit(t *testing.T) { -// testVariousBackends(t, testTransit, true) -//} -// -//func testTransit( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// // Create the transit server. -// tss := sealhelper.NewTransitSealServer(t) -// defer tss.Cleanup() -// tss.MakeKey(t, "transit-seal-key") -// -// rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) -// runTransit(t, logger, storage, basePort, rootToken, transitSeal) -//} - -func TestFoo(t *testing.T) { - testVariousBackends(t, testFoo, true) -} - -func testFoo( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - var baseClusterPort = basePort + 10 - - // Start the cluster - var conf = vault.CoreConfig{ - Logger: logger.Named("foo"), - } - var opts = vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: numTestCores, - BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), - BaseClusterListenPort: baseClusterPort, - } - storage.Setup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() - - leader := cluster.Cores[0] - client := leader.Client - - // Unseal - if storage.IsRaft { - testhelpers.RaftClusterJoinNodes(t, cluster) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } else { - cluster.UnsealCores(t) - } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) - - // Write a secret that we will read back out later. 
- _, err := client.Logical().Write( - "secret/foo", - map[string]interface{}{"zork": "quux"}) - if err != nil { - t.Fatal(err) - } - - println("--------------------------------------------------") - println("StopCore") - cluster.StopCore(t, 1) - - println("--------------------------------------------------") - println("RestartCore") - cluster.RestartCore(t, 1, &opts) - - println("--------------------------------------------------") - println("Sleep") - time.Sleep(10 * time.Second) - - println("--------------------------------------------------") - println("Seal") - - // Seal the cluster - cluster.EnsureCoresSealed(t) - - println("--------------------------------------------------") - println("exit") -} From 357a61b1e4f6d7241d09ed61302a313ee17fce2a Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 09:30:30 -0400 Subject: [PATCH 54/86] clean up test code --- helper/testhelpers/testhelpers.go | 29 +++++ .../sealmigration/seal_migration_test.go | 122 +++++++++++------- 2 files changed, 104 insertions(+), 47 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index a705fd1408c2..e00cdbc583e1 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -570,6 +570,35 @@ func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error { return nil } +// AwaitLeader waits for one of the cluster's nodes to become leader. +func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) { + + timeout := time.Now().Add(30 * time.Second) + for { + if time.Now().After(timeout) { + break + } + + for i, core := range cluster.Cores { + if core.Core.Sealed() { + continue + } + + isLeader, _, _, err := core.Leader() + if err != nil { + t.Fatal(err) + } + if isLeader { + return i, nil + } + } + + time.Sleep(time.Second) + } + + return 0, fmt.Errorf("timeout waiting leader") +} + func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} { t.Helper() diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 2d1d1a70fc47..5aaf0e0c98df 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -32,15 +32,15 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - //t.Run("inmem", func(t *testing.T) { - // t.Parallel() + t.Run("inmem", func(t *testing.T) { + t.Parallel() - // logger := logger.Named("inmem") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeInmemBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 51000) - //}) + logger := logger.Named("inmem") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeInmemBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 51000) + }) //t.Run("file", func(t *testing.T) { // t.Parallel() @@ -62,16 +62,16 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { // tf(t, logger, storage, 53000) //}) - if includeRaft { - t.Run("raft", func(t *testing.T) { - t.Parallel() + //if includeRaft { + // t.Run("raft", func(t *testing.T) { + // t.Parallel() - logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - defer cleanup() - tf(t, logger, storage, 54000) - }) - } + // logger := logger.Named("raft") + // storage, cleanup := 
teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + // defer cleanup() + // tf(t, logger, storage, 54000) + // }) + //} } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -189,7 +189,7 @@ func testSealMigrationShamirToTransit_Post14( // Initialize the backend using shamir cluster, opts, cleanup := initializeShamir(t, logger, storage, basePort) - rootToken := cluster.RootToken + //rootToken := cluster.RootToken // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -200,12 +200,15 @@ func testSealMigrationShamirToTransit_Post14( tss.MakeKey(t, "transit-seal-key") // Migrate the backend from shamir to transit. - transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) + //transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) + println("--------------------------------------------------------------------------------------------") + _ = migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) cluster.EnsureCoresSealed(t) cleanup() + println("--------------------------------------------------------------------------------------------") - // Run the backend with transit. - runTransit(t, logger, storage, basePort, rootToken, transitSeal) + //// Run the backend with transit. + //runTransit(t, logger, storage, basePort, rootToken, transitSeal) } func migrateFromShamirToTransit_Post14( @@ -250,7 +253,7 @@ func migrateFromShamirToTransit_Post14( } // Wait for the followers to establish a new leader - leaderIdx, err := awaitLeader(t, cluster) + leaderIdx, err := testhelpers.AwaitLeader(t, cluster) if err != nil { t.Fatal(err) } @@ -287,31 +290,6 @@ func migrateFromShamirToTransit_Post14( return transitSeal } -// awaitLeader waits for one of the followers to become leader. 
-func awaitLeader(t *testing.T, cluster *vault.TestCluster) (int, error) { - - timeout := time.Now().Add(30 * time.Second) - for { - if time.Now().After(timeout) { - break - } - - for i := 1; i < numTestCores; i++ { - isLeader, _, _, err := cluster.Cores[i].Leader() - if err != nil { - t.Fatal(err) - } - if isLeader { - return i, nil - } - } - - time.Sleep(time.Second) - } - - return 0, fmt.Errorf("timeout waiting leader") -} - func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { for i, key := range keys { @@ -613,3 +591,53 @@ func runTransit( // Seal the cluster cluster.EnsureCoresSealed(t) } + +//func TestFoo(t *testing.T) { +// testVariousBackends(t, testFoo, true) +//} +// +//func testFoo( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// var baseClusterPort = basePort + 10 +// +// // Start the cluster +// var conf = vault.CoreConfig{ +// Logger: logger.Named("foo"), +// } +// var opts = vault.TestClusterOptions{ +// HandlerFunc: vaulthttp.Handler, +// NumCores: numTestCores, +// BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), +// BaseClusterListenPort: baseClusterPort, +// } +// storage.Setup(&conf, &opts) +// cluster := vault.NewTestCluster(t, &conf, &opts) +// cluster.Start() +// defer func() { +// storage.Cleanup(t, cluster) +// cluster.Cleanup() +// }() +// +// leader := cluster.Cores[0] +// client := leader.Client +// +// // Unseal +// if storage.IsRaft { +// testhelpers.RaftClusterJoinNodes(t, cluster) +// if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { +// t.Fatal(err) +// } +// } else { +// cluster.UnsealCores(t) +// } +// testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) +// +// +// // Seal the cluster +// cluster.EnsureCoresSealed(t) +// +// println("--------------------------------------------------") +// println("exit") +//} From 1a5f997a870cfa93c8dd62ae13531ec32006bfee Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 15:05:02 -0400 Subject: [PATCH 55/86] refactor TestClusterCore --- vault/testing.go | 85 ++++++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 43 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index 7a960997b1ec..d8631e5e6448 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1487,17 +1487,10 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te testCluster.setupClusterListener(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i]) } - // Initialize cores - if opts == nil || !opts.SkipInit { - testCluster.initCores(t, opts, cores, handlers, addAuditBackend) - } - // Create TestClusterCores var ret []*TestClusterCore for i := 0; i < numCores; i++ { - client := testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i]) - tcc := &TestClusterCore{ Core: cores[i], CoreConfig: coreConfigs[i], @@ -1511,7 +1504,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te Handler: handlers[i], Server: servers[i], TLSConfig: tlsConfigs[i], - Client: client, Barrier: cores[i].barrier, NodeID: fmt.Sprintf("core-%d", i), UnderlyingRawStorage: coreConfigs[i].Physical, @@ -1526,9 +1518,20 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te ret = append(ret, tcc) } - testCluster.Cores = ret + // Initialize cores + if opts == nil || !opts.SkipInit { + testCluster.initCores(t, opts, addAuditBackend) + } + + // Assign clients + for i := 0; i < 
numCores; i++ { + testCluster.Cores[i].Client = + testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i]) + } + + // Extra Setup for _, tcc := range testCluster.Cores { testExtraTestCoreSetup(t, priKey, tcc) } @@ -1909,51 +1912,47 @@ func (testCluster *TestCluster) setupClusterListener( core.SetClusterHandler(handler) } -func (testCluster *TestCluster) initCores( - t testing.T, - opts *TestClusterOptions, - cores []*Core, - handlers []http.Handler, - addAuditBackend bool, -) { +func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAuditBackend bool) { + + leader := tc.Cores[0] - bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, cores[0], handlers[0]) + bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, leader.Core, leader.Handler) barrierKeys, _ := copystructure.Copy(bKeys) - testCluster.BarrierKeys = barrierKeys.([][]byte) + tc.BarrierKeys = barrierKeys.([][]byte) recoveryKeys, _ := copystructure.Copy(rKeys) - testCluster.RecoveryKeys = recoveryKeys.([][]byte) - testCluster.RootToken = root + tc.RecoveryKeys = recoveryKeys.([][]byte) + tc.RootToken = root // Write root token and barrier keys - err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755) + err := ioutil.WriteFile(filepath.Join(tc.TempDir, "root_token"), []byte(root), 0755) if err != nil { t.Fatal(err) } var buf bytes.Buffer - for i, key := range testCluster.BarrierKeys { + for i, key := range tc.BarrierKeys { buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) - if i < len(testCluster.BarrierKeys)-1 { + if i < len(tc.BarrierKeys)-1 { buf.WriteRune('\n') } } - err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755) + err = ioutil.WriteFile(filepath.Join(tc.TempDir, "barrier_keys"), buf.Bytes(), 0755) if err != nil { t.Fatal(err) } - for i, key := range testCluster.RecoveryKeys { + for i, key := range tc.RecoveryKeys { buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) - if i < len(testCluster.RecoveryKeys)-1 { + if i < len(tc.RecoveryKeys)-1 { buf.WriteRune('\n') } } - err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "recovery_keys"), buf.Bytes(), 0755) + err = ioutil.WriteFile(filepath.Join(tc.TempDir, "recovery_keys"), buf.Bytes(), 0755) if err != nil { t.Fatal(err) } // Unseal first core for _, key := range bKeys { - if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil { + if _, err := leader.Core.Unseal(TestKeyCopy(key)); err != nil { t.Fatalf("unseal err: %s", err) } } @@ -1962,22 +1961,22 @@ func (testCluster *TestCluster) initCores( // If stored keys is supported, the above will no no-op, so trigger auto-unseal // using stored keys to try to unseal - if err := cores[0].UnsealWithStoredKeys(ctx); err != nil { + if err := leader.Core.UnsealWithStoredKeys(ctx); err != nil { t.Fatal(err) } // Verify unsealed - if cores[0].Sealed() { + if leader.Core.Sealed() { t.Fatal("should not be sealed") } - TestWaitActive(t, cores[0]) + TestWaitActive(t, leader.Core) // Existing tests rely on this; we can make a toggle to disable it // later if we want kvReq := &logical.Request{ Operation: logical.UpdateOperation, - ClientToken: testCluster.RootToken, + ClientToken: tc.RootToken, Path: "sys/mounts/secret", Data: map[string]interface{}{ "type": "kv", @@ -1988,7 +1987,7 @@ func (testCluster *TestCluster) initCores( }, }, } - resp, err := cores[0].HandleRequest(namespace.RootContext(ctx), kvReq) + resp, err := leader.Core.HandleRequest(namespace.RootContext(ctx), kvReq) if err != 
nil { t.Fatal(err) } @@ -1996,25 +1995,25 @@ func (testCluster *TestCluster) initCores( t.Fatal(err) } - cfg, err := cores[0].seal.BarrierConfig(ctx) + cfg, err := leader.Core.seal.BarrierConfig(ctx) if err != nil { t.Fatal(err) } // Unseal other cores unless otherwise specified - numCores := len(cores) + numCores := len(tc.Cores) if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 { for i := 1; i < numCores; i++ { - cores[i].seal.SetCachedBarrierConfig(cfg) + tc.Cores[i].Core.seal.SetCachedBarrierConfig(cfg) for _, key := range bKeys { - if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil { + if _, err := tc.Cores[i].Core.Unseal(TestKeyCopy(key)); err != nil { t.Fatalf("unseal err: %s", err) } } // If stored keys is supported, the above will no no-op, so trigger auto-unseal // using stored keys - if err := cores[i].UnsealWithStoredKeys(ctx); err != nil { + if err := tc.Cores[i].Core.UnsealWithStoredKeys(ctx); err != nil { t.Fatal(err) } } @@ -2025,7 +2024,7 @@ func (testCluster *TestCluster) initCores( // Ensure cluster connection info is populated. // Other cores should not come up as leaders. for i := 1; i < numCores; i++ { - isLeader, _, _, err := cores[i].Leader() + isLeader, _, _, err := tc.Cores[i].Core.Leader() if err != nil { t.Fatal(err) } @@ -2038,23 +2037,23 @@ func (testCluster *TestCluster) initCores( // // Set test cluster core(s) and test cluster // - cluster, err := cores[0].Cluster(context.Background()) + cluster, err := leader.Core.Cluster(context.Background()) if err != nil { t.Fatal(err) } - testCluster.ID = cluster.ID + tc.ID = cluster.ID if addAuditBackend { // Enable auditing. auditReq := &logical.Request{ Operation: logical.UpdateOperation, - ClientToken: testCluster.RootToken, + ClientToken: tc.RootToken, Path: "sys/audit/noop", Data: map[string]interface{}{ "type": "noop", }, } - resp, err = cores[0].HandleRequest(namespace.RootContext(ctx), auditReq) + resp, err = leader.Core.HandleRequest(namespace.RootContext(ctx), auditReq) if err != nil { t.Fatal(err) } From 304e93270538c2c7d77e62c4981f3183948056ed Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 15:07:04 -0400 Subject: [PATCH 56/86] clean up TestClusterCore --- vault/testing.go | 147 +---------------------------------------------- 1 file changed, 1 insertion(+), 146 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index d8631e5e6448..fe4ce24597bc 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -741,8 +741,6 @@ func (c *TestCluster) Start() { go core.Server.Serve(ln) } } - - //core.isRunning = true } if c.SetupFunc != nil { c.SetupFunc() @@ -875,7 +873,6 @@ func (c *TestClusterCore) stop() error { } c.Logger().Info("vault test core stopped") - //c.isRunning = false return nil } @@ -973,8 +970,6 @@ type TestClusterCore struct { UnderlyingRawStorage physical.Backend Barrier SecurityBarrier NodeID string - - //isRunning bool } type PhysicalBackendBundle struct { @@ -1572,10 +1567,6 @@ func (cluster *TestCluster) StopCore(t testing.T, idx int) { tcc := cluster.Cores[idx] tcc.Logger().Info("stopping core", "core", idx) - //if !tcc.isRunning { - // t.Fatalf("core is already stopped") - //} - // Stop listeners and call Shutdown() if err := tcc.stop(); err != nil { t.Fatal(err) @@ -1596,10 +1587,6 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO tcc := cluster.Cores[idx] tcc.Logger().Info("restarting core", "core", idx) - //if tcc.isRunning { - // t.Fatalf("cannot restart a running core") - //} - // Set up listeners ln, err := 
net.ListenTCP("tcp", tcc.Address) if err != nil { @@ -1645,139 +1632,7 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO } tcc.Logger().Info("restarted test core", "core", idx) - //tcc.isRunning = true -} - -//type TestCluster struct { -// BarrierKeys [][]byte -// RecoveryKeys [][]byte -// CACert *x509.Certificate -// CACertBytes []byte -// CACertPEM []byte -// CACertPEMFile string -// CAKey *ecdsa.PrivateKey -// CAKeyPEM []byte -// Cores []*TestClusterCore -// ID string -// RootToken string -// RootCAs *x509.CertPool -// TempDir string -// ClientAuthRequired bool -// Logger log.Logger -// CleanupFunc func() -// SetupFunc func() -// -// cleanupFuncs []func() -// base *CoreConfig -// priKey interface{} -//} - -//type TestClusterCore struct { -// *Core -// CoreConfig *CoreConfig -// Client *api.Client -// Handler http.Handler -// Listeners []*TestListener -// ReloadFuncs *map[string][]reloadutil.ReloadFunc -// ReloadFuncsLock *sync.RWMutex -// Server *http.Server -// ServerCert *x509.Certificate -// ServerCertBytes []byte -// ServerCertPEM []byte -// ServerKey *ecdsa.PrivateKey -// ServerKeyPEM []byte -// TLSConfig *tls.Config -// UnderlyingStorage physical.Backend -// UnderlyingRawStorage physical.Backend -// Barrier SecurityBarrier -// NodeID string -// -// isRunning bool -//} - -// // Create cores -// cleanupFuncs := []func(){} -// cores := []*Core{} -// coreConfigs := []*CoreConfig{} -// -// for i := 0; i < numCores; i++ { -// cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], pubKey) -// -// cleanupFuncs = append(cleanupFuncs, cleanup) -// cores = append(cores, c) -// coreConfigs = append(coreConfigs, &localConfig) -// -// if handler != nil { -// handlers[i] = handler -// servers[i].Handler = handlers[i] -// } -// } -// -// // Clustering setup -// for i := 0; i < numCores; i++ { -// testCluster.setupClusterListener(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i]) -// } -// -// // Initialize cores -// if opts == nil || !opts.SkipInit { -// testCluster.initCores(t, opts, cores, handlers, addAuditBackend) -// } -// -// // Create TestClusterCores -// var ret []*TestClusterCore -// for i := 0; i < numCores; i++ { -// -// client := testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i]) -// -// tcc := &TestClusterCore{ -// Core: cores[i], -// CoreConfig: coreConfigs[i], -// ServerKey: certInfoSlice[i].key, -// ServerKeyPEM: certInfoSlice[i].keyPEM, -// ServerCert: certInfoSlice[i].cert, -// ServerCertBytes: certInfoSlice[i].certBytes, -// ServerCertPEM: certInfoSlice[i].certPEM, -// Listeners: listeners[i], -// Handler: handlers[i], -// Server: servers[i], -// TLSConfig: tlsConfigs[i], -// Client: client, -// Barrier: cores[i].barrier, -// NodeID: fmt.Sprintf("core-%d", i), -// UnderlyingRawStorage: coreConfigs[i].Physical, -// } -// tcc.ReloadFuncs = &cores[i].reloadFuncs -// tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock -// tcc.ReloadFuncsLock.Lock() -// (*tcc.ReloadFuncs)["listener|tcp"] = []reloadutil.ReloadFunc{certGetters[i].Reload} -// tcc.ReloadFuncsLock.Unlock() -// -// testAdjustTestCore(base, tcc) -// -// ret = append(ret, tcc) -// } -// -// testCluster.Cores = ret -// -// for _, tcc := range testCluster.Cores { -// testExtraTestCoreSetup(t, priKey, tcc) -// } -// -// // Cleanup -// testCluster.CleanupFunc = func() { -// for _, c := range cleanupFuncs { -// if c != nil { -// c() -// } -// } -// if l, ok := testCluster.Logger.(*TestLogger); ok { -// if t.Failed() { 
-// _ = l.File.Close() -// } else { -// _ = os.Remove(l.Path) -// } -// } -// } +} func (testCluster *TestCluster) newCore( t testing.T, idx int, coreConfig *CoreConfig, From fa9a707f97bd039e1914654565da87cdbe0bfd31 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 15:26:06 -0400 Subject: [PATCH 57/86] stub out some temporary tests --- .../sealmigration/seal_migration_test.go | 63 +++++-------------- vault/testing.go | 33 +++++++--- 2 files changed, 37 insertions(+), 59 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 5aaf0e0c98df..ddf2b3d4d254 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -592,52 +592,17 @@ func runTransit( cluster.EnsureCoresSealed(t) } -//func TestFoo(t *testing.T) { -// testVariousBackends(t, testFoo, true) -//} -// -//func testFoo( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// var baseClusterPort = basePort + 10 -// -// // Start the cluster -// var conf = vault.CoreConfig{ -// Logger: logger.Named("foo"), -// } -// var opts = vault.TestClusterOptions{ -// HandlerFunc: vaulthttp.Handler, -// NumCores: numTestCores, -// BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort), -// BaseClusterListenPort: baseClusterPort, -// } -// storage.Setup(&conf, &opts) -// cluster := vault.NewTestCluster(t, &conf, &opts) -// cluster.Start() -// defer func() { -// storage.Cleanup(t, cluster) -// cluster.Cleanup() -// }() -// -// leader := cluster.Cores[0] -// client := leader.Client -// -// // Unseal -// if storage.IsRaft { -// testhelpers.RaftClusterJoinNodes(t, cluster) -// if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { -// t.Fatal(err) -// } -// } else { -// cluster.UnsealCores(t) -// } -// testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) -// -// -// // Seal the cluster -// cluster.EnsureCoresSealed(t) -// -// println("--------------------------------------------------") -// println("exit") -//} +func TestShamir(t *testing.T) { + testVariousBackends(t, testShamir, true) +} + +func testShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + // Initialize the backend using shamir + cluster, _, cleanup := initializeShamir(t, logger, storage, basePort) + //rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys + cluster.EnsureCoresSealed(t) + cleanup() +} diff --git a/vault/testing.go b/vault/testing.go index fe4ce24597bc..b18545591d7f 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -26,23 +26,20 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/armon/go-metrics" - hclog "github.com/hashicorp/go-hclog" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/metricsutil" - "github.com/hashicorp/vault/vault/cluster" - "github.com/hashicorp/vault/vault/seal" "github.com/mitchellh/copystructure" - + testing "github.com/mitchellh/go-testing-interface" "golang.org/x/crypto/ed25519" "golang.org/x/net/http2" cleanhttp "github.com/hashicorp/go-cleanhttp" + hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" + raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/metricsutil" 
"github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/reloadutil" @@ -54,9 +51,9 @@ import ( "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" - testing "github.com/mitchellh/go-testing-interface" - physInmem "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/seal" ) // This file contains a number of methods that are useful for unit @@ -1030,6 +1027,22 @@ type TestClusterOptions struct { // ClusterLayers are used to override the default cluster connection layer ClusterLayers cluster.NetworkLayerSet + + // RaftAddressProvider is used to set the raft ServerAddressProvider on + // each core. + // + // If SkipInit is true, then RaftAddressProvider has no effect. + // RaftAddressProvider should only be specified if the underlying physical + // storage is Raft. + RaftAddressProvider raftlib.ServerAddressProvider + + // JoinRaftFollowers specifies that each follower core will be joined to + // the raft cluster just before it is unsealed in InitializeCores(). + // + // If SkipInit is true, then JoinRaftFollowers has no effect. + // JoinRaftFollowers should only be specified if the underlying physical + // storage is Raft. + JoinRaftFollowers bool } var DefaultNumCores = 3 From 106bc894bfe1337691ab3877a3abcad972cffafd Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 16:53:25 -0400 Subject: [PATCH 58/86] use HardcodedServerAddressProvider in seal migration tests --- helper/testhelpers/testhelpers.go | 23 ++---- .../sealmigration/seal_migration_test.go | 82 ++++++++++++------- 2 files changed, 63 insertions(+), 42 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index e00cdbc583e1..8300bec3954d 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -412,16 +412,16 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib } func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { - raftClusterJoinNodes(t, cluster, false) + JoinRaftNodes(t, cluster, false, + &TestRaftServerAddressProvider{Cluster: cluster}) } -func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) { - raftClusterJoinNodes(t, cluster, true) -} - -func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) { +func JoinRaftNodes( + t testing.T, cluster *vault.TestCluster, + useStoredKeys bool, + addressProvider raftlib.ServerAddressProvider, +) { - addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) leader := cluster.Cores[0] @@ -505,11 +505,11 @@ func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftli // NewHardcodedServerAddressProvider is a convenience function that makes a // ServerAddressProvider from a given cluster address base port. 
-func NewHardcodedServerAddressProvider(cluster *vault.TestCluster, baseClusterPort int) raftlib.ServerAddressProvider { +func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.ServerAddressProvider { entries := make(map[raftlib.ServerID]raftlib.ServerAddress) - for i := 0; i < len(cluster.Cores); i++ { + for i := 0; i < numCores; i++ { id := fmt.Sprintf("core-%d", i) addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i) entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr) @@ -531,11 +531,6 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r } } -// SetRaftAddressProvider sets a ServerAddressProvider on a node. -func SetRaftAddressProvider(t testing.T, core *vault.TestClusterCore, provider raftlib.ServerAddressProvider) { - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider) -} - // VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. // the correct number of servers, having the correct NodeIDs, and exactly one // leader. diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index ddf2b3d4d254..eb29d9e4c9ee 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -42,25 +42,25 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 51000) }) - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) //if includeRaft { // t.Run("raft", func(t *testing.T) { @@ -225,8 +225,7 @@ func migrateFromShamirToTransit_Post14( return transitSeal } - var baseClusterPort = basePort + 10 - provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) + //var baseClusterPort = basePort + 10 // Restart each follower with the new config, and migrate to Transit. // Note that the barrier keys are being used as recovery keys. 
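// What unsealMigrate amounts to for each restarted follower, as a sketch
// (assuming the vault/api UnsealWithOptions call; unsealMigrateSketch is an
// illustrative stand-in): every shamir key is submitted with Migrate set,
// which tells the core to treat those keys as recovery keys and to begin
// migrating to the new barrier seal once the threshold is reached.
func unsealMigrateSketch(t *testing.T, client *api.Client, keys [][]byte) {
	t.Helper()

	for _, key := range keys {
		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
			Key:     base64.StdEncoding.EncodeToString(key),
			Migrate: true,
		})
		if err != nil {
			t.Fatal(err)
		}
		if resp != nil && !resp.Sealed {
			// Threshold reached; the core has started the migration.
			return
		}
	}
	t.Fatal("migration unseal never reached the key threshold")
}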
@@ -235,7 +234,8 @@ func migrateFromShamirToTransit_Post14( cluster.StopCore(t, i) if storage.IsRaft { teststorage.CloseRaftStorage(t, cluster.Cores[i]) - testhelpers.SetRaftAddressProvider(t, cluster.Cores[i], provider) + panic("TODO") + //testhelpers.SetRaftAddressProvider(t, cluster.Cores[i], provider) } cluster.RestartCore(t, i, opts) @@ -249,7 +249,8 @@ func migrateFromShamirToTransit_Post14( cluster.StopCore(t, 0) if storage.IsRaft { teststorage.CloseRaftStorage(t, cluster.Cores[0]) - testhelpers.SetRaftAddressProvider(t, cluster.Cores[0], provider) + panic("TODO") + //testhelpers.SetRaftAddressProvider(t, cluster.Cores[0], provider) } // Wait for the followers to establish a new leader @@ -380,7 +381,9 @@ func initializeShamir( // Unseal if storage.IsRaft { - testhelpers.RaftClusterJoinNodes(t, cluster) + testhelpers.JoinRaftNodes(t, cluster, false, + testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort)) + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } @@ -434,7 +437,7 @@ func runShamir( // Unseal cluster.BarrierKeys = barrierKeys if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) + provider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort) testhelpers.SetRaftAddressProviders(t, cluster, provider) for _, core := range cluster.Cores { @@ -503,7 +506,9 @@ func initializeTransit( // Join raft if storage.IsRaft { - testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster) + testhelpers.JoinRaftNodes(t, cluster, true, + testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort)) + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } @@ -558,7 +563,7 @@ func runTransit( // Unseal. Even though we are using autounseal, we have to unseal // explicitly because we are using SkipInit. 
if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort) + provider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort) testhelpers.SetRaftAddressProviders(t, cluster, provider) for _, core := range cluster.Cores { @@ -592,6 +597,8 @@ func runTransit( cluster.EnsureCoresSealed(t) } +//-------------------------------------------------------------- + func TestShamir(t *testing.T) { testVariousBackends(t, testShamir, true) } @@ -602,7 +609,26 @@ func testShamir( // Initialize the backend using shamir cluster, _, cleanup := initializeShamir(t, logger, storage, basePort) - //rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys + rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys cluster.EnsureCoresSealed(t) cleanup() + + runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +} + +func TestTransit(t *testing.T) { + testVariousBackends(t, testTransit, true) +} + +func testTransit( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + tss := sealhelper.NewTransitSealServer(t) + defer tss.Cleanup() + tss.MakeKey(t, "transit-seal-key") + + rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + + runTransit(t, logger, storage, basePort, rootToken, transitSeal) } From 1ea087b6dce5ea1bc7c9fe695ac422331cb4bcf9 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 1 Jun 2020 17:00:42 -0400 Subject: [PATCH 59/86] work on raft for TestSealMigration_ShamirToTransit_Post14 --- .../sealmigration/seal_migration_test.go | 88 +++++++++---------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index eb29d9e4c9ee..56dacc64a042 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -32,46 +32,46 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - t.Run("inmem", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("inmem") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeInmemBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 51000) - }) - - t.Run("file", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) - - t.Run("consul", func(t *testing.T) { - t.Parallel() - - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) - - //if includeRaft { - // t.Run("raft", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("raft") - // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) - // defer cleanup() - // tf(t, logger, storage, 54000) - // }) - //} + //t.Run("inmem", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("inmem") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeInmemBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 51000) + //}) + + //t.Run("file", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("file") + // 
storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) + + //t.Run("consul", func(t *testing.T) { + // t.Parallel() + + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) + + if includeRaft { + t.Run("raft", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("raft") + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + defer cleanup() + tf(t, logger, storage, 54000) + }) + } } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -234,8 +234,6 @@ func migrateFromShamirToTransit_Post14( cluster.StopCore(t, i) if storage.IsRaft { teststorage.CloseRaftStorage(t, cluster.Cores[i]) - panic("TODO") - //testhelpers.SetRaftAddressProvider(t, cluster.Cores[i], provider) } cluster.RestartCore(t, i, opts) @@ -249,8 +247,6 @@ func migrateFromShamirToTransit_Post14( cluster.StopCore(t, 0) if storage.IsRaft { teststorage.CloseRaftStorage(t, cluster.Cores[0]) - panic("TODO") - //testhelpers.SetRaftAddressProvider(t, cluster.Cores[0], provider) } // Wait for the followers to establish a new leader @@ -258,6 +254,7 @@ func migrateFromShamirToTransit_Post14( if err != nil { t.Fatal(err) } + leader := cluster.Cores[leaderIdx] client := leader.Client client.SetToken(rootToken) @@ -271,6 +268,9 @@ func migrateFromShamirToTransit_Post14( // TODO -- make sure its not migrating cluster.RestartCore(t, 0, opts) + // TODO + // if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + // Read the secret secret, err := client.Logical().Read("secret/foo") if err != nil { From 4fc1f5259f27bac316a717a9913a51a5835dadc7 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 2 Jun 2020 12:54:24 -0400 Subject: [PATCH 60/86] always use hardcoded raft address provider in seal migration tests --- helper/testhelpers/testhelpers.go | 57 +++++++++++-------- .../teststorage/teststorage_reusable.go | 16 +++++- .../sealmigration/seal_migration_test.go | 27 ++++----- 3 files changed, 56 insertions(+), 44 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 8300bec3954d..6d862de21310 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -412,15 +412,8 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib } func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { - JoinRaftNodes(t, cluster, false, - &TestRaftServerAddressProvider{Cluster: cluster}) -} -func JoinRaftNodes( - t testing.T, cluster *vault.TestCluster, - useStoredKeys bool, - addressProvider raftlib.ServerAddressProvider, -) { + addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) @@ -430,11 +423,7 @@ func JoinRaftNodes( { EnsureCoreSealed(t, leader) leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - if useStoredKeys { - cluster.UnsealCoreWithStoredKeys(t, leader) - } else { - cluster.UnsealCore(t, leader) - } + cluster.UnsealCore(t, leader) vault.TestWaitActive(t, leader.Core) } @@ -454,6 +443,37 @@ func JoinRaftNodes( t.Fatal(err) } + cluster.UnsealCore(t, core) + } + + WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) +} + +// JoinRaftFollowers unseals the leader, and 
then joins-and-unseals the +// followers one at a time. We assume the ServerAddressProvider has already +// been installed on all the nodes. +func JoinRaftFollowers(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) { + + leader := cluster.Cores[0] + + cluster.UnsealCore(t, leader) + vault.TestWaitActive(t, leader.Core) + + leaderInfos := []*raft.LeaderJoinInfo{ + &raft.LeaderJoinInfo{ + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig, + }, + } + + // Join followers + for i := 1; i < len(cluster.Cores); i++ { + core := cluster.Cores[i] + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) + if err != nil { + t.Fatal(err) + } + if useStoredKeys { // For autounseal, the raft backend is not initialized right away // after the join. We need to wait briefly before we can unseal. @@ -520,17 +540,6 @@ func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.Se } } -// SetRaftAddressProviders sets a ServerAddressProvider for all the nodes in a -// cluster. -func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider raftlib.ServerAddressProvider) { - - atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) - - for _, core := range cluster.Cores { - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider) - } -} - // VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. // the correct number of servers, having the correct NodeIDs, and exactly one // leader. diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index 02cf117f2009..f5b3762355eb 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -8,6 +8,7 @@ import ( "github.com/mitchellh/go-testing-interface" hclog "github.com/hashicorp/go-hclog" + raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/vault" ) @@ -73,7 +74,10 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica // MakeReusableRaftStorage makes a physical raft backend that can be re-used // across multiple test clusters in sequence. 
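// The addressProvider parameter being added below satisfies hashicorp/raft's
// ServerAddressProvider interface (ServerAddr(ServerID) (ServerAddress, error)),
// so every rebuilt cluster resolves "core-N" to the same fixed port. Usage,
// mirroring how the raft subtest wires it up later in this series (the 54010
// base cluster port is simply the value used there):
//
//	addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010)
//	storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider)
//	defer cleanup()
//	tf(t, logger, storage, 54000)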
-func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) { +func MakeReusableRaftStorage( + t testing.T, logger hclog.Logger, numCores int, + addressProvider raftlib.ServerAddressProvider, +) (ReusableStorage, StorageCleanup) { raftDirs := make([]string, numCores) for i := 0; i < numCores; i++ { @@ -87,7 +91,7 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (Re conf.DisablePerformanceStandby = true opts.KeepStandbysSealed = true opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger) *vault.PhysicalBackendBundle { - return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx]) + return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider) } }, @@ -125,7 +129,10 @@ func makeRaftDir(t testing.T) string { return raftDir } -func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string) *vault.PhysicalBackendBundle { +func makeReusableRaftBackend( + t testing.T, coreIdx int, logger hclog.Logger, raftDir string, + addressProvider raftlib.ServerAddressProvider, +) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) conf := map[string]string{ @@ -139,6 +146,9 @@ func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raft t.Fatal(err) } + fmt.Printf("makeReusableRaftBackend %T %d\n", addressProvider, coreIdx) + backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + return &vault.PhysicalBackendBundle{ Backend: backend, } diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 56dacc64a042..6f37e65bedef 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "fmt" + "sync/atomic" "testing" "time" @@ -67,7 +68,11 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { t.Parallel() logger := logger.Named("raft") - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) + + atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) + addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010) + + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) defer cleanup() tf(t, logger, storage, 54000) }) @@ -225,8 +230,6 @@ func migrateFromShamirToTransit_Post14( return transitSeal } - //var baseClusterPort = basePort + 10 - // Restart each follower with the new config, and migrate to Transit. // Note that the barrier keys are being used as recovery keys. 
rootToken, recoveryKeys := cluster.RootToken, cluster.BarrierKeys @@ -381,9 +384,7 @@ func initializeShamir( // Unseal if storage.IsRaft { - testhelpers.JoinRaftNodes(t, cluster, false, - testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort)) - + testhelpers.JoinRaftFollowers(t, cluster, false) if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } @@ -437,13 +438,10 @@ func runShamir( // Unseal cluster.BarrierKeys = barrierKeys if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort) - testhelpers.SetRaftAddressProviders(t, cluster, provider) - - for _, core := range cluster.Cores { + for i, core := range cluster.Cores { + fmt.Printf(">>> unsealing %d\n", i) cluster.UnsealCore(t, core) } - // This is apparently necessary for the raft cluster to get itself // situated. time.Sleep(15 * time.Second) @@ -506,8 +504,7 @@ func initializeTransit( // Join raft if storage.IsRaft { - testhelpers.JoinRaftNodes(t, cluster, true, - testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort)) + testhelpers.JoinRaftFollowers(t, cluster, true) if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) @@ -563,13 +560,9 @@ func runTransit( // Unseal. Even though we are using autounseal, we have to unseal // explicitly because we are using SkipInit. if storage.IsRaft { - provider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, baseClusterPort) - testhelpers.SetRaftAddressProviders(t, cluster, provider) - for _, core := range cluster.Cores { cluster.UnsealCoreWithStoredKeys(t, core) } - // This is apparently necessary for the raft cluster to get itself // situated. time.Sleep(15 * time.Second) From 22db307178d48d15c4cab23cee80a39b6dbcfc96 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 2 Jun 2020 14:23:47 -0400 Subject: [PATCH 61/86] debug TestSealMigration_ShamirToTransit_Post14 --- helper/testhelpers/testhelpers.go | 4 +- .../teststorage/teststorage_reusable.go | 9 ++-- physical/raft/fsm.go | 1 + physical/raft/raft.go | 2 + .../sealmigration/seal_migration_test.go | 44 ++++++++++--------- 5 files changed, 33 insertions(+), 27 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 6d862de21310..3647cefea160 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -450,8 +450,8 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { } // JoinRaftFollowers unseals the leader, and then joins-and-unseals the -// followers one at a time. We assume the ServerAddressProvider has already -// been installed on all the nodes. +// followers one at a time. We assume that the ServerAddressProvider has +// already been installed on all the nodes. func JoinRaftFollowers(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) { leader := cluster.Cores[0] diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index f5b3762355eb..42cd0d805f40 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -97,8 +97,8 @@ func MakeReusableRaftStorage( // Close open files being used by raft. 
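// Why the explicit close matters when storage is reused: the raft FSM keeps a
// bbolt database open, and bbolt takes an exclusive file lock, so a rebuilt
// cluster reopening the same vault.db would otherwise fail once the
// one-second open timeout expires. A minimal sketch of that reopen step,
// assuming the same bbolt options the backend uses (reopenRaftDB is an
// illustrative helper):
func reopenRaftDB(raftDir string) error {
	// This returns a lock timeout error if a previous RaftBackend holding
	// the file was never closed.
	db, err := bolt.Open(filepath.Join(raftDir, "vault.db"), 0666,
		&bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		return err
	}
	return db.Close()
}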
Cleanup: func(t testing.T, cluster *vault.TestCluster) { - for _, core := range cluster.Cores { - CloseRaftStorage(t, core) + for i := 0; i < len(cluster.Cores); i++ { + CloseRaftStorage(t, cluster, i) } }, } @@ -113,8 +113,9 @@ func MakeReusableRaftStorage( } // CloseRaftStorage closes open files being used by raft. -func CloseRaftStorage(t testing.T, core *vault.TestClusterCore) { - raftStorage := core.UnderlyingRawStorage.(*raft.RaftBackend) +func CloseRaftStorage(t testing.T, cluster *vault.TestCluster, idx int) { + fmt.Printf("CloseRaftStorage %d\n", idx) + raftStorage := cluster.Cores[idx].UnderlyingRawStorage.(*raft.RaftBackend) if err := raftStorage.Close(); err != nil { t.Fatal(err) } diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index 77c9be3d0294..8f70961cdf24 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -101,6 +101,7 @@ func NewFSM(conf map[string]string, logger log.Logger) (*FSM, error) { } dbPath := filepath.Join(path, "vault.db") + fmt.Printf("NewFSM: %s\n", dbPath) boltDB, err := bolt.Open(dbPath, 0666, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 68c1e914d905..4ef91c0472d4 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -342,6 +342,8 @@ func (b *RaftBackend) Close() error { b.l.Lock() defer b.l.Unlock() + fmt.Printf("(b *RaftBackend) Close() aaa %s\n", b.fsm.path) + if err := b.fsm.db.Close(); err != nil { return err } diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 6f37e65bedef..6bfeca8fe820 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -93,10 +93,11 @@ func testSealMigrationShamirToTransit_Pre14( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - cluster, _, cleanup := initializeShamir(t, logger, storage, basePort) + cluster, _ := initializeShamir(t, logger, storage, basePort) rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys cluster.EnsureCoresSealed(t) - cleanup() + storage.Cleanup(t, cluster) + cluster.Cleanup() // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -193,8 +194,7 @@ func testSealMigrationShamirToTransit_Post14( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - cluster, opts, cleanup := initializeShamir(t, logger, storage, basePort) - //rootToken := cluster.RootToken + cluster, opts := initializeShamir(t, logger, storage, basePort) // Create the transit server. tss := sealhelper.NewTransitSealServer(t) @@ -205,15 +205,19 @@ func testSealMigrationShamirToTransit_Post14( tss.MakeKey(t, "transit-seal-key") // Migrate the backend from shamir to transit. - //transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) - println("--------------------------------------------------------------------------------------------") - _ = migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) + transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts) cluster.EnsureCoresSealed(t) - cleanup() - println("--------------------------------------------------------------------------------------------") - //// Run the backend with transit. 
- //runTransit(t, logger, storage, basePort, rootToken, transitSeal) + storage.Cleanup(t, cluster) + cluster.Cleanup() + println("--------------------------------------------------------------") + println(">>> sleep") + time.Sleep(15 * time.Second) + + // Run the backend with transit. + println("--------------------------------------------------------------") + println(">>> runTransit") + runTransit(t, logger, storage, basePort, cluster.RootToken, transitSeal) } func migrateFromShamirToTransit_Post14( @@ -236,7 +240,7 @@ func migrateFromShamirToTransit_Post14( for i := 1; i < numTestCores; i++ { cluster.StopCore(t, i) if storage.IsRaft { - teststorage.CloseRaftStorage(t, cluster.Cores[i]) + teststorage.CloseRaftStorage(t, cluster, i) } cluster.RestartCore(t, i, opts) @@ -249,7 +253,7 @@ func migrateFromShamirToTransit_Post14( // Bring down the leader cluster.StopCore(t, 0) if storage.IsRaft { - teststorage.CloseRaftStorage(t, cluster.Cores[0]) + teststorage.CloseRaftStorage(t, cluster, 0) } // Wait for the followers to establish a new leader @@ -357,7 +361,7 @@ func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, s // initializeShamir initializes a brand new backend storage with Shamir. func initializeShamir( t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions, func()) { + storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions) { var baseClusterPort = basePort + 10 @@ -374,10 +378,6 @@ func initializeShamir( storage.Setup(&conf, &opts) cluster := vault.NewTestCluster(t, &conf, &opts) cluster.Start() - cleanup := func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - } leader := cluster.Cores[0] client := leader.Client @@ -401,7 +401,7 @@ func initializeShamir( t.Fatal(err) } - return cluster, &opts, cleanup + return cluster, &opts } // runShamir uses a pre-populated backend storage with Shamir. 
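// For reference, a hedged sketch of the ReusableStorage bundle that the
// initialize*/run* helpers above pass around. The real definition lives in
// the teststorage package; the field shapes below are inferred from how
// storage.Setup, storage.Cleanup, and storage.IsRaft are used in these
// tests. Setup wires the shared physical backend into a new test cluster's
// configuration, Cleanup releases it without destroying the underlying data
// so a later cluster can reopen it, and IsRaft lets callers special-case
// integrated raft storage.
type reusableStorageSketch struct {
	IsRaft  bool
	Setup   func(conf *vault.CoreConfig, opts *vault.TestClusterOptions)
	Cleanup func(t testing.T, cluster *vault.TestCluster)
}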
@@ -601,10 +601,12 @@ func testShamir( storage teststorage.ReusableStorage, basePort int) { // Initialize the backend using shamir - cluster, _, cleanup := initializeShamir(t, logger, storage, basePort) + cluster, _ := initializeShamir(t, logger, storage, basePort) rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys cluster.EnsureCoresSealed(t) - cleanup() + + storage.Cleanup(t, cluster) + cluster.Cleanup() runShamir(t, logger, storage, basePort, rootToken, barrierKeys) } From 1bb99442d2b1983fc03a453a9cb5efaf8f665dc5 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 12:50:23 -0400 Subject: [PATCH 62/86] fix bug in RestartCore --- vault/testing.go | 1 + 1 file changed, 1 insertion(+) diff --git a/vault/testing.go b/vault/testing.go index b18545591d7f..8f69118007cf 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1628,6 +1628,7 @@ func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterO cluster.cleanupFuncs[idx] = cleanup tcc.Core = newCore tcc.CoreConfig = &localConfig + tcc.UnderlyingRawStorage = localConfig.Physical cluster.setupClusterListener( t, idx, newCore, tcc.CoreConfig, From f77e990026f8a3910f2672ba47bd5204ffba8499 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 12:51:07 -0400 Subject: [PATCH 63/86] remove debug code --- vault/external_tests/sealmigration/seal_migration_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 6bfeca8fe820..02562559ca72 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -210,13 +210,8 @@ func testSealMigrationShamirToTransit_Post14( storage.Cleanup(t, cluster) cluster.Cleanup() - println("--------------------------------------------------------------") - println(">>> sleep") - time.Sleep(15 * time.Second) // Run the backend with transit. - println("--------------------------------------------------------------") - println(">>> runTransit") runTransit(t, logger, storage, basePort, cluster.RootToken, transitSeal) } From 4ba1036ea7163e9cc503822a3e2ad4aacd1eafe5 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 13:05:24 -0400 Subject: [PATCH 64/86] TestSealMigration_ShamirToTransit_Post14 works now --- .../sealmigration/seal_migration_test.go | 44 +++++++++++++++---- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 02562559ca72..55d9941ebe93 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -239,9 +239,8 @@ func migrateFromShamirToTransit_Post14( } cluster.RestartCore(t, i, opts) - client := cluster.Cores[i].Client - client.SetToken(rootToken) - unsealMigrate(t, client, recoveryKeys, true) + cluster.Cores[i].Client.SetToken(rootToken) + unsealMigrate(t, cluster.Cores[i].Client, recoveryKeys, true) time.Sleep(5 * time.Second) } @@ -256,25 +255,28 @@ func migrateFromShamirToTransit_Post14( if err != nil { t.Fatal(err) } - + if leaderIdx == 0 { + t.Fatalf("Core 0 cannot be the leader right now") + } leader := cluster.Cores[leaderIdx] - client := leader.Client - client.SetToken(rootToken) + leader.Client.SetToken(rootToken) // Wait for migration to finish. 
Sadly there is no callback, and the // test will fail later on if we don't do this. // TODO -- actually there is a callback, we can monitor this and await - time.Sleep(10 * time.Second) + time.Sleep(60 * time.Second) // Bring core 0 back up - // TODO -- make sure its not migrating cluster.RestartCore(t, 0, opts) + cluster.Cores[0].Client.SetToken(rootToken) + unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + time.Sleep(5 * time.Second) // TODO // if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { // Read the secret - secret, err := client.Logical().Read("secret/foo") + secret, err := leader.Client.Logical().Read("secret/foo") if err != nil { t.Fatal(err) } @@ -336,6 +338,30 @@ func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServe } } +func unseal(t *testing.T, client *api.Client, keys [][]byte) { + + for i, key := range keys { + + resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ + Key: base64.StdEncoding.EncodeToString(key), + }) + if i < keyThreshold-1 { + // Not enough keys have been provided yet. + if err != nil { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Sealed { + t.Fatalf("expected unsealed state; got %#v", resp) + } + break + } + } +} + // verifyBarrierConfig verifies that a barrier configuration is correct. func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) { t.Helper() From d717e5335f7ae4df31ab155f19fda03bd23890be Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 13:31:44 -0400 Subject: [PATCH 65/86] clean up debug code --- .../teststorage/teststorage_reusable.go | 2 - physical/raft/fsm.go | 1 - physical/raft/raft.go | 2 - .../sealmigration/seal_migration_test.go | 49 +++++++++---------- 4 files changed, 24 insertions(+), 30 deletions(-) diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index 42cd0d805f40..3546fb7e528b 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -114,7 +114,6 @@ func MakeReusableRaftStorage( // CloseRaftStorage closes open files being used by raft. 
func CloseRaftStorage(t testing.T, cluster *vault.TestCluster, idx int) { - fmt.Printf("CloseRaftStorage %d\n", idx) raftStorage := cluster.Cores[idx].UnderlyingRawStorage.(*raft.RaftBackend) if err := raftStorage.Close(); err != nil { t.Fatal(err) @@ -147,7 +146,6 @@ func makeReusableRaftBackend( t.Fatal(err) } - fmt.Printf("makeReusableRaftBackend %T %d\n", addressProvider, coreIdx) backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) return &vault.PhysicalBackendBundle{ diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index 8f70961cdf24..77c9be3d0294 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -101,7 +101,6 @@ func NewFSM(conf map[string]string, logger log.Logger) (*FSM, error) { } dbPath := filepath.Join(path, "vault.db") - fmt.Printf("NewFSM: %s\n", dbPath) boltDB, err := bolt.Open(dbPath, 0666, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 4ef91c0472d4..68c1e914d905 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -342,8 +342,6 @@ func (b *RaftBackend) Close() error { b.l.Lock() defer b.l.Unlock() - fmt.Printf("(b *RaftBackend) Close() aaa %s\n", b.fsm.path) - if err := b.fsm.db.Close(); err != nil { return err } diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 55d9941ebe93..e92975bc020d 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -338,29 +338,29 @@ func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServe } } -func unseal(t *testing.T, client *api.Client, keys [][]byte) { - - for i, key := range keys { - - resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ - Key: base64.StdEncoding.EncodeToString(key), - }) - if i < keyThreshold-1 { - // Not enough keys have been provided yet. - if err != nil { - t.Fatal(err) - } - } else { - if err != nil { - t.Fatal(err) - } - if resp == nil || resp.Sealed { - t.Fatalf("expected unsealed state; got %#v", resp) - } - break - } - } -} +//func unseal(t *testing.T, client *api.Client, keys [][]byte) { +// +// for i, key := range keys { +// +// resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ +// Key: base64.StdEncoding.EncodeToString(key), +// }) +// if i < keyThreshold-1 { +// // Not enough keys have been provided yet. +// if err != nil { +// t.Fatal(err) +// } +// } else { +// if err != nil { +// t.Fatal(err) +// } +// if resp == nil || resp.Sealed { +// t.Fatalf("expected unsealed state; got %#v", resp) +// } +// break +// } +// } +//} // verifyBarrierConfig verifies that a barrier configuration is correct. 
func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) { @@ -459,8 +459,7 @@ func runShamir( // Unseal cluster.BarrierKeys = barrierKeys if storage.IsRaft { - for i, core := range cluster.Cores { - fmt.Printf(">>> unsealing %d\n", i) + for _, core := range cluster.Cores { cluster.UnsealCore(t, core) } // This is apparently necessary for the raft cluster to get itself From 3dcb5ede3a11d52c5cfc7e38b3f6ff3f32e7e5d4 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 13:47:19 -0400 Subject: [PATCH 66/86] clean up tests --- .../sealmigration/seal_migration_test.go | 41 +++++++++++++++---- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index e92975bc020d..7c653d8beb4b 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -261,19 +261,23 @@ func migrateFromShamirToTransit_Post14( leader := cluster.Cores[leaderIdx] leader.Client.SetToken(rootToken) - // Wait for migration to finish. Sadly there is no callback, and the - // test will fail later on if we don't do this. - // TODO -- actually there is a callback, we can monitor this and await - time.Sleep(60 * time.Second) - // Bring core 0 back up cluster.RestartCore(t, 0, opts) cluster.Cores[0].Client.SetToken(rootToken) unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) time.Sleep(5 * time.Second) - // TODO - // if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + // Wait for migration to finish. + awaitMigration(t, leader.Client) + + //// TODO + //if storage.IsRaft { + // if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + // t.Fatal(err) + // } + //} else { + // cluster.UnsealCores(t) + //} // Read the secret secret, err := leader.Client.Logical().Read("secret/foo") @@ -338,6 +342,29 @@ func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServe } } +// awaitMigration waits for migration to finish. 
+func awaitMigration(t *testing.T, client *api.Client) { + + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + break + } + + resp, err := client.Sys().SealStatus() + if err != nil { + t.Fatal(err) + } + if !resp.Migration { + return + } + + time.Sleep(time.Second) + } + + t.Fatalf("migration did not complete.") +} + //func unseal(t *testing.T, client *api.Client, keys [][]byte) { // // for i, key := range keys { From 1f6c138a8642ff2ea526995e87b3fd775bbf3c32 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 14:34:56 -0400 Subject: [PATCH 67/86] cleanup tests --- .../sealmigration/seal_migration_test.go | 213 +++++++++--------- 1 file changed, 107 insertions(+), 106 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 7c653d8beb4b..fcb80ea21c63 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -33,35 +33,35 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) - //t.Run("inmem", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("inmem") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeInmemBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 51000) - //}) - - //t.Run("file", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) - - //t.Run("consul", func(t *testing.T) { - // t.Parallel() - - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) + t.Run("inmem", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("inmem") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeInmemBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 51000) + }) + + t.Run("file", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) + + t.Run("consul", func(t *testing.T) { + t.Parallel() + + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) if includeRaft { t.Run("raft", func(t *testing.T) { @@ -149,19 +149,16 @@ func migrateFromShamirToTransit_Pre14( }() leader := cluster.Cores[0] - client := leader.Client - client.SetToken(rootToken) + leader.Client.SetToken(rootToken) // Unseal and migrate to Transit. - unsealMigrate(t, client, recoveryKeys, true) + unsealMigrate(t, leader.Client, recoveryKeys, true) - // Wait for migration to finish. Sadly there is no callback, and the - // test will fail later on if we don't do this. - // TODO -- actually there is a callback, we can monitor this and await - time.Sleep(10 * time.Second) + // Wait for migration to finish. 
+ awaitMigration(t, leader.Client) // Read the secret - secret, err := client.Logical().Read("secret/foo") + secret, err := leader.Client.Logical().Read("secret/foo") if err != nil { t.Fatal(err) } @@ -264,20 +261,27 @@ func migrateFromShamirToTransit_Post14( // Bring core 0 back up cluster.RestartCore(t, 0, opts) cluster.Cores[0].Client.SetToken(rootToken) - unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + + // TODO is this a bug? Why is raft different here? + if storage.IsRaft { + unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + } else { + unseal(t, cluster.Cores[0].Client, recoveryKeys) + } + time.Sleep(5 * time.Second) // Wait for migration to finish. awaitMigration(t, leader.Client) - //// TODO - //if storage.IsRaft { - // if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - // t.Fatal(err) - // } - //} else { - // cluster.UnsealCores(t) - //} + // This is apparently necessary for the raft cluster to get itself + // situated. + if storage.IsRaft { + time.Sleep(15 * time.Second) + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + t.Fatal(err) + } + } // Read the secret secret, err := leader.Client.Logical().Read("secret/foo") @@ -365,29 +369,29 @@ func awaitMigration(t *testing.T, client *api.Client) { t.Fatalf("migration did not complete.") } -//func unseal(t *testing.T, client *api.Client, keys [][]byte) { -// -// for i, key := range keys { -// -// resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ -// Key: base64.StdEncoding.EncodeToString(key), -// }) -// if i < keyThreshold-1 { -// // Not enough keys have been provided yet. -// if err != nil { -// t.Fatal(err) -// } -// } else { -// if err != nil { -// t.Fatal(err) -// } -// if resp == nil || resp.Sealed { -// t.Fatalf("expected unsealed state; got %#v", resp) -// } -// break -// } -// } -//} +func unseal(t *testing.T, client *api.Client, keys [][]byte) { + + for i, key := range keys { + + resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ + Key: base64.StdEncoding.EncodeToString(key), + }) + if i < keyThreshold-1 { + // Not enough keys have been provided yet. + if err != nil { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Sealed { + t.Fatalf("expected unsealed state; got %#v", resp) + } + break + } + } +} // verifyBarrierConfig verifies that a barrier configuration is correct. func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) { @@ -492,7 +496,6 @@ func runShamir( // This is apparently necessary for the raft cluster to get itself // situated. time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } @@ -613,7 +616,6 @@ func runTransit( // This is apparently necessary for the raft cluster to get itself // situated. 
time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { t.Fatal(err) } @@ -639,38 +641,37 @@ func runTransit( //-------------------------------------------------------------- -func TestShamir(t *testing.T) { - testVariousBackends(t, testShamir, true) -} - -func testShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - // Initialize the backend using shamir - cluster, _ := initializeShamir(t, logger, storage, basePort) - rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys - cluster.EnsureCoresSealed(t) - - storage.Cleanup(t, cluster) - cluster.Cleanup() - - runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -} - -func TestTransit(t *testing.T) { - testVariousBackends(t, testTransit, true) -} - -func testTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - tss := sealhelper.NewTransitSealServer(t) - defer tss.Cleanup() - tss.MakeKey(t, "transit-seal-key") - - rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - - runTransit(t, logger, storage, basePort, rootToken, transitSeal) -} +//func TestShamir(t *testing.T) { +// testVariousBackends(t, testShamir, true) +//} +// +//func testShamir( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// cluster, _ := initializeShamir(t, logger, storage, basePort) +// rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys +// cluster.EnsureCoresSealed(t) +// +// storage.Cleanup(t, cluster) +// cluster.Cleanup() +// +// runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +//} +// +//func TestTransit(t *testing.T) { +// testVariousBackends(t, testTransit, true) +//} +// +//func testTransit( +// t *testing.T, logger hclog.Logger, +// storage teststorage.ReusableStorage, basePort int) { +// +// tss := sealhelper.NewTransitSealServer(t) +// defer tss.Cleanup() +// tss.MakeKey(t, "transit-seal-key") +// +// rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) +// +// runTransit(t, logger, storage, basePort, rootToken, transitSeal) +//} From 4239d8ae7a7d496509ae32029936484aa0e1cdfa Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 4 Jun 2020 14:44:03 -0400 Subject: [PATCH 68/86] refactor test code --- .../seal_migration_pre14_test.go | 6 +- .../sealmigration/seal_migration_test.go | 84 +++++++++---------- 2 files changed, 46 insertions(+), 44 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_pre14_test.go b/vault/external_tests/sealmigration/seal_migration_pre14_test.go index 6f72449da118..7b9e7f334756 100644 --- a/vault/external_tests/sealmigration/seal_migration_pre14_test.go +++ b/vault/external_tests/sealmigration/seal_migration_pre14_test.go @@ -42,7 +42,11 @@ func testSealMigrationTransitToShamir_Pre14( tss.MakeKey(t, "transit-seal-key") // Initialize the backend with transit. 
- rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys + cluster.EnsureCoresSealed(t) + storage.Cleanup(t, cluster) + cluster.Cleanup() // Migrate the backend from transit to shamir migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index fcb80ea21c63..370156fe1e8e 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -521,7 +521,7 @@ func runShamir( func initializeTransit( t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, - tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) { + tss *sealhelper.TransitSealServer) (*vault.TestCluster, *vault.TestClusterOptions, vault.Seal) { var transitSeal vault.Seal @@ -544,10 +544,6 @@ func initializeTransit( storage.Setup(&conf, &opts) cluster := vault.NewTestCluster(t, &conf, &opts) cluster.Start() - defer func() { - storage.Cleanup(t, cluster) - cluster.Cleanup() - }() leader := cluster.Cores[0] client := leader.Client @@ -570,10 +566,7 @@ func initializeTransit( t.Fatal(err) } - // Seal the cluster - cluster.EnsureCoresSealed(t) - - return cluster.RootToken, cluster.RecoveryKeys, transitSeal + return cluster, &opts, transitSeal } func runTransit( @@ -641,37 +634,42 @@ func runTransit( //-------------------------------------------------------------- -//func TestShamir(t *testing.T) { -// testVariousBackends(t, testShamir, true) -//} -// -//func testShamir( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// cluster, _ := initializeShamir(t, logger, storage, basePort) -// rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys -// cluster.EnsureCoresSealed(t) -// -// storage.Cleanup(t, cluster) -// cluster.Cleanup() -// -// runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -//} -// -//func TestTransit(t *testing.T) { -// testVariousBackends(t, testTransit, true) -//} -// -//func testTransit( -// t *testing.T, logger hclog.Logger, -// storage teststorage.ReusableStorage, basePort int) { -// -// tss := sealhelper.NewTransitSealServer(t) -// defer tss.Cleanup() -// tss.MakeKey(t, "transit-seal-key") -// -// rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) -// -// runTransit(t, logger, storage, basePort, rootToken, transitSeal) -//} +func TestShamir(t *testing.T) { + testVariousBackends(t, testShamir, true) +} + +func testShamir( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + cluster, _ := initializeShamir(t, logger, storage, basePort) + rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys + + cluster.EnsureCoresSealed(t) + storage.Cleanup(t, cluster) + cluster.Cleanup() + + runShamir(t, logger, storage, basePort, rootToken, barrierKeys) +} + +func TestTransit(t *testing.T) { + testVariousBackends(t, testTransit, true) +} + +func testTransit( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + tss := sealhelper.NewTransitSealServer(t) + defer tss.Cleanup() + tss.MakeKey(t, "transit-seal-key") + + cluster, _, transitSeal := initializeTransit(t, 
logger, storage, basePort, tss) + rootToken := cluster.RootToken + + cluster.EnsureCoresSealed(t) + storage.Cleanup(t, cluster) + cluster.Cleanup() + + runTransit(t, logger, storage, basePort, rootToken, transitSeal) +} From 5e9d375c576b43b42e986de799f6f9abf7216396 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 09:10:11 -0400 Subject: [PATCH 69/86] stub out TestSealMigration_TransitToShamir_Post14 --- .../sealmigration/seal_migration_test.go | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 370156fe1e8e..c5030fd5fe3a 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -303,6 +303,80 @@ func migrateFromShamirToTransit_Post14( return transitSeal } +// TestSealMigration_TransitToShamir_Post14 tests transit-to-shamir seal +// migration, using the post-1.4 method of bring individual nodes in the +// cluster to do the migration. +func TestSealMigration_TransitToShamir_Post14(t *testing.T) { + // Note that we do not test integrated raft storage since this is + // a pre-1.4 test. + testVariousBackends(t, testSealMigrationTransitToShamir_Post14, false) +} + +func testSealMigrationTransitToShamir_Post14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int) { + + // Create the transit server. + tss := sealhelper.NewTransitSealServer(t) + defer func() { + if tss != nil { + tss.Cleanup() + } + }() + tss.MakeKey(t, "transit-seal-key") + + // Initialize the backend with transit. + cluster, opts, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + + // Migrate the backend from transit to shamir + migrateFromTransitToShamir_Post14(t, logger, storage, basePort, tss, transitSeal, cluster, opts) + cluster.EnsureCoresSealed(t) + storage.Cleanup(t, cluster) + cluster.Cleanup() + + // Now that migration is done, we can nuke the transit server, since we + // can unseal without it. + tss.Cleanup() + tss = nil + + //// Run the backend with shamir. Note that the recovery keys are now the + //// barrier keys. + //runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) +} + +func migrateFromTransitToShamir_Post14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, basePort int, + tss *sealhelper.TransitSealServer, transitSeal vault.Seal, + cluster *vault.TestCluster, opts *vault.TestClusterOptions) { + + // This will give us Shamir + opts.SealFunc = nil + + // Restart each follower with the new config, and migrate to Shamir. + rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys + for i := 1; i < numTestCores; i++ { + cluster.StopCore(t, i) + if storage.IsRaft { + teststorage.CloseRaftStorage(t, cluster, i) + } + + // N.B. Providing an UnwrapSeal puts us in migration mode. This is the + // equivalent of doing the following in HCL: + // seal "transit" { + // // ... 
+ // disabled = "true" + // } + cluster.Cores[i].CoreConfig.UnwrapSeal = transitSeal + + cluster.RestartCore(t, i, opts) + + cluster.Cores[i].Client.SetToken(rootToken) + unsealMigrate(t, cluster.Cores[i].Client, recoveryKeys, true) + time.Sleep(5 * time.Second) + } +} + func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { for i, key := range keys { From 641c40d0c92d531b4e7465bed554bcff479c5934 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 10:12:22 -0400 Subject: [PATCH 70/86] set seals properly for transit->shamir migration --- .../seal_migration_pre14_test.go | 6 ++ .../sealmigration/seal_migration_test.go | 80 ++++++++++--------- 2 files changed, 50 insertions(+), 36 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_pre14_test.go b/vault/external_tests/sealmigration/seal_migration_pre14_test.go index 7b9e7f334756..8f0e14cd5422 100644 --- a/vault/external_tests/sealmigration/seal_migration_pre14_test.go +++ b/vault/external_tests/sealmigration/seal_migration_pre14_test.go @@ -42,6 +42,8 @@ func testSealMigrationTransitToShamir_Pre14( tss.MakeKey(t, "transit-seal-key") // Initialize the backend with transit. + fmt.Printf("-----------------------------------------------------------------\n") + fmt.Printf("initializeTransit\n") cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys cluster.EnsureCoresSealed(t) @@ -49,6 +51,8 @@ func testSealMigrationTransitToShamir_Pre14( cluster.Cleanup() // Migrate the backend from transit to shamir + fmt.Printf("-----------------------------------------------------------------\n") + fmt.Printf("migrateFromTransitToShamir_Pre14\n") migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) // Now that migration is done, we can nuke the transit server, since we @@ -58,6 +62,8 @@ func testSealMigrationTransitToShamir_Pre14( // Run the backend with shamir. Note that the recovery keys are now the // barrier keys. 
+ fmt.Printf("-----------------------------------------------------------------\n") + fmt.Printf("runShamir\n") runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) } diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index c5030fd5fe3a..8a19009f6888 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/base64" "fmt" - "sync/atomic" "testing" "time" @@ -43,40 +42,40 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 51000) }) - t.Run("file", func(t *testing.T) { - t.Parallel() + //t.Run("file", func(t *testing.T) { + // t.Parallel() - logger := logger.Named("file") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeFileBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 52000) - }) + // logger := logger.Named("file") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeFileBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 52000) + //}) - t.Run("consul", func(t *testing.T) { - t.Parallel() + //t.Run("consul", func(t *testing.T) { + // t.Parallel() - logger := logger.Named("consul") - storage, cleanup := teststorage.MakeReusableStorage( - t, logger, teststorage.MakeConsulBackend(t, logger)) - defer cleanup() - tf(t, logger, storage, 53000) - }) + // logger := logger.Named("consul") + // storage, cleanup := teststorage.MakeReusableStorage( + // t, logger, teststorage.MakeConsulBackend(t, logger)) + // defer cleanup() + // tf(t, logger, storage, 53000) + //}) - if includeRaft { - t.Run("raft", func(t *testing.T) { - t.Parallel() + //if includeRaft { + // t.Run("raft", func(t *testing.T) { + // t.Parallel() - logger := logger.Named("raft") + // logger := logger.Named("raft") - atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) - addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010) + // atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) + // addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010) - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) - defer cleanup() - tf(t, logger, storage, 54000) - }) - } + // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) + // defer cleanup() + // tf(t, logger, storage, 54000) + // }) + //} } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -326,9 +325,13 @@ func testSealMigrationTransitToShamir_Post14( tss.MakeKey(t, "transit-seal-key") // Initialize the backend with transit. + fmt.Printf("-----------------------------------------------------------------\n") + fmt.Printf("initializeTransit\n") cluster, opts, transitSeal := initializeTransit(t, logger, storage, basePort, tss) // Migrate the backend from transit to shamir + fmt.Printf("-----------------------------------------------------------------\n") + fmt.Printf("migrateFromTransitToShamir_Post14\n") migrateFromTransitToShamir_Post14(t, logger, storage, basePort, tss, transitSeal, cluster, opts) cluster.EnsureCoresSealed(t) storage.Cleanup(t, cluster) @@ -341,6 +344,8 @@ func testSealMigrationTransitToShamir_Post14( //// Run the backend with shamir. Note that the recovery keys are now the //// barrier keys. 
+ //fmt.Printf("-----------------------------------------------------------------\n") + //fmt.Printf("runShamir\n") //runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) } @@ -350,16 +355,11 @@ func migrateFromTransitToShamir_Post14( tss *sealhelper.TransitSealServer, transitSeal vault.Seal, cluster *vault.TestCluster, opts *vault.TestClusterOptions) { - // This will give us Shamir opts.SealFunc = nil - - // Restart each follower with the new config, and migrate to Shamir. - rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys for i := 1; i < numTestCores; i++ { - cluster.StopCore(t, i) - if storage.IsRaft { - teststorage.CloseRaftStorage(t, cluster, i) - } + + // Nil out the seal so it will be initialized as shamir. + cluster.Cores[i].CoreConfig.Seal = nil // N.B. Providing an UnwrapSeal puts us in migration mode. This is the // equivalent of doing the following in HCL: @@ -368,7 +368,15 @@ func migrateFromTransitToShamir_Post14( // disabled = "true" // } cluster.Cores[i].CoreConfig.UnwrapSeal = transitSeal + } + // Restart each follower with the new config, and migrate to Shamir. + rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys + for i := 1; i < numTestCores; i++ { + cluster.StopCore(t, i) + if storage.IsRaft { + teststorage.CloseRaftStorage(t, cluster, i) + } cluster.RestartCore(t, i, opts) cluster.Cores[i].Client.SetToken(rootToken) From 45df4113e3f5ad2b12f3fd2587b20061fe49ecfe Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 10:21:17 -0400 Subject: [PATCH 71/86] migrateFromTransitToShamir_Post14 works for inmem --- .../sealmigration/seal_migration_test.go | 63 ++++++++++++++++++- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 8a19009f6888..8ac59d941ec0 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "fmt" + "runtime/debug" "testing" "time" @@ -292,12 +293,14 @@ func migrateFromShamirToTransit_Post14( } // Make sure the seal configs were updated correctly. - b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) + b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) if err != nil { t.Fatal(err) } - verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) - verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) + verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) + if r != nil { + t.Fatalf("expected nil recovery config, got: %#v", r) + } return transitSeal } @@ -383,6 +386,59 @@ func migrateFromTransitToShamir_Post14( unsealMigrate(t, cluster.Cores[i].Client, recoveryKeys, true) time.Sleep(5 * time.Second) } + + // Bring down the leader + cluster.StopCore(t, 0) + if storage.IsRaft { + teststorage.CloseRaftStorage(t, cluster, 0) + } + + // Wait for the followers to establish a new leader + leaderIdx, err := testhelpers.AwaitLeader(t, cluster) + if err != nil { + t.Fatal(err) + } + if leaderIdx == 0 { + t.Fatalf("Core 0 cannot be the leader right now") + } + leader := cluster.Cores[leaderIdx] + leader.Client.SetToken(rootToken) + + // Bring core 0 back up + cluster.RestartCore(t, 0, opts) + cluster.Cores[0].Client.SetToken(rootToken) + + // TODO is this a bug? Why is raft different here? 
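// One hedged way to resolve the TODO above without branching on the storage
// type: ask the restarted node whether it is actually in seal-migration mode
// via the unauthenticated seal-status endpoint, and only set Migrate on the
// unseal requests when a migration is pending. The helper name is
// illustrative and assumes this file's existing api and base64 imports.
func unsealPossiblyMigrating(t *testing.T, client *api.Client, keys [][]byte) {
	t.Helper()

	status, err := client.Sys().SealStatus()
	if err != nil {
		t.Fatal(err)
	}

	for _, key := range keys {
		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
			Key:     base64.StdEncoding.EncodeToString(key),
			Migrate: status.Migration,
		})
		if err != nil {
			t.Fatal(err)
		}
		if resp != nil && !resp.Sealed {
			return
		}
	}
	t.Fatalf("expected node to unseal after %d key shares", len(keys))
}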
+ unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + if storage.IsRaft { + unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + } else { + unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + //unseal(t, cluster.Cores[0].Client, recoveryKeys) + } + + time.Sleep(5 * time.Second) + + // Wait for migration to finish. + awaitMigration(t, leader.Client) + + // This is apparently necessary for the raft cluster to get itself + // situated. + if storage.IsRaft { + time.Sleep(15 * time.Second) + if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + t.Fatal(err) + } + } + + // Read the secret + secret, err := leader.Client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } } func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { @@ -461,6 +517,7 @@ func unseal(t *testing.T, client *api.Client, keys [][]byte) { if i < keyThreshold-1 { // Not enough keys have been provided yet. if err != nil { + debug.PrintStack() t.Fatal(err) } } else { From 5f80eee59b7a5aad60aa71213d801ffc5d316f7e Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 10:47:03 -0400 Subject: [PATCH 72/86] migrateFromTransitToShamir_Post14 works for raft --- .../sealmigration/seal_migration_test.go | 63 +++++++++---------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 8ac59d941ec0..a522f6845c53 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -4,7 +4,7 @@ import ( "context" "encoding/base64" "fmt" - "runtime/debug" + "sync/atomic" "testing" "time" @@ -43,40 +43,40 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { tf(t, logger, storage, 51000) }) - //t.Run("file", func(t *testing.T) { - // t.Parallel() + t.Run("file", func(t *testing.T) { + t.Parallel() - // logger := logger.Named("file") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeFileBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 52000) - //}) + logger := logger.Named("file") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeFileBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 52000) + }) - //t.Run("consul", func(t *testing.T) { - // t.Parallel() + t.Run("consul", func(t *testing.T) { + t.Parallel() - // logger := logger.Named("consul") - // storage, cleanup := teststorage.MakeReusableStorage( - // t, logger, teststorage.MakeConsulBackend(t, logger)) - // defer cleanup() - // tf(t, logger, storage, 53000) - //}) + logger := logger.Named("consul") + storage, cleanup := teststorage.MakeReusableStorage( + t, logger, teststorage.MakeConsulBackend(t, logger)) + defer cleanup() + tf(t, logger, storage, 53000) + }) - //if includeRaft { - // t.Run("raft", func(t *testing.T) { - // t.Parallel() + if includeRaft { + t.Run("raft", func(t *testing.T) { + t.Parallel() - // logger := logger.Named("raft") + logger := logger.Named("raft") - // atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) - // addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010) + atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) + addressProvider := 
testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010) - // storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) - // defer cleanup() - // tf(t, logger, storage, 54000) - // }) - //} + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) + defer cleanup() + tf(t, logger, storage, 54000) + }) + } } // TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal @@ -311,7 +311,7 @@ func migrateFromShamirToTransit_Post14( func TestSealMigration_TransitToShamir_Post14(t *testing.T) { // Note that we do not test integrated raft storage since this is // a pre-1.4 test. - testVariousBackends(t, testSealMigrationTransitToShamir_Post14, false) + testVariousBackends(t, testSealMigrationTransitToShamir_Post14, true) } func testSealMigrationTransitToShamir_Post14( @@ -409,12 +409,10 @@ func migrateFromTransitToShamir_Post14( cluster.Cores[0].Client.SetToken(rootToken) // TODO is this a bug? Why is raft different here? - unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) if storage.IsRaft { - unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + unseal(t, cluster.Cores[0].Client, recoveryKeys) } else { unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) - //unseal(t, cluster.Cores[0].Client, recoveryKeys) } time.Sleep(5 * time.Second) @@ -517,7 +515,6 @@ func unseal(t *testing.T, client *api.Client, keys [][]byte) { if i < keyThreshold-1 { // Not enough keys have been provided yet. if err != nil { - debug.PrintStack() t.Fatal(err) } } else { From dcc8bccad489a1da44e968be63dd9db8720d7439 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 11:36:44 -0400 Subject: [PATCH 73/86] use base ports per-test --- .../seal_migration_pre14_test.go | 8 +- .../sealmigration/seal_migration_test.go | 79 +++++-------------- 2 files changed, 20 insertions(+), 67 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_pre14_test.go b/vault/external_tests/sealmigration/seal_migration_pre14_test.go index 8f0e14cd5422..cdfc368f61ea 100644 --- a/vault/external_tests/sealmigration/seal_migration_pre14_test.go +++ b/vault/external_tests/sealmigration/seal_migration_pre14_test.go @@ -25,7 +25,7 @@ import ( func TestSealMigration_TransitToShamir_Pre14(t *testing.T) { // Note that we do not test integrated raft storage since this is // a pre-1.4 test. - testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false) + testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, basePort_TransitToShamir_Pre14, false) } func testSealMigrationTransitToShamir_Pre14( @@ -42,8 +42,6 @@ func testSealMigrationTransitToShamir_Pre14( tss.MakeKey(t, "transit-seal-key") // Initialize the backend with transit. 
- fmt.Printf("-----------------------------------------------------------------\n") - fmt.Printf("initializeTransit\n") cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys cluster.EnsureCoresSealed(t) @@ -51,8 +49,6 @@ func testSealMigrationTransitToShamir_Pre14( cluster.Cleanup() // Migrate the backend from transit to shamir - fmt.Printf("-----------------------------------------------------------------\n") - fmt.Printf("migrateFromTransitToShamir_Pre14\n") migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys) // Now that migration is done, we can nuke the transit server, since we @@ -62,8 +58,6 @@ func testSealMigrationTransitToShamir_Pre14( // Run the backend with shamir. Note that the recovery keys are now the // barrier keys. - fmt.Printf("-----------------------------------------------------------------\n") - fmt.Printf("runShamir\n") runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) } diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index a522f6845c53..59d570b77e07 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -25,11 +25,16 @@ const ( numTestCores = 5 keyShares = 3 keyThreshold = 3 + + basePort_ShamirToTransit_Pre14 = 51000 + basePort_TransitToShamir_Pre14 = 52000 + basePort_ShamirToTransit_Post14 = 53000 + basePort_TransitToShamir_Post14 = 54000 ) type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) -func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { +func testVariousBackends(t *testing.T, tf testFunc, basePort int, includeRaft bool) { logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) @@ -40,7 +45,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeInmemBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 51000) + tf(t, logger, storage, basePort+100) }) t.Run("file", func(t *testing.T) { @@ -50,7 +55,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeFileBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 52000) + tf(t, logger, storage, basePort+200) }) t.Run("consul", func(t *testing.T) { @@ -60,7 +65,7 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { storage, cleanup := teststorage.MakeReusableStorage( t, logger, teststorage.MakeConsulBackend(t, logger)) defer cleanup() - tf(t, logger, storage, 53000) + tf(t, logger, storage, basePort+300) }) if includeRaft { @@ -68,13 +73,14 @@ func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) { t.Parallel() logger := logger.Named("raft") + raftBasePort := basePort + 400 atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) - addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, 54010) + addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, raftBasePort+10) storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) defer cleanup() - tf(t, logger, storage, 54000) + tf(t, logger, storage, raftBasePort) }) } } @@ -85,7 +91,7 @@ func testVariousBackends(t *testing.T, tf 
testFunc, includeRaft bool) { func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) { // Note that we do not test integrated raft storage since this is // a pre-1.4 test. - testVariousBackends(t, testSealMigrationShamirToTransit_Pre14, false) + testVariousBackends(t, testSealMigrationShamirToTransit_Pre14, basePort_ShamirToTransit_Pre14, false) } func testSealMigrationShamirToTransit_Pre14( @@ -183,7 +189,7 @@ func migrateFromShamirToTransit_Pre14( // migration, using the post-1.4 method of bring individual nodes in the cluster // to do the migration. func TestSealMigration_ShamirToTransit_Post14(t *testing.T) { - testVariousBackends(t, testSealMigrationShamirToTransit_Post14, true) + testVariousBackends(t, testSealMigrationShamirToTransit_Post14, basePort_ShamirToTransit_Post14, true) } func testSealMigrationShamirToTransit_Post14( @@ -311,7 +317,7 @@ func migrateFromShamirToTransit_Post14( func TestSealMigration_TransitToShamir_Post14(t *testing.T) { // Note that we do not test integrated raft storage since this is // a pre-1.4 test. - testVariousBackends(t, testSealMigrationTransitToShamir_Post14, true) + testVariousBackends(t, testSealMigrationTransitToShamir_Post14, basePort_TransitToShamir_Post14, true) } func testSealMigrationTransitToShamir_Post14( @@ -328,13 +334,10 @@ func testSealMigrationTransitToShamir_Post14( tss.MakeKey(t, "transit-seal-key") // Initialize the backend with transit. - fmt.Printf("-----------------------------------------------------------------\n") - fmt.Printf("initializeTransit\n") cluster, opts, transitSeal := initializeTransit(t, logger, storage, basePort, tss) + rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys // Migrate the backend from transit to shamir - fmt.Printf("-----------------------------------------------------------------\n") - fmt.Printf("migrateFromTransitToShamir_Post14\n") migrateFromTransitToShamir_Post14(t, logger, storage, basePort, tss, transitSeal, cluster, opts) cluster.EnsureCoresSealed(t) storage.Cleanup(t, cluster) @@ -345,11 +348,9 @@ func testSealMigrationTransitToShamir_Post14( tss.Cleanup() tss = nil - //// Run the backend with shamir. Note that the recovery keys are now the - //// barrier keys. - //fmt.Printf("-----------------------------------------------------------------\n") - //fmt.Printf("runShamir\n") - //runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) + // Run the backend with shamir. Note that the recovery keys are now the + // barrier keys. 
+ runShamir(t, logger, storage, basePort, rootToken, recoveryKeys) } func migrateFromTransitToShamir_Post14( @@ -767,45 +768,3 @@ func runTransit( // Seal the cluster cluster.EnsureCoresSealed(t) } - -//-------------------------------------------------------------- - -func TestShamir(t *testing.T) { - testVariousBackends(t, testShamir, true) -} - -func testShamir( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - cluster, _ := initializeShamir(t, logger, storage, basePort) - rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys - - cluster.EnsureCoresSealed(t) - storage.Cleanup(t, cluster) - cluster.Cleanup() - - runShamir(t, logger, storage, basePort, rootToken, barrierKeys) -} - -func TestTransit(t *testing.T) { - testVariousBackends(t, testTransit, true) -} - -func testTransit( - t *testing.T, logger hclog.Logger, - storage teststorage.ReusableStorage, basePort int) { - - tss := sealhelper.NewTransitSealServer(t) - defer tss.Cleanup() - tss.MakeKey(t, "transit-seal-key") - - cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss) - rootToken := cluster.RootToken - - cluster.EnsureCoresSealed(t) - storage.Cleanup(t, cluster) - cluster.Cleanup() - - runTransit(t, logger, storage, basePort, rootToken, transitSeal) -} From 04dc99ddbd257700b1bb7d66077d7acb949b1fc3 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 12:12:45 -0400 Subject: [PATCH 74/86] fix seal verification test code --- .../sealmigration/seal_migration_test.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 59d570b77e07..ec01d65adcbc 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -299,14 +299,12 @@ func migrateFromShamirToTransit_Post14( } // Make sure the seal configs were updated correctly. - b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) + b, r, err := leader.Core.PhysicalSealConfigs(context.Background()) if err != nil { t.Fatal(err) } - verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) - if r != nil { - t.Fatalf("expected nil recovery config, got: %#v", r) - } + verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1) + verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0) return transitSeal } @@ -438,6 +436,16 @@ func migrateFromTransitToShamir_Post14( if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { t.Fatal(diff) } + + // Make sure the seal configs were updated correctly. 
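// For context, a hedged sketch of the kind of assertions the
// verifyBarrierConfig helper used below makes against a persisted
// vault.SealConfig; the real helper may check additional fields.
func checkSealConfigSketch(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
	t.Helper()
	if cfg.Type != sealType {
		t.Fatalf("expected seal type %q, got %q", sealType, cfg.Type)
	}
	if cfg.SecretShares != shares {
		t.Fatalf("expected %d secret shares, got %d", shares, cfg.SecretShares)
	}
	if cfg.SecretThreshold != threshold {
		t.Fatalf("expected secret threshold %d, got %d", threshold, cfg.SecretThreshold)
	}
	if cfg.StoredShares != stored {
		t.Fatalf("expected %d stored shares, got %d", stored, cfg.StoredShares)
	}
}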
+ b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) + if err != nil { + t.Fatal(err) + } + verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) + if r != nil { + t.Fatalf("expected nil recovery config, got: %#v", r) + } } func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { From 00f3c2327b205051ccff784c4f588a6c1c7b3b86 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 5 Jun 2020 12:47:12 -0400 Subject: [PATCH 75/86] simplify seal migration test suite --- .../sealmigration/seal_migration_test.go | 141 ++++++++---------- 1 file changed, 64 insertions(+), 77 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index ec01d65adcbc..7387a5a2e8cf 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -234,60 +234,11 @@ func migrateFromShamirToTransit_Post14( // Restart each follower with the new config, and migrate to Transit. // Note that the barrier keys are being used as recovery keys. - rootToken, recoveryKeys := cluster.RootToken, cluster.BarrierKeys - for i := 1; i < numTestCores; i++ { - cluster.StopCore(t, i) - if storage.IsRaft { - teststorage.CloseRaftStorage(t, cluster, i) - } - cluster.RestartCore(t, i, opts) - - cluster.Cores[i].Client.SetToken(rootToken) - unsealMigrate(t, cluster.Cores[i].Client, recoveryKeys, true) - time.Sleep(5 * time.Second) - } - - // Bring down the leader - cluster.StopCore(t, 0) - if storage.IsRaft { - teststorage.CloseRaftStorage(t, cluster, 0) - } - - // Wait for the followers to establish a new leader - leaderIdx, err := testhelpers.AwaitLeader(t, cluster) - if err != nil { - t.Fatal(err) - } - if leaderIdx == 0 { - t.Fatalf("Core 0 cannot be the leader right now") - } + leaderIdx := migratePost14( + t, logger, storage, cluster, opts, + cluster.RootToken, cluster.BarrierKeys, + migrateShamirToTransit) leader := cluster.Cores[leaderIdx] - leader.Client.SetToken(rootToken) - - // Bring core 0 back up - cluster.RestartCore(t, 0, opts) - cluster.Cores[0].Client.SetToken(rootToken) - - // TODO is this a bug? Why is raft different here? - if storage.IsRaft { - unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) - } else { - unseal(t, cluster.Cores[0].Client, recoveryKeys) - } - - time.Sleep(5 * time.Second) - - // Wait for migration to finish. - awaitMigration(t, leader.Client) - - // This is apparently necessary for the raft cluster to get itself - // situated. - if storage.IsRaft { - time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { - t.Fatal(err) - } - } // Read the secret secret, err := leader.Client.Logical().Read("secret/foo") @@ -373,7 +324,48 @@ func migrateFromTransitToShamir_Post14( } // Restart each follower with the new config, and migrate to Shamir. - rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys + leaderIdx := migratePost14( + t, logger, storage, cluster, opts, + cluster.RootToken, cluster.RecoveryKeys, + migrateTransitToShamir) + leader := cluster.Cores[leaderIdx] + + // Read the secret + secret, err := leader.Client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { + t.Fatal(diff) + } + + // Make sure the seal configs were updated correctly. 
+ b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) + if err != nil { + t.Fatal(err) + } + verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) + if r != nil { + t.Fatalf("expected nil recovery config, got: %#v", r) + } +} + +type migrationDirection int + +const ( + migrateShamirToTransit migrationDirection = iota + migrateTransitToShamir +) + +func migratePost14( + t *testing.T, logger hclog.Logger, + storage teststorage.ReusableStorage, + cluster *vault.TestCluster, opts *vault.TestClusterOptions, + rootToken string, recoveryKeys [][]byte, + migrate migrationDirection, +) int { + + // Restart each follower with the new config, and migrate. for i := 1; i < numTestCores; i++ { cluster.StopCore(t, i) if storage.IsRaft { @@ -407,11 +399,23 @@ func migrateFromTransitToShamir_Post14( cluster.RestartCore(t, 0, opts) cluster.Cores[0].Client.SetToken(rootToken) - // TODO is this a bug? Why is raft different here? - if storage.IsRaft { - unseal(t, cluster.Cores[0].Client, recoveryKeys) - } else { - unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + // TODO look into why this is different for different migration directions, + // and why it is swapped for raft. + switch migrate { + case migrateShamirToTransit: + if storage.IsRaft { + unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + } else { + unseal(t, cluster.Cores[0].Client, recoveryKeys) + } + case migrateTransitToShamir: + if storage.IsRaft { + unseal(t, cluster.Cores[0].Client, recoveryKeys) + } else { + unsealMigrate(t, cluster.Cores[0].Client, recoveryKeys, true) + } + default: + t.Fatalf("unreachable") } time.Sleep(5 * time.Second) @@ -428,24 +432,7 @@ func migrateFromTransitToShamir_Post14( } } - // Read the secret - secret, err := leader.Client.Logical().Read("secret/foo") - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 { - t.Fatal(diff) - } - - // Make sure the seal configs were updated correctly. - b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background()) - if err != nil { - t.Fatal(err) - } - verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1) - if r != nil { - t.Fatalf("expected nil recovery config, got: %#v", r) - } + return leaderIdx } func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) { From 22214327c26291e939ebf0ab3ce59e9536f53969 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Mon, 8 Jun 2020 10:59:47 -0400 Subject: [PATCH 76/86] simplify test suite --- .../sealmigration/seal_migration_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 7387a5a2e8cf..ed8e95e474e2 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -231,13 +231,14 @@ func migrateFromShamirToTransit_Post14( transitSeal = tss.MakeSeal(t, "transit-seal-key") return transitSeal } + modifyCoreConfig := func(tcc *vault.TestClusterCore) {} // Restart each follower with the new config, and migrate to Transit. // Note that the barrier keys are being used as recovery keys. 
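	// NOTE (illustrative sketch, not part of the patch): unsealMigrate, whose
	// signature appears elsewhere in this test file, is what turns these
	// restarts into a seal migration rather than a plain unseal. A rough
	// approximation is shown below, assuming the api.UnsealOpts Migrate flag
	// and base64-encoded key shares; the helper name is hypothetical and the
	// transitServerAvailable error handling is omitted.
	//
	// func unsealMigrateSketch(t *testing.T, client *api.Client, keys [][]byte) {
	// 	t.Helper()
	// 	for _, key := range keys {
	// 		// Each share is submitted with Migrate set; once the threshold
	// 		// is reached the node unseals and migrates its seal config.
	// 		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
	// 			Key:     base64.StdEncoding.EncodeToString(key),
	// 			Migrate: true,
	// 		})
	// 		if err != nil {
	// 			t.Fatal(err)
	// 		}
	// 		if resp != nil && !resp.Sealed {
	// 			return
	// 		}
	// 	}
	// 	t.Fatal("expected node to unseal after submitting all key shares")
	// }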
leaderIdx := migratePost14( t, logger, storage, cluster, opts, cluster.RootToken, cluster.BarrierKeys, - migrateShamirToTransit) + migrateShamirToTransit, modifyCoreConfig) leader := cluster.Cores[leaderIdx] // Read the secret @@ -309,10 +310,9 @@ func migrateFromTransitToShamir_Post14( cluster *vault.TestCluster, opts *vault.TestClusterOptions) { opts.SealFunc = nil - for i := 1; i < numTestCores; i++ { - + modifyCoreConfig := func(tcc *vault.TestClusterCore) { // Nil out the seal so it will be initialized as shamir. - cluster.Cores[i].CoreConfig.Seal = nil + tcc.CoreConfig.Seal = nil // N.B. Providing an UnwrapSeal puts us in migration mode. This is the // equivalent of doing the following in HCL: @@ -320,14 +320,14 @@ func migrateFromTransitToShamir_Post14( // // ... // disabled = "true" // } - cluster.Cores[i].CoreConfig.UnwrapSeal = transitSeal + tcc.CoreConfig.UnwrapSeal = transitSeal } // Restart each follower with the new config, and migrate to Shamir. leaderIdx := migratePost14( t, logger, storage, cluster, opts, cluster.RootToken, cluster.RecoveryKeys, - migrateTransitToShamir) + migrateTransitToShamir, modifyCoreConfig) leader := cluster.Cores[leaderIdx] // Read the secret @@ -363,6 +363,7 @@ func migratePost14( cluster *vault.TestCluster, opts *vault.TestClusterOptions, rootToken string, recoveryKeys [][]byte, migrate migrationDirection, + modifyCoreConfig func(*vault.TestClusterCore), ) int { // Restart each follower with the new config, and migrate. @@ -371,6 +372,7 @@ func migratePost14( if storage.IsRaft { teststorage.CloseRaftStorage(t, cluster, i) } + modifyCoreConfig(cluster.Cores[i]) cluster.RestartCore(t, i, opts) cluster.Cores[i].Client.SetToken(rootToken) From a1f6bd710eb54b6d722f4d3dd5f63a2bd9961f9a Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Wed, 10 Jun 2020 15:15:16 -0400 Subject: [PATCH 77/86] cleanup test suite --- .../sealmigration/seal_migration_test.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index ed8e95e474e2..b3ee651285b8 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -132,7 +132,8 @@ func migrateFromShamirToTransit_Pre14( var transitSeal vault.Seal var conf = vault.CoreConfig{ - Logger: logger.Named("migrateFromShamirToTransit"), + Logger: logger.Named("migrateFromShamirToTransit"), + DisablePerformanceStandby: true, } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, @@ -553,7 +554,8 @@ func initializeShamir( // Start the cluster var conf = vault.CoreConfig{ - Logger: logger.Named("initializeShamir"), + Logger: logger.Named("initializeShamir"), + DisablePerformanceStandby: true, } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, @@ -600,7 +602,8 @@ func runShamir( // Start the cluster var conf = vault.CoreConfig{ - Logger: logger.Named("runShamir"), + Logger: logger.Named("runShamir"), + DisablePerformanceStandby: true, } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, @@ -663,7 +666,8 @@ func initializeTransit( // Start the cluster var conf = vault.CoreConfig{ - Logger: logger.Named("initializeTransit"), + Logger: logger.Named("initializeTransit"), + DisablePerformanceStandby: true, } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, @@ -712,8 +716,9 @@ func runTransit( // Start the cluster var conf = vault.CoreConfig{ - 
Logger: logger.Named("runTransit"), - Seal: transitSeal, + Logger: logger.Named("runTransit"), + DisablePerformanceStandby: true, + Seal: transitSeal, } var opts = vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, From 8087a0709e025b8fa5cb1a7f05059eda953b92aa Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 11 Jun 2020 13:07:43 -0400 Subject: [PATCH 78/86] use explicit ports below 30000 --- vault/external_tests/sealmigration/seal_migration_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index b3ee651285b8..d342bfc529e4 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -26,10 +26,10 @@ const ( keyShares = 3 keyThreshold = 3 - basePort_ShamirToTransit_Pre14 = 51000 - basePort_TransitToShamir_Pre14 = 52000 - basePort_ShamirToTransit_Post14 = 53000 - basePort_TransitToShamir_Post14 = 54000 + basePort_ShamirToTransit_Pre14 = 20000 + basePort_TransitToShamir_Pre14 = 21000 + basePort_ShamirToTransit_Post14 = 22000 + basePort_TransitToShamir_Post14 = 23000 ) type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) From 8c3375a6afd8415fd650cd9f99116ea08c5e69f7 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Fri, 12 Jun 2020 09:46:18 -0400 Subject: [PATCH 79/86] simplify use of numTestCores --- .../sealmigration/seal_migration_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index d342bfc529e4..8f3a91af1633 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -368,7 +368,7 @@ func migratePost14( ) int { // Restart each follower with the new config, and migrate. - for i := 1; i < numTestCores; i++ { + for i := 1; i < len(cluster.Cores); i++ { cluster.StopCore(t, i) if storage.IsRaft { teststorage.CloseRaftStorage(t, cluster, i) @@ -430,7 +430,7 @@ func migratePost14( // situated. if storage.IsRaft { time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) } } @@ -573,13 +573,13 @@ func initializeShamir( // Unseal if storage.IsRaft { testhelpers.JoinRaftFollowers(t, cluster, false) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) // Write a secret that we will read back out later. _, err := client.Logical().Write( @@ -633,13 +633,13 @@ func runShamir( // This is apparently necessary for the raft cluster to get itself // situated. 
time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) } } else { cluster.UnsealCores(t) } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) // Read the secret secret, err := client.Logical().Read("secret/foo") @@ -690,11 +690,11 @@ func initializeTransit( if storage.IsRaft { testhelpers.JoinRaftFollowers(t, cluster, true) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) } } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) // Write a secret that we will read back out later. _, err := client.Logical().Write( @@ -748,7 +748,7 @@ func runTransit( // This is apparently necessary for the raft cluster to get itself // situated. time.Sleep(15 * time.Second) - if err := testhelpers.VerifyRaftConfiguration(leader, numTestCores); err != nil { + if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) } } else { @@ -756,7 +756,7 @@ func runTransit( t.Fatal(err) } } - testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores) + testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) // Read the secret secret, err := client.Logical().Read("secret/foo") From a6140cb6f890655eb9fb58ce464cc0fbab00035d Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 12:20:47 -0400 Subject: [PATCH 80/86] Update vault/external_tests/sealmigration/seal_migration_test.go Co-authored-by: Calvin Leung Huang --- vault/external_tests/sealmigration/seal_migration_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 8f3a91af1633..81c0541d5532 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -266,7 +266,6 @@ func migrateFromShamirToTransit_Post14( // migration, using the post-1.4 method of bring individual nodes in the // cluster to do the migration. func TestSealMigration_TransitToShamir_Post14(t *testing.T) { - // Note that we do not test integrated raft storage since this is // a pre-1.4 test. testVariousBackends(t, testSealMigrationTransitToShamir_Post14, basePort_TransitToShamir_Post14, true) } From f85159b7ab56bb38b7b27a45bec405ee998a11c2 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 12:21:04 -0400 Subject: [PATCH 81/86] Update vault/external_tests/sealmigration/seal_migration_test.go Co-authored-by: Calvin Leung Huang --- vault/external_tests/sealmigration/seal_migration_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 81c0541d5532..0a590f9d15a9 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -266,7 +266,6 @@ func migrateFromShamirToTransit_Post14( // migration, using the post-1.4 method of bring individual nodes in the // cluster to do the migration. func TestSealMigration_TransitToShamir_Post14(t *testing.T) { - // a pre-1.4 test. 
testVariousBackends(t, testSealMigrationTransitToShamir_Post14, basePort_TransitToShamir_Post14, true) } From fdff2c4ae89561411dda24a9d62545483f9c73e5 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 12:30:16 -0400 Subject: [PATCH 82/86] clean up imports --- vault/testing.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vault/testing.go b/vault/testing.go index 8f69118007cf..db2c50faf162 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -33,7 +33,6 @@ import ( "golang.org/x/net/http2" cleanhttp "github.com/hashicorp/go-cleanhttp" - hclog "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog" raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/api" @@ -1016,7 +1015,7 @@ type TestClusterOptions struct { // core in cluster will have 0, second 1, etc. // If the backend is shared across the cluster (i.e. is not Raft) then it // should return nil when coreIdx != 0. - PhysicalFactory func(t testing.T, coreIdx int, logger hclog.Logger) *PhysicalBackendBundle + PhysicalFactory func(t testing.T, coreIdx int, logger log.Logger) *PhysicalBackendBundle // FirstCoreNumber is used to assign a unique number to each core within // a multi-cluster setup. FirstCoreNumber int @@ -1056,7 +1055,7 @@ type certInfo struct { } type TestLogger struct { - hclog.Logger + log.Logger Path string File *os.File } From ebf9716613138d3b65374a9f78a28ee0db29752c Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 12:34:07 -0400 Subject: [PATCH 83/86] rename to StartCore() --- vault/external_tests/sealmigration/seal_migration_test.go | 4 ++-- vault/testing.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 0a590f9d15a9..1a40f29a252f 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -372,7 +372,7 @@ func migratePost14( teststorage.CloseRaftStorage(t, cluster, i) } modifyCoreConfig(cluster.Cores[i]) - cluster.RestartCore(t, i, opts) + cluster.StartCore(t, i, opts) cluster.Cores[i].Client.SetToken(rootToken) unsealMigrate(t, cluster.Cores[i].Client, recoveryKeys, true) @@ -397,7 +397,7 @@ func migratePost14( leader.Client.SetToken(rootToken) // Bring core 0 back up - cluster.RestartCore(t, 0, opts) + cluster.StartCore(t, 0, opts) cluster.Cores[0].Client.SetToken(rootToken) // TODO look into why this is different for different migration directions, diff --git a/vault/testing.go b/vault/testing.go index db2c50faf162..eb97c813d3d0 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1590,7 +1590,7 @@ func (cluster *TestCluster) StopCore(t testing.T, idx int) { // Restart a TestClusterCore that was stopped, by replacing the // underlying Core. 
-func (cluster *TestCluster) RestartCore(t testing.T, idx int, opts *TestClusterOptions) { +func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOptions) { t.Helper() if idx < 0 || idx > len(cluster.Cores) { From c1cf0855ecb5aed55f48d2510410bc222ef5f351 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 12:37:12 -0400 Subject: [PATCH 84/86] Update vault/testing.go Co-authored-by: Calvin Leung Huang --- vault/testing.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vault/testing.go b/vault/testing.go index eb97c813d3d0..21d7e307e2a5 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1686,7 +1686,6 @@ func (testCluster *TestCluster) newCore( testCluster.Logger.Info("created physical backend", "instance", idx) coreConfig.Physical = physBundle.Backend localConfig.Physical = physBundle.Backend - //base.Physical = physBundle.Backend haBackend := physBundle.HABackend if haBackend == nil { if ha, ok := physBundle.Backend.(physical.HABackend); ok { From d78f80e7c7dbf4f388ad062cc7eced50c15c7cf8 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 13:06:23 -0400 Subject: [PATCH 85/86] simplify test suite --- helper/testhelpers/testhelpers.go | 56 ----------------- .../sealmigration/seal_migration_test.go | 62 ++++++++++++++++++- vault/testing.go | 8 --- 3 files changed, 60 insertions(+), 66 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 3647cefea160..6bae6e68708e 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -449,62 +449,6 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) } -// JoinRaftFollowers unseals the leader, and then joins-and-unseals the -// followers one at a time. We assume that the ServerAddressProvider has -// already been installed on all the nodes. -func JoinRaftFollowers(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) { - - leader := cluster.Cores[0] - - cluster.UnsealCore(t, leader) - vault.TestWaitActive(t, leader.Core) - - leaderInfos := []*raft.LeaderJoinInfo{ - &raft.LeaderJoinInfo{ - LeaderAPIAddr: leader.Client.Address(), - TLSConfig: leader.TLSConfig, - }, - } - - // Join followers - for i := 1; i < len(cluster.Cores); i++ { - core := cluster.Cores[i] - _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) - if err != nil { - t.Fatal(err) - } - - if useStoredKeys { - // For autounseal, the raft backend is not initialized right away - // after the join. We need to wait briefly before we can unseal. - awaitUnsealWithStoredKeys(t, core) - } else { - cluster.UnsealCore(t, core) - } - } - - WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) -} - -func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) { - - timeout := time.Now().Add(30 * time.Second) - for { - if time.Now().After(timeout) { - t.Fatal("raft join: timeout waiting for core to unseal") - } - // Its actually ok for an error to happen here the first couple of - // times -- it means the raft join hasn't gotten around to initializing - // the backend yet. - err := core.UnsealWithStoredKeys(context.Background()) - if err == nil { - return - } - core.Logger().Warn("raft join: failed to unseal core", "error", err) - time.Sleep(time.Second) - } -} - // HardcodedServerAddressProvider is a ServerAddressProvider that uses // a hardcoded map of raft node addresses. 
// diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 1a40f29a252f..12dde238b762 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -13,10 +13,12 @@ import ( "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/testhelpers" sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal" "github.com/hashicorp/vault/helper/testhelpers/teststorage" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault" ) @@ -570,7 +572,7 @@ func initializeShamir( // Unseal if storage.IsRaft { - testhelpers.JoinRaftFollowers(t, cluster, false) + joinRaftFollowers(t, cluster, false) if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) } @@ -686,7 +688,7 @@ func initializeTransit( // Join raft if storage.IsRaft { - testhelpers.JoinRaftFollowers(t, cluster, true) + joinRaftFollowers(t, cluster, true) if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil { t.Fatal(err) @@ -768,3 +770,59 @@ func runTransit( // Seal the cluster cluster.EnsureCoresSealed(t) } + +// joinRaftFollowers unseals the leader, and then joins-and-unseals the +// followers one at a time. We assume that the ServerAddressProvider has +// already been installed on all the nodes. +func joinRaftFollowers(t *testing.T, cluster *vault.TestCluster, useStoredKeys bool) { + + leader := cluster.Cores[0] + + cluster.UnsealCore(t, leader) + vault.TestWaitActive(t, leader.Core) + + leaderInfos := []*raft.LeaderJoinInfo{ + &raft.LeaderJoinInfo{ + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig, + }, + } + + // Join followers + for i := 1; i < len(cluster.Cores); i++ { + core := cluster.Cores[i] + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) + if err != nil { + t.Fatal(err) + } + + if useStoredKeys { + // For autounseal, the raft backend is not initialized right away + // after the join. We need to wait briefly before we can unseal. + awaitUnsealWithStoredKeys(t, core) + } else { + cluster.UnsealCore(t, core) + } + } + + testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) +} + +func awaitUnsealWithStoredKeys(t *testing.T, core *vault.TestClusterCore) { + + timeout := time.Now().Add(30 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("raft join: timeout waiting for core to unseal") + } + // Its actually ok for an error to happen here the first couple of + // times -- it means the raft join hasn't gotten around to initializing + // the backend yet. + err := core.UnsealWithStoredKeys(context.Background()) + if err == nil { + return + } + core.Logger().Warn("raft join: failed to unseal core", "error", err) + time.Sleep(time.Second) + } +} diff --git a/vault/testing.go b/vault/testing.go index 21d7e307e2a5..6f11fceea92b 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1034,14 +1034,6 @@ type TestClusterOptions struct { // RaftAddressProvider should only be specified if the underlying physical // storage is Raft. 
RaftAddressProvider raftlib.ServerAddressProvider - - // JoinRaftFollowers specifies that each follower core will be joined to - // the raft cluster just before it is unsealed in InitializeCores(). - // - // If SkipInit is true, then JoinRaftFollowers has no effect. - // JoinRaftFollowers should only be specified if the underlying physical - // storage is Raft. - JoinRaftFollowers bool } var DefaultNumCores = 3 From 0528d78ea49fc1d3eb018a343e74ebb5954ed788 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Tue, 16 Jun 2020 13:22:12 -0400 Subject: [PATCH 86/86] clean up tests --- vault/external_tests/sealmigration/seal_migration_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 12dde238b762..68fa55e665e3 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -421,8 +421,6 @@ func migratePost14( t.Fatalf("unreachable") } - time.Sleep(5 * time.Second) - // Wait for migration to finish. awaitMigration(t, leader.Client)
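
// NOTE (illustrative sketch, not part of the patch series): with the fixed
// 5-second sleeps removed above, the tests rely on awaitMigration (defined
// elsewhere in this test file) to block until the seal migration has actually
// been committed. A minimal version would presumably poll the seal-status
// endpoint until its Migration flag clears; the helper name, timeout, and
// poll interval below are assumptions made for illustration only.
//
// func awaitMigrationSketch(t *testing.T, client *api.Client) {
// 	t.Helper()
// 	deadline := time.Now().Add(60 * time.Second)
// 	for time.Now().Before(deadline) {
// 		resp, err := client.Sys().SealStatus()
// 		if err != nil {
// 			t.Fatal(err)
// 		}
// 		// SealStatusResponse.Migration reports whether a seal migration
// 		// is still in progress on this node.
// 		if !resp.Migration {
// 			return
// 		}
// 		time.Sleep(time.Second)
// 	}
// 	t.Fatal("timeout waiting for seal migration to complete")
// }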