diff --git a/.circleci/config.yml b/.circleci/config.yml index 88e8a9b74348..658fc2212c75 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,7 +35,9 @@ jobs: command: make ci-verify install-ui-dependencies: docker: - - image: node:10-buster + - environment: + JOBS: 2 + image: node:10-buster shell: /usr/bin/env bash -euo pipefail -c working_directory: /go/src/github.com/hashicorp/vault steps: @@ -98,7 +100,9 @@ jobs: - GOTESTSUM_VERSION: 0.3.3 test-ui: docker: - - image: node:10-buster + - environment: + JOBS: 2 + image: node:10-buster shell: /usr/bin/env bash -euo pipefail -c working_directory: /go/src/github.com/hashicorp/vault resource_class: medium+ @@ -136,7 +140,9 @@ jobs: path: ui/test-results test-ui-browserstack: docker: - - image: node:10-buster + - environment: + JOBS: 2 + image: node:10-buster shell: /usr/bin/env bash -euo pipefail -c working_directory: /go/src/github.com/hashicorp/vault resource_class: medium+ @@ -299,6 +305,29 @@ jobs: - GO_VERSION: 1.12.14 - GO111MODULE: 'off' - GOTESTSUM_VERSION: 0.3.3 + website-docker-image: + docker: + - image: circleci/buildpack-deps + shell: /usr/bin/env bash -euo pipefail -c + steps: + - checkout + - setup_remote_docker + - run: + command: | + echo 'export PACKAGE_LOCK_CHANGED=$(git diff --name-only $(git log --pretty=format:'%h' -n1 HEAD~1)...HEAD | grep -c website/package-lock.json)' >> $BASH_ENV + name: Diff package-lock.json + - run: + command: | + if [ "$CIRCLE_BRANCH" = "master" ] && [ $PACKAGE_LOCK_CHANGED -gt 0 ]; then + cd website/ + docker build -t hashicorp/vault-website:$CIRCLE_SHA1 . + docker tag hashicorp/vault-website:$CIRCLE_SHA1 hashicorp/vault-website:latest + docker login -u $DOCKER_USER -p $DOCKER_PASS + docker push hashicorp/vault-website + else + echo "Not building a new website docker image - branch is not master and/or dependencies have not changed." + fi + name: Build Docker Image if Necessary workflows: ci: jobs: @@ -326,6 +355,7 @@ workflows: - test-go-race: requires: - build-go-dev + - website-docker-image version: 2 # Original config.yml file: @@ -426,7 +456,9 @@ workflows: # working_directory: /go/src/github.com/hashicorp/vault # node: # docker: -# - image: node:10-buster +# - environment: +# JOBS: 2 +# image: node:10-buster # shell: /usr/bin/env bash -euo pipefail -c # working_directory: /go/src/github.com/hashicorp/vault # python: @@ -566,6 +598,29 @@ workflows: # export PATH=\"${PWD}\"/bin:${PATH} # make test-ui-browserstack # name: Run Browserstack Tests +# website-docker-image: +# docker: +# - image: circleci/buildpack-deps +# shell: /usr/bin/env bash -euo pipefail -c +# steps: +# - checkout +# - setup_remote_docker +# - run: +# command: | +# echo 'export PACKAGE_LOCK_CHANGED=$(git diff --name-only $(git log --pretty=format:'%h' -n1 HEAD~1)...HEAD | grep -c website/package-lock.json)' >> $BASH_ENV +# name: Diff package-lock.json +# - run: +# command: | +# if [ \"$CIRCLE_BRANCH\" = \"master\" ] && [ $PACKAGE_LOCK_CHANGED -gt 0 ]; then +# cd website/ +# docker build -t hashicorp/vault-website:$CIRCLE_SHA1 . 
+# docker tag hashicorp/vault-website:$CIRCLE_SHA1 hashicorp/vault-website:latest +# docker login -u $DOCKER_USER -p $DOCKER_PASS +# docker push hashicorp/vault-website +# else +# echo \"Not building a new website docker image - branch is not master and/or dependencies have not changed.\" +# fi +# name: Build Docker Image if Necessary # references: # cache: # go-sum: go-sum-v1-{{ checksum \"go.sum\" }} @@ -599,4 +654,5 @@ workflows: # - build-go-dev # - test-go-race: # requires: -# - build-go-dev \ No newline at end of file +# - build-go-dev +# - website-docker-image \ No newline at end of file diff --git a/.circleci/config/@config.yml b/.circleci/config/@config.yml index 54288b372515..47a6abb16f02 100644 --- a/.circleci/config/@config.yml +++ b/.circleci/config/@config.yml @@ -36,6 +36,8 @@ executors: node: docker: - image: *NODE_IMAGE + environment: + JOBS: 2 shell: /usr/bin/env bash -euo pipefail -c working_directory: /go/src/github.com/hashicorp/vault python: diff --git a/.circleci/config/jobs/website-docker-image.yml b/.circleci/config/jobs/website-docker-image.yml new file mode 100644 index 000000000000..fb7299853204 --- /dev/null +++ b/.circleci/config/jobs/website-docker-image.yml @@ -0,0 +1,22 @@ +docker: + - image: circleci/buildpack-deps +shell: /usr/bin/env bash -euo pipefail -c +steps: + - checkout + - setup_remote_docker + - run: + name: Diff package-lock.json + command: | + echo 'export PACKAGE_LOCK_CHANGED=$(git diff --name-only $(git log --pretty=format:'%h' -n1 HEAD~1)...HEAD | grep -c website/package-lock.json)' >> $BASH_ENV + - run: + name: Build Docker Image if Necessary + command: | + if [ "$CIRCLE_BRANCH" = "master" ] && [ $PACKAGE_LOCK_CHANGED -gt 0 ]; then + cd website/ + docker build -t hashicorp/vault-website:$CIRCLE_SHA1 . + docker tag hashicorp/vault-website:$CIRCLE_SHA1 hashicorp/vault-website:latest + docker login -u $DOCKER_USER -p $DOCKER_PASS + docker push hashicorp/vault-website + else + echo "Not building a new website docker image - branch is not master and/or dependencies have not changed." 
+# fi diff --git a/.circleci/config/workflows/ci.yml b/.circleci/config/workflows/ci.yml index 8716556ccaab..af4ff61f7464 100644 --- a/.circleci/config/workflows/ci.yml +++ b/.circleci/config/workflows/ci.yml @@ -15,12 +15,13 @@ jobs: - install-ui-dependencies - build-go-dev filters: - branches: - # Forked pull requests have CIRCLE_BRANCH set to pull/XXX - ignore: /pull\/[0-9]+/ + branches: + # Forked pull requests have CIRCLE_BRANCH set to pull/XXX + ignore: /pull\/[0-9]+/ - test-go: requires: - build-go-dev - test-go-race: requires: - build-go-dev + - website-docker-image diff --git a/CHANGELOG.md b/CHANGELOG.md index f510048d87f1..f262b47014b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,6 @@ IMPROVEMENTS: -* auth/azure: Fix Azure compute client to use correct base URL [AZURE-27] * auth/jwt: Additional OIDC callback parameters available for CLI logins [JWT-80 & JWT-86] * auth/jwt: Bound claims may be optionally configured using globs [JWT-89] * core: Separate out service discovery interface from storage interface to allow @@ -10,19 +9,40 @@ IMPROVEMENTS: * cli: Incorrect TLS configuration will now correctly fail [GH-8025] * secrets/gcp: Allow specifying the TTL for a service key [GCP-54] * secrets/gcp: Add support for rotating root keys [GCP-53] +* secrets/nomad: Add support to specify TLS options per Nomad backend [GH-8083] +* storage/raft: Nodes in a raft cluster can now be given a list of possible leader + addresses that they will continuously retry joining, further automating + the join process [GH-7856] BUG FIXES: -* plugin: Fix issue where a plugin unwrap request potentially used an expired token [GH-8058] -* secrets/database: Fix issue where a manual static role rotation could potentially panic [GH-8098] -* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [GH-8040] * ui: Update headless Chrome flag to fix `yarn run test:oss` [GH-8035] -* ui: Change `.box-radio` height to min-height to prevent overflow issues [GH-8065] ## 1.3.2 (Unreleased) +IMPROVEMENTS: + * auth/aws: Add aws metadata to identity alias [GH-7975] + BUG FIXES: + +* auth/azure: Fix Azure compute client to use correct base URL [AZURE-27] +* auth/ldap: Fix renewal of tokens without configured policies that are + generated by an LDAP login [GH-8072] +* auth/okta: Fix renewal of tokens without configured policies that are + generated by an Okta login [GH-8072] +* plugin: Fix issue where a plugin unwrap request potentially used an expired token [GH-8058] +* replication: Fix issue where a forwarded request from a performance/standby node could run into + a timeout +* secrets/database: Fix issue where a manual static role rotation could potentially panic [GH-8098] +* secrets/database: Fix issue where a manual root credential rotation request is not forwarded + to the primary node [GH-8125] +* secrets/database: Fix issue where a manual static role rotation request is not forwarded + to the primary node [GH-8126] +* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [GH-8040] * ui: Fix deleting namespaces [GH-8132] +* ui: Fix Error handler on kv-secret edit and kv-secret view pages [GH-8133] +* ui: Fix OIDC callback to check storage [GH-7929]
+* ui: Change `.box-radio` height to min-height to prevent overflow issues [GH-8065] ## 1.3.1 (December 18th, 2019) diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go index 7096f6cb4feb..75046ec0d2a0 100644 --- a/builtin/credential/aws/cli.go +++ b/builtin/credential/aws/cli.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/awsutil" ) @@ -40,7 +41,8 @@ func GenerateLoginData(creds *credentials.Credentials, headerValue, configuredRe // Use the credentials we've found to construct an STS session region, err := awsutil.GetRegion(configuredRegion) if err != nil { - return nil, err + hclog.Default().Warn(fmt.Sprintf("defaulting region to %q due to %s", awsutil.DefaultRegion, err.Error())) + region = awsutil.DefaultRegion } stsSession, err := session.NewSessionWithOptions(session.Options{ Config: aws.Config{ diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go index a798a1c2497e..18123323e04f 100644 --- a/builtin/credential/ldap/path_login.go +++ b/builtin/credential/ldap/path_login.go @@ -133,9 +133,10 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f password := req.Auth.InternalData["password"].(string) loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password) - if len(loginPolicies) == 0 { + if err != nil || (resp != nil && resp.IsError()) { return resp, err } + finalPolicies := cfg.TokenPolicies if len(loginPolicies) > 0 { finalPolicies = append(finalPolicies, loginPolicies...) diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index 8538a7a3c957..b1fb6b857414 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -118,7 +118,7 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f } loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password) - if len(loginPolicies) == 0 { + if err != nil || (resp != nil && resp.IsError()) { return resp, err } diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 32d21a231181..56c99f0f15a7 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -62,6 +62,15 @@ func (b *backend) client(ctx context.Context, s logical.Storage) (*api.Client, e if conf.Token != "" { nomadConf.SecretID = conf.Token } + if conf.CACert != "" { + nomadConf.TLSConfig.CACertPEM = []byte(conf.CACert) + } + if conf.ClientCert != "" { + nomadConf.TLSConfig.ClientCertPEM = []byte(conf.ClientCert) + } + if conf.ClientKey != "" { + nomadConf.TLSConfig.ClientKeyPEM = []byte(conf.ClientKey) + } } client, err := api.NewClient(nomadConf) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 3943ae932834..3044394d4ca3 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -28,6 +28,21 @@ func pathConfigAccess(b *backend) *framework.Path { Type: framework.TypeInt, Description: "Max length for name of generated Nomad tokens", }, + "ca_cert": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `CA certificate to use when verifying Nomad server certificate, +must be x509 PEM encoded.`, + }, + "client_cert": &framework.FieldSchema{ + Type: framework.TypeString, + Description: 
`Client certificate used for Nomad's TLS communication, +must be x509 PEM encoded and if this is set you need to also set client_key.`, + }, + "client_key": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Client key used for Nomad's TLS communication, +must be x509 PEM encoded and if this is set you need to also set client_cert.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -101,6 +116,18 @@ func (b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Reques if ok { conf.Token = token.(string) } + caCert, ok := data.GetOk("ca_cert") + if ok { + conf.CACert = caCert.(string) + } + clientCert, ok := data.GetOk("client_cert") + if ok { + conf.ClientCert = clientCert.(string) + } + clientKey, ok := data.GetOk("client_key") + if ok { + conf.ClientKey = clientKey.(string) + } conf.MaxTokenNameLength = data.Get("max_token_name_length").(int) @@ -126,4 +153,7 @@ type accessConfig struct { Address string `json:"address"` Token string `json:"token"` MaxTokenNameLength int `json:"max_token_name_length"` + CACert string `json:"ca_cert"` + ClientCert string `json:"client_cert"` + ClientKey string `json:"client_key"` } diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 62460e1180e1..065fdf30ba3c 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -35,6 +35,7 @@ const ( testOTPKeyType = "otp" testDynamicKeyType = "dynamic" testCIDRList = "127.0.0.1/32" + testAtRoleName = "test@RoleName" testDynamicRoleName = "testDynamicRoleName" testOTPRoleName = "testOTPRoleName" testKeyName = "testKeyName" @@ -256,6 +257,7 @@ func TestSSHBackend_Lookup(t *testing.T) { resp2 := []string{testOTPRoleName} resp3 := []string{testDynamicRoleName, testOTPRoleName} resp4 := []string{testDynamicRoleName} + resp5 := []string{testAtRoleName} logicaltest.Test(t, logicaltest.TestCase{ AcceptanceTest: true, LogicalFactory: testingFactory, @@ -270,6 +272,10 @@ func TestSSHBackend_Lookup(t *testing.T) { testLookupRead(t, data, resp4), testRoleDelete(t, testDynamicRoleName), testLookupRead(t, data, resp1), + testRoleWrite(t, testAtRoleName, testDynamicRoleData), + testLookupRead(t, data, resp5), + testRoleDelete(t, testAtRoleName), + testLookupRead(t, data, resp1), }, }) } @@ -289,12 +295,29 @@ func TestSSHBackend_RoleList(t *testing.T) { }, }, } + resp3 := map[string]interface{}{ + "keys": []string{testAtRoleName, testOTPRoleName}, + "key_info": map[string]interface{}{ + testOTPRoleName: map[string]interface{}{ + "key_type": testOTPKeyType, + }, + testAtRoleName: map[string]interface{}{ + "key_type": testOTPKeyType, + }, + }, + } logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: testingFactory, Steps: []logicaltest.TestStep{ testRoleList(t, resp1), testRoleWrite(t, testOTPRoleName, testOTPRoleData), testRoleList(t, resp2), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testRoleList(t, resp3), + testRoleDelete(t, testAtRoleName), + testRoleList(t, resp2), + testRoleDelete(t, testOTPRoleName), + testRoleList(t, resp1), }, }) } @@ -319,6 +342,8 @@ func TestSSHBackend_DynamicKeyCreate(t *testing.T) { testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), testCredsWrite(t, testDynamicRoleName, data, false), + testRoleWrite(t, testAtRoleName, testDynamicRoleData), + testCredsWrite(t, testAtRoleName, data, false), }, }) } @@ -343,6 +368,10 @@ func TestSSHBackend_OTPRoleCrud(t *testing.T) { testRoleRead(t, testOTPRoleName, 
respOTPRoleData), testRoleDelete(t, testOTPRoleName), testRoleRead(t, testOTPRoleName, nil), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testRoleRead(t, testAtRoleName, respOTPRoleData), + testRoleDelete(t, testAtRoleName), + testRoleRead(t, testAtRoleName, nil), }, }) } @@ -374,6 +403,10 @@ func TestSSHBackend_DynamicRoleCrud(t *testing.T) { testRoleRead(t, testDynamicRoleName, respDynamicRoleData), testRoleDelete(t, testDynamicRoleName), testRoleRead(t, testDynamicRoleName, nil), + testRoleWrite(t, testAtRoleName, testDynamicRoleData), + testRoleRead(t, testAtRoleName, respDynamicRoleData), + testRoleDelete(t, testAtRoleName), + testRoleRead(t, testAtRoleName, nil), }, }) } @@ -405,6 +438,8 @@ func TestSSHBackend_OTPCreate(t *testing.T) { Steps: []logicaltest.TestStep{ testRoleWrite(t, testOTPRoleName, testOTPRoleData), testCredsWrite(t, testOTPRoleName, data, false), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testCredsWrite(t, testAtRoleName, data, false), }, }) } @@ -1108,14 +1143,17 @@ func testRoleRead(t *testing.T, roleName string, expected map[string]interface{} if err := mapstructure.Decode(resp.Data, &d); err != nil { return fmt.Errorf("error decoding response:%s", err) } - if roleName == testOTPRoleName { + switch d.KeyType { + case "otp": if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] { return fmt.Errorf("data mismatch. bad: %#v", resp) } - } else { + case "dynamic": if d.AdminUser != expected["admin_user"] || d.CIDRList != expected["cidr_list"] || d.KeyName != expected["key"] || d.KeyType != expected["key_type"] { return fmt.Errorf("data mismatch. bad: %#v", resp) } + default: + return fmt.Errorf("unknown key type. bad: %#v", resp) } return nil }, diff --git a/builtin/logical/ssh/path_creds_create.go b/builtin/logical/ssh/path_creds_create.go index b9b9a6410405..f6fa7e766043 100644 --- a/builtin/logical/ssh/path_creds_create.go +++ b/builtin/logical/ssh/path_creds_create.go @@ -20,7 +20,7 @@ type sshOTP struct { func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("role"), + Pattern: "creds/" + framework.GenericNameWithAtRegex("role"), Fields: map[string]*framework.FieldSchema{ "role": &framework.FieldSchema{ Type: framework.TypeString, diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 450d76844779..6cd20cba6aa5 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -69,7 +69,7 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("role"), + Pattern: "roles/" + framework.GenericNameWithAtRegex("role"), Fields: map[string]*framework.FieldSchema{ "role": &framework.FieldSchema{ Type: framework.TypeString, diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index a64edfa2d9a8..95e12af1a42d 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -37,7 +37,7 @@ type creationBundle struct { func pathSign(b *backend) *framework.Path { return &framework.Path{ - Pattern: "sign/" + framework.GenericNameRegex("role"), + Pattern: "sign/" + framework.GenericNameWithAtRegex("role"), Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathSign, diff --git a/command/seal_migration_test.go b/command/seal_migration_test.go index 
ca1d5693cf1b..c4e724132ab9 100644 --- a/command/seal_migration_test.go +++ b/command/seal_migration_test.go @@ -7,19 +7,125 @@ import ( "encoding/base64" "testing" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/shamir" + + "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" - "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" physInmem "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/hashicorp/vault/shamir" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" ) +func TestSealMigrationAutoToShamir(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name()) + phys, err := physInmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + haPhys, err := physInmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + autoSeal := vault.NewAutoSeal(seal.NewTestSeal(nil)) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + Seal: autoSeal, + Physical: phys, + HAPhysical: haPhys.(physical.HABackend), + DisableSealWrap: true, + }, &vault.TestClusterOptions{ + Logger: logger, + HandlerFunc: vaulthttp.Handler, + SkipInit: true, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + initResp, err := client.Sys().Init(&api.InitRequest{ + RecoveryShares: 5, + RecoveryThreshold: 3, + }) + if err != nil { + t.Fatal(err) + } + + testhelpers.WaitForActiveNode(t, cluster) + + keys := initResp.RecoveryKeysB64 + rootToken := initResp.RootToken + core := cluster.Cores[0].Core + + client.SetToken(rootToken) + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + + shamirSeal := vault.NewDefaultSeal(&seal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: logger.Named("shamir"), + }), + }) + shamirSeal.SetCore(core) + + if err := adjustCoreForSealMigration(logger, core, shamirSeal, autoSeal); err != nil { + t.Fatal(err) + } + + var resp *api.SealStatusResponse + unsealOpts := &api.UnsealOpts{} + for _, key := range keys { + unsealOpts.Key = key + unsealOpts.Migrate = false + resp, err = client.Sys().UnsealWithOptions(unsealOpts) + if err == nil { + t.Fatal("expected error due to lack of migrate parameter") + } + unsealOpts.Migrate = true + resp, err = client.Sys().UnsealWithOptions(unsealOpts) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected response") + } + if !resp.Sealed { + break + } + } + if resp.Sealed { + t.Fatalf("expected unsealed state; got %#v", *resp) + } + + // Seal and unseal again to verify that things are working fine + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + unsealOpts.Migrate = false + for _, key := range keys { + unsealOpts.Key = key + resp, err = client.Sys().UnsealWithOptions(unsealOpts) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected response") + } + if !resp.Sealed { + break + } + } + if resp.Sealed { + t.Fatalf("expected unsealed state; got %#v", *resp) + } +} + func TestSealMigration(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name()) phys, err := physInmem.NewInmem(nil, logger) @@ -30,13 +136,13 @@ func TestSealMigration(t *testing.T) { if err != nil { t.Fatal(err) } - shamirwrapper := 
vault.NewDefaultSeal(&seal.Access{ + wrapper := vault.NewDefaultSeal(&seal.Access{ Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ Logger: logger.Named("shamir"), }), }) coreConfig := &vault.CoreConfig{ - Seal: shamirwrapper, + Seal: wrapper, Physical: phys, HAPhysical: haPhys.(physical.HABackend), DisableSealWrap: true, @@ -149,6 +255,41 @@ func TestSealMigration(t *testing.T) { t.Fatalf("expected unsealed state; got %#v", *resp) } + // Make sure the seal configs were updated correctly + b, err := autoSeal.BarrierConfig(context.Background()) + if err != nil { + t.Fatal(err) + } + if b.Type != autoSeal.BarrierType() { + t.Fatalf("bad seal config: %#v", b) + } + if b.SecretShares != 1 { + t.Fatalf("bad seal config: %#v", b) + } + if b.SecretThreshold != 1 { + t.Fatalf("bad seal config: %#v", b) + } + if b.StoredShares != 1 { + t.Fatalf("bad seal config: %#v", b) + } + + r, err := autoSeal.RecoveryConfig(context.Background()) + if err != nil { + t.Fatal(err) + } + if r.Type != wrapping.Shamir { + t.Fatalf("bad seal config: %#v", r) + } + if r.SecretShares != 2 { + t.Fatalf("bad seal config: %#v", r) + } + if r.SecretThreshold != 2 { + t.Fatalf("bad seal config: %#v", r) + } + if r.StoredShares != 0 { + t.Fatalf("bad seal config: %#v", r) + } + cluster.Cleanup() cluster.Cores = nil } @@ -243,6 +384,41 @@ func TestSealMigration(t *testing.T) { t.Fatalf("expected unsealed state; got %#v", *resp) } + // Make sure the seal configs were updated correctly + b, err := altSeal.BarrierConfig(context.Background()) + if err != nil { + t.Fatal(err) + } + if b.Type != altSeal.BarrierType() { + t.Fatalf("bad seal config: %#v", b) + } + if b.SecretShares != 1 { + t.Fatalf("bad seal config: %#v", b) + } + if b.SecretThreshold != 1 { + t.Fatalf("bad seal config: %#v", b) + } + if b.StoredShares != 1 { + t.Fatalf("bad seal config: %#v", b) + } + + r, err := altSeal.RecoveryConfig(context.Background()) + if err != nil { + t.Fatal(err) + } + if r.Type != wrapping.Shamir { + t.Fatalf("bad seal config: %#v", r) + } + if r.SecretShares != 2 { + t.Fatalf("bad seal config: %#v", r) + } + if r.SecretThreshold != 2 { + t.Fatalf("bad seal config: %#v", r) + } + if r.StoredShares != 0 { + t.Fatalf("bad seal config: %#v", r) + } + cluster.Cleanup() cluster.Cores = nil } @@ -257,7 +433,13 @@ func TestSealMigration(t *testing.T) { core := cluster.Cores[0].Core - if err := adjustCoreForSealMigration(logger, core, shamirwrapper, altSeal); err != nil { + wrapper := vault.NewDefaultSeal(&seal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: logger.Named("shamir"), + }), + }) + + if err := adjustCoreForSealMigration(logger, core, wrapper, altSeal); err != nil { t.Fatal(err) } @@ -286,6 +468,29 @@ func TestSealMigration(t *testing.T) { t.Fatalf("expected unsealed state; got %#v", *resp) } + // Make sure the seal configs were updated correctly + b, err := wrapper.BarrierConfig(context.Background()) + if err != nil { + t.Fatal(err) + } + if b.Type != wrapping.Shamir { + t.Fatalf("bad seal config: %#v", b) + } + if b.SecretShares != 2 { + t.Fatalf("bad seal config: %#v", b) + } + if b.SecretThreshold != 2 { + t.Fatalf("bad seal config: %#v", b) + } + if b.StoredShares != 1 { + t.Fatalf("bad seal config: %#v", b) + } + + _, err = wrapper.RecoveryConfig(context.Background()) + if err == nil { + t.Fatal("expected error") + } + cluster.Cleanup() cluster.Cores = nil } @@ -293,7 +498,7 @@ func TestSealMigration(t *testing.T) { { logger.SetLevel(hclog.Trace) logger.Info("integ: verify autoseal is off 
and the expected key shares work") - coreConfig.Seal = shamirwrapper + coreConfig.Seal = wrapper cluster := vault.NewTestCluster(t, coreConfig, clusterConfig) cluster.Start() defer cluster.Cleanup() diff --git a/command/server.go b/command/server.go index add994b7dd4f..b586e7a86d4d 100644 --- a/command/server.go +++ b/command/server.go @@ -1505,6 +1505,15 @@ CLUSTER_SYNTHESIS_COMPLETE: }() } + // When the underlying storage is raft, kick off retry join if it was specified + // in the configuration + if config.Storage.Type == "raft" { + if err := core.InitiateRetryJoin(context.Background()); err != nil { + c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error())) + return 1 + } + } + // Perform service discovery registrations and initialization of // HTTP server after the verifyOnly check. diff --git a/command/server/config.go b/command/server/config.go index e78656aec1e6..6c6b7963691e 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -3,6 +3,7 @@ package server import ( "errors" "fmt" + "github.com/hashicorp/vault/sdk/helper/jsonutil" "io" "io/ioutil" "os" @@ -730,11 +731,25 @@ func ParseStorage(result *Config, list *ast.ObjectList, name string) error { key = item.Keys[0].Token.Value().(string) } - var m map[string]string - if err := hcl.DecodeObject(&m, item.Val); err != nil { + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) } + m := make(map[string]string) + for key, val := range config { + valStr, ok := val.(string) + if ok { + m[key] = valStr + continue + } + valBytes, err := jsonutil.EncodeJSON(val) + if err != nil { + return err + } + m[key] = string(valBytes) + } + // Pull out the redirect address since it's common to all backends var redirectAddr string if v, ok := m["redirect_addr"]; ok { diff --git a/command/server/config_test.go b/command/server/config_test.go index 808df7bffe58..e287a1a1251d 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -37,3 +37,7 @@ func TestParseListeners(t *testing.T) { func TestParseEntropy(t *testing.T) { testParseEntropy(t, true) } + +func TestConfigRaftRetryJoin(t *testing.T) { + testConfigRaftRetryJoin(t) +} diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index b639a7e01b84..54f479a158d0 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -12,6 +12,38 @@ import ( "github.com/hashicorp/hcl/hcl/ast" ) +func testConfigRaftRetryJoin(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/raft_retry_join.hcl") + if err != nil { + t.Fatal(err) + } + retryJoinConfig := `[{"leader_api_addr":"http://127.0.0.1:8200"},{"leader_api_addr":"http://127.0.0.2:8200"},{"leader_api_addr":"http://127.0.0.3:8200"}]` + "\n" + expected := &Config{ + Listeners: []*Listener{ + { + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:8200", + }, + }, + }, + + Storage: &Storage{ + Type: "raft", + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "retry_join": retryJoinConfig, + }, + }, + DisableMlock: true, + DisableMlockRaw: true, + } + if !reflect.DeepEqual(config, expected) { + t.Fatalf("\nexpected: %#v\n actual:%#v\n", config, expected) + } +} + func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) { config, err := LoadConfigFile("./test-fixtures/config2.hcl") if err != nil { diff --git a/command/server/seal/server_seal_awskms.go 
b/command/server/seal/server_seal_awskms.go index 2d5b71ea583e..173fbaadf7e2 100644 --- a/command/server/seal/server_seal_awskms.go +++ b/command/server/seal/server_seal_awskms.go @@ -3,6 +3,7 @@ package seal import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/go-kms-wrapping/wrappers/awskms" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/sdk/logical" @@ -10,9 +11,14 @@ import ( "github.com/hashicorp/vault/vault/seal" ) -func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (vault.Seal, error) { +var getAWSKMSFunc = func(opts *wrapping.WrapperOptions, config map[string]string) (wrapping.Wrapper, map[string]string, error) { kms := awskms.NewWrapper(nil) - kmsInfo, err := kms.SetConfig(configSeal.Config) + kmsInfo, err := kms.SetConfig(config) + return kms, kmsInfo, err +} + +func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (vault.Seal, error) { + kms, kmsInfo, err := getAWSKMSFunc(nil, configSeal.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { diff --git a/command/server/seal/server_seal_transit.go b/command/server/seal/server_seal_transit.go index 0a9bc1e7a689..e8838d98a3a1 100644 --- a/command/server/seal/server_seal_transit.go +++ b/command/server/seal/server_seal_transit.go @@ -11,11 +11,17 @@ import ( "github.com/hashicorp/vault/vault/seal" ) +var GetTransitKMSFunc = func(opts *wrapping.WrapperOptions, config map[string]string) (wrapping.Wrapper, map[string]string, error) { + transitSeal := transit.NewWrapper(opts) + sealInfo, err := transitSeal.SetConfig(config) + return transitSeal, sealInfo, err +} + func configureTransitSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - transitSeal := transit.NewWrapper(&wrapping.WrapperOptions{ - Logger: logger.ResetNamed("seal-transit"), - }) - sealInfo, err := transitSeal.SetConfig(configSeal.Config) + transitSeal, sealInfo, err := GetTransitKMSFunc( + &wrapping.WrapperOptions{ + Logger: logger.ResetNamed("seal-transit"), + }, configSeal.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { diff --git a/command/server/seal/server_seal_transit_acc_test.go b/command/server/seal/server_seal_transit_acc_test.go index 5b469c04109c..2c13958fa775 100644 --- a/command/server/seal/server_seal_transit_acc_test.go +++ b/command/server/seal/server_seal_transit_acc_test.go @@ -10,9 +10,9 @@ import ( "testing" "time" - "github.com/hashicorp/go-kms-wrapping/wrappers/transit" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/server/seal" "github.com/ory/dockertest" ) @@ -29,8 +29,8 @@ func TestTransitWrapper_Lifecycle(t *testing.T) { "mount_path": mountPath, "key_name": keyName, } - s := transit.NewWrapper(nil) - _, err := s.SetConfig(wrapperConfig) + + s, _, err := seal.GetTransitKMSFunc(nil, wrapperConfig) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } @@ -86,8 +86,7 @@ func TestTransitSeal_TokenRenewal(t *testing.T) { "mount_path": mountPath, "key_name": keyName, } - s := transit.NewWrapper(nil) - _, 
err = s.SetConfig(wrapperConfig) + s, _, err := seal.GetTransitKMSFunc(nil, wrapperConfig) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } diff --git a/command/server/test-fixtures/raft_retry_join.hcl b/command/server/test-fixtures/raft_retry_join.hcl new file mode 100644 index 000000000000..a4f1f3df0139 --- /dev/null +++ b/command/server/test-fixtures/raft_retry_join.hcl @@ -0,0 +1,19 @@ +storage "raft" { + path = "/storage/path/raft" + node_id = "raft1" + retry_join = [ + { + "leader_api_addr" = "http://127.0.0.1:8200" + }, + { + "leader_api_addr" = "http://127.0.0.2:8200" + }, + { + "leader_api_addr" = "http://127.0.0.3:8200" + } + ] +} +listener "tcp" { + address = "127.0.0.1:8200" +} +disable_mlock = true diff --git a/command/server_util.go b/command/server_util.go index 0098dfbf586a..6d58028bac12 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -54,9 +54,6 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal } } - var existSeal vault.Seal - var newSeal vault.Seal - if existBarrierSealConfig.Type == barrierSeal.BarrierType() { // In this case our migration seal is set so we are using it // (potentially) for unwrapping. Set it on core for that purpose then @@ -69,15 +66,49 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal return errors.New(`Recovery seal configuration not found for existing seal`) } + if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir { + return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise") + } + + var migrationSeal vault.Seal + var newSeal vault.Seal + + // Determine the migrationSeal. This is either going to be an instance of + // shamir or the unwrapSeal. switch existBarrierSealConfig.Type { case wrapping.Shamir: // The value reflected in config is what we're going to - existSeal = vault.NewDefaultSeal(&vaultseal.Access{ + migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{ Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ Logger: logger.Named("shamir"), }), }) - newSeal = barrierSeal + + default: + // If we're not coming from Shamir we expect the previous seal to be + // in the config and disabled. + migrationSeal = unwrapSeal + } + + // newSeal will be the barrierSeal + newSeal = barrierSeal + + // Set the appropriate barrier and recovery configs. + switch { + case migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported(): + // Migrating from auto->auto, copy the configs over + newSeal.SetCachedBarrierConfig(existBarrierSealConfig) + newSeal.SetCachedRecoveryConfig(existRecoverySealConfig) + case migrationSeal.RecoveryKeySupported(): + // Migrating from auto->shamir, clone auto's recovery config and set + // stored keys to 1. + newSealConfig := existRecoverySealConfig.Clone() + newSealConfig.StoredShares = 1 + newSeal.SetCachedBarrierConfig(newSealConfig) + case newSeal.RecoveryKeySupported(): + // Migrating from shamir->auto, set a new barrier config and set + // recovery config to a clone of shamir's barrier config with stored + // keys set to 0. 
newBarrierSealConfig := &vault.SealConfig{ Type: newSeal.BarrierType(), SecretShares: 1, @@ -85,21 +116,13 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal StoredShares: 1, } newSeal.SetCachedBarrierConfig(newBarrierSealConfig) - newSeal.SetCachedRecoveryConfig(existBarrierSealConfig) - - default: - if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir { - return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise") - } - // If we're not coming from Shamir we expect the previous seal to be - // in the config and disabled. - existSeal = unwrapSeal - newSeal = barrierSeal - newSeal.SetCachedBarrierConfig(existRecoverySealConfig) + newRecoveryConfig := existBarrierSealConfig.Clone() + newRecoveryConfig.StoredShares = 0 + newSeal.SetCachedRecoveryConfig(newRecoveryConfig) } - core.SetSealsForMigration(existSeal, newSeal, unwrapSeal) + core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal) return nil } diff --git a/go.mod b/go.mod index fa0acc2259e4..92a7b4628b25 100644 --- a/go.mod +++ b/go.mod @@ -59,14 +59,14 @@ require ( github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a - github.com/hashicorp/go-rootcerts v1.0.1 + github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/gokrb5 v7.3.1-0.20191209171754-1a6fa9886ec3+incompatible github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf + github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec diff --git a/go.sum b/go.sum index 2275b2b2b398..c680772f4c0b 100644 --- a/go.sum +++ b/go.sum @@ -265,6 +265,8 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -326,6 +328,8 @@ github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod 
h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -355,8 +359,8 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0OcoGs= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf h1:U/40PQvWkaXCDdK9QHKf1pVDVcA+NIDVbzzonFGkgIA= -github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf/go.mod h1:BDngVi1f4UA6aJq9WYTgxhfWSE1+42xshvstLU2fRGk= +github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d h1:BXqsASWhyiAiEVm6FcltF0dg8XvoookQwmpHn8lstu8= +github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE= github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 h1:p+2EISNdFCnD9R+B4xCiqSn429MCFtvM41aHJDJ6qW4= github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= diff --git a/helper/testhelpers/mysql/mysqlhelper.go b/helper/testhelpers/mysql/mysqlhelper.go new file mode 100644 index 000000000000..d5a26ffbc946 --- /dev/null +++ b/helper/testhelpers/mysql/mysqlhelper.go @@ -0,0 +1,67 @@ +package mysqlhelper + +import ( + "database/sql" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/ory/dockertest" +) + +func PrepareMySQLTestContainer(t *testing.T, legacy bool, pw string) (cleanup func(), retURL string) { + if os.Getenv("MYSQL_URL") != "" { + return func() {}, os.Getenv("MYSQL_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + imageVersion := "5.7" + if legacy { + imageVersion = "5.6" + } + + resource, err := pool.Run("mysql", imageVersion, []string{"MYSQL_ROOT_PASSWORD=" + pw}) + if err != nil { + t.Fatalf("Could not start local MySQL docker container: %s", err) + } + + cleanup = func() { + docker.CleanupResource(t, pool, resource) + } + + retURL = fmt.Sprintf("root:%s@(localhost:%s)/mysql?parseTime=true", pw, resource.GetPort("3306/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + var db *sql.DB + db, err = sql.Open("mysql", retURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() + }); err != nil { + cleanup() + t.Fatalf("Could not connect to MySQL docker container: %s", err) + } + + return +} + +func TestCredsExist(t testing.TB, connURL, username, password string) error { + // Log in with the new creds + connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1) + db, err := sql.Open("mysql", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index d6cf76c60932..1a657d58ec2c 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -378,11 +378,19 @@ func RaftClusterJoinNodes(t testing.T, cluster 
*vault.TestCluster) { vault.TestWaitActive(t, leaderCore.Core) } + leaderInfo := &raft.LeaderJoinInfo{ + LeaderAPIAddr: leaderAPI, + TLSConfig: leaderCore.TLSConfig, + } + // Join core1 { core := cluster.Cores[1] core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderAPI, leaderCore.TLSConfig, false, false) + leaderInfos := []*raft.LeaderJoinInfo{ + leaderInfo, + } + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) if err != nil { t.Fatal(err) } @@ -394,7 +402,10 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { { core := cluster.Cores[2] core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderAPI, leaderCore.TLSConfig, false, false) + leaderInfos := []*raft.LeaderJoinInfo{ + leaderInfo, + } + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) if err != nil { t.Fatal(err) } diff --git a/http/sys_raft.go b/http/sys_raft.go index 75d6ccf96b20..09828a3d6bf4 100644 --- a/http/sys_raft.go +++ b/http/sys_raft.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "errors" + "github.com/hashicorp/vault/physical/raft" "io" "net/http" @@ -44,7 +45,14 @@ func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Requ } } - joined, err := core.JoinRaftCluster(context.Background(), req.LeaderAPIAddr, tlsConfig, req.Retry, req.NonVoter) + leaderInfos := []*raft.LeaderJoinInfo{ + { + LeaderAPIAddr: req.LeaderAPIAddr, + TLSConfig: tlsConfig, + Retry: req.Retry, + }, + } + joined, err := core.JoinRaftCluster(context.Background(), leaderInfos, req.NonVoter) if err != nil { respondError(w, http.StatusInternalServerError, err) return diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 2bee74e96dea..88c1c7b11f35 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -2,8 +2,11 @@ package raft import ( "context" + "crypto/tls" "errors" "fmt" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/tlsutil" "io" "io/ioutil" "os" @@ -107,6 +110,64 @@ type RaftBackend struct { permitPool *physical.PermitPool } +// LeaderJoinInfo contains information required by a node to join itself as a +// follower to an existing raft cluster +type LeaderJoinInfo struct { + // LeaderAPIAddr is the address of the leader node to connect to + LeaderAPIAddr string `json:"leader_api_addr"` + + // LeaderCACert is the CA cert of the leader node + LeaderCACert string `json:"leader_ca_cert"` + + // LeaderClientCert is the client certificate for the follower node to establish + // client authentication during TLS + LeaderClientCert string `json:"leader_client_cert"` + + // LeaderClientKey is the client key for the follower node to establish client + // authentication during TLS + LeaderClientKey string `json:"leader_client_key"` + + // Retry indicates if the join process should automatically be retried + Retry bool `json:"-"` + + // TLSConfig for the API client to use when communicating with the leader node + TLSConfig *tls.Config `json:"-"` +} + +// JoinConfig returns a list of information about possible leader nodes that +// this node can join as a follower +func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) { + config := b.conf["retry_join"] + if config == "" { + return nil, nil + } + + var leaderInfos 
[]*LeaderJoinInfo + err := jsonutil.DecodeJSON([]byte(config), &leaderInfos) + if err != nil { + return nil, errwrap.Wrapf("failed to decode retry_join config: {{err}}", err) + } + + if len(leaderInfos) == 0 { + return nil, errors.New("invalid retry_join config") + } + + for _, info := range leaderInfos { + info.Retry = true + var tlsConfig *tls.Config + var err error + if len(info.LeaderCACert) != 0 || len(info.LeaderClientCert) != 0 || len(info.LeaderClientKey) != 0 { + tlsConfig, err = tlsutil.ClientTLSConfig([]byte(info.LeaderCACert), []byte(info.LeaderClientCert), []byte(info.LeaderClientKey)) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("failed to create tls config to communicate with leader node %q: {{err}}", info.LeaderAPIAddr), err) + } + } + info.TLSConfig = tlsConfig + } + + return leaderInfos, nil +} + // EnsurePath is used to make sure a path exists func EnsurePath(path string, dir bool) error { if !dir { @@ -442,14 +503,8 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { case opts.ClusterListener == nil: return errors.New("no cluster listener provided") default: - // Load the base TLS config from the cluster listener. - baseTLSConfig, err := opts.ClusterListener.TLSConfig(ctx) - if err != nil { - return err - } - // Set the local address and localID in the streaming layer and the raft config. - streamLayer, err := NewRaftLayer(b.logger.Named("stream"), opts.TLSKeyring, opts.ClusterListener.Addr(), baseTLSConfig) + streamLayer, err := NewRaftLayer(b.logger.Named("stream"), opts.TLSKeyring, opts.ClusterListener) if err != nil { return err } diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go index e1fcedbda4b4..fcf0a0be57f8 100644 --- a/physical/raft/streamlayer.go +++ b/physical/raft/streamlayer.go @@ -110,7 +110,7 @@ func GenerateTLSKey(reader io.Reader) (*TLSKey, error) { KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, SerialNumber: big.NewInt(mathrand.Int63()), NotBefore: time.Now().Add(-30 * time.Second), - // 30 years of single-active uptime ought to be enough for anybody + // 30 years ought to be enough for anybody NotAfter: time.Now().Add(262980 * time.Hour), BasicConstraintsValid: true, IsCA: true, @@ -162,13 +162,14 @@ type raftLayer struct { dialerFunc func(string, time.Duration) (net.Conn, error) // TLS config - keyring *TLSKeyring - baseTLSConfig *tls.Config + keyring *TLSKeyring + clusterListener cluster.ClusterHook } // NewRaftLayer creates a new raftLayer object. It parses the TLS information // from the network config. 
-func NewRaftLayer(logger log.Logger, raftTLSKeyring *TLSKeyring, clusterAddr net.Addr, baseTLSConfig *tls.Config) (*raftLayer, error) { +func NewRaftLayer(logger log.Logger, raftTLSKeyring *TLSKeyring, clusterListener cluster.ClusterHook) (*raftLayer, error) { + clusterAddr := clusterListener.Addr() switch { case clusterAddr == nil: // Clustering disabled on the server, don't try to look for params @@ -176,11 +177,11 @@ func NewRaftLayer(logger log.Logger, raftTLSKeyring *TLSKeyring, clusterAddr net } layer := &raftLayer{ - addr: clusterAddr, - connCh: make(chan net.Conn), - closeCh: make(chan struct{}), - logger: logger, - baseTLSConfig: baseTLSConfig, + addr: clusterAddr, + connCh: make(chan net.Conn), + closeCh: make(chan struct{}), + logger: logger, + clusterListener: clusterListener, } if err := layer.setTLSKeyring(raftTLSKeyring); err != nil { @@ -236,6 +237,24 @@ func (l *raftLayer) setTLSKeyring(keyring *TLSKeyring) error { return nil } +func (l *raftLayer) ServerName() string { + key := l.keyring.GetActive() + if key == nil { + return "" + } + + return key.parsedCert.Subject.CommonName +} + +func (l *raftLayer) CACert(ctx context.Context) *x509.Certificate { + key := l.keyring.GetActive() + if key == nil { + return nil + } + + return key.parsedCert +} + func (l *raftLayer) ClientLookup(ctx context.Context, requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) { for _, subj := range requestInfo.AcceptableCAs { for _, key := range l.keyring.Keys { @@ -346,26 +365,6 @@ func (l *raftLayer) Addr() net.Addr { // Dial is used to create a new outgoing connection func (l *raftLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) { - - tlsConfig := l.baseTLSConfig.Clone() - - key := l.keyring.GetActive() - if key == nil { - return nil, errors.New("no active key") - } - - tlsConfig.NextProtos = []string{consts.RaftStorageALPN} - tlsConfig.ServerName = key.parsedCert.Subject.CommonName - - l.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName) - - pool := x509.NewCertPool() - pool.AddCert(key.parsedCert) - tlsConfig.RootCAs = pool - tlsConfig.ClientCAs = pool - - dialer := &net.Dialer{ - Timeout: timeout, - } - return tls.DialWithDialer(dialer, "tcp", string(address), tlsConfig) + dialFunc := l.clusterListener.GetDialerFunc(context.Background(), consts.RaftStorageALPN) + return dialFunc(string(address), timeout) } diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go index 8f7ea22ade66..3c39b44acc36 100644 --- a/plugins/database/mysql/mysql_test.go +++ b/plugins/database/mysql/mysql_test.go @@ -3,69 +3,22 @@ package mysql import ( "context" "database/sql" - "fmt" - "os" "strings" "testing" "time" stdmysql "github.com/go-sql-driver/mysql" - "github.com/hashicorp/vault/helper/testhelpers/docker" + mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/credsutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/ory/dockertest" ) var _ dbplugin.Database = (*MySQL)(nil) -func prepareMySQLTestContainer(t *testing.T, legacy bool, pw string) (cleanup func(), retURL string) { - if os.Getenv("MYSQL_URL") != "" { - return func() {}, os.Getenv("MYSQL_URL") - } - - pool, err := dockertest.NewPool("") - if err != nil { - t.Fatalf("Failed to connect to docker: %s", err) - } - - imageVersion := "5.7" - if legacy { - imageVersion = "5.6" - } - - 
resource, err := pool.Run("mysql", imageVersion, []string{"MYSQL_ROOT_PASSWORD=" + pw}) - if err != nil { - t.Fatalf("Could not start local MySQL docker container: %s", err) - } - - cleanup = func() { - docker.CleanupResource(t, pool, resource) - } - - retURL = fmt.Sprintf("root:%s@(localhost:%s)/mysql?parseTime=true", pw, resource.GetPort("3306/tcp")) - - // exponential backoff-retry - if err = pool.Retry(func() error { - var err error - var db *sql.DB - db, err = sql.Open("mysql", retURL) - if err != nil { - return err - } - defer db.Close() - return db.Ping() - }); err != nil { - cleanup() - t.Fatalf("Could not connect to MySQL docker container: %s", err) - } - - return -} - func TestMySQL_Initialize(t *testing.T) { - cleanup, connURL := prepareMySQLTestContainer(t, false, "secret") + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, false, "secret") defer cleanup() connectionDetails := map[string]interface{}{ @@ -100,7 +53,7 @@ func TestMySQL_Initialize(t *testing.T) { } func TestMySQL_CreateUser(t *testing.T) { - cleanup, connURL := prepareMySQLTestContainer(t, false, "secret") + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, false, "secret") defer cleanup() connectionDetails := map[string]interface{}{ @@ -133,7 +86,7 @@ func TestMySQL_CreateUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } @@ -143,7 +96,7 @@ func TestMySQL_CreateUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } @@ -155,14 +108,14 @@ func TestMySQL_CreateUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } } func TestMySQL_CreateUser_Legacy(t *testing.T) { - cleanup, connURL := prepareMySQLTestContainer(t, true, "secret") + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, true, "secret") defer cleanup() connectionDetails := map[string]interface{}{ @@ -195,7 +148,7 @@ func TestMySQL_CreateUser_Legacy(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } @@ -205,13 +158,13 @@ func TestMySQL_CreateUser_Legacy(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } } func TestMySQL_RotateRootCredentials(t *testing.T) { - cleanup, connURL := prepareMySQLTestContainer(t, false, "secret") + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, false, "secret") defer cleanup() connURL = strings.Replace(connURL, "root:secret", `{{username}}:{{password}}`, -1) @@ -247,7 +200,7 @@ func TestMySQL_RotateRootCredentials(t *testing.T) { } func TestMySQL_RevokeUser(t *testing.T) { - cleanup, connURL := prepareMySQLTestContainer(t, false, 
"secret") + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, false, "secret") defer cleanup() connectionDetails := map[string]interface{}{ @@ -274,7 +227,7 @@ func TestMySQL_RevokeUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } @@ -284,7 +237,7 @@ func TestMySQL_RevokeUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err == nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err == nil { t.Fatal("Credentials were not revoked") } @@ -294,7 +247,7 @@ func TestMySQL_RevokeUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } @@ -305,19 +258,19 @@ func TestMySQL_RevokeUser(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, username, password); err == nil { + if err := mysqlhelper.TestCredsExist(t, connURL, username, password); err == nil { t.Fatal("Credentials were not revoked") } } func TestMySQL_SetCredentials(t *testing.T) { - cleanup, connURL := prepareMySQLTestContainer(t, false, "secret") + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, false, "secret") defer cleanup() // create the database user and verify we can access dbUser := "vaultstatictest" createTestMySQLUser(t, connURL, dbUser, "password", testRoleStaticCreate) - if err := testCredsExist(t, connURL, dbUser, "password"); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, dbUser, "password"); err != nil { t.Fatalf("Could not connect with credentials: %s", err) } @@ -351,7 +304,7 @@ func TestMySQL_SetCredentials(t *testing.T) { } // verify new password works - if err := testCredsExist(t, connURL, dbUser, newPassword); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, dbUser, newPassword); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } @@ -363,14 +316,14 @@ func TestMySQL_SetCredentials(t *testing.T) { t.Fatalf("err: %s", err) } - if err := testCredsExist(t, connURL, dbUser, newPassword); err != nil { + if err := mysqlhelper.TestCredsExist(t, connURL, dbUser, newPassword); err != nil { t.Fatalf("Could not connect with new credentials: %s", err) } } func TestMySQL_Initialize_ReservedChars(t *testing.T) { pw := "#secret!%25#{@}" - cleanup, connURL := prepareMySQLTestContainer(t, false, pw) + cleanup, connURL := mysqlhelper.PrepareMySQLTestContainer(t, false, pw) defer cleanup() // Revert password set to test replacement by db.Init @@ -397,17 +350,6 @@ func TestMySQL_Initialize_ReservedChars(t *testing.T) { } } -func testCredsExist(t testing.TB, connURL, username, password string) error { - // Log in with the new creds - connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1) - db, err := sql.Open("mysql", connURL) - if err != nil { - return err - } - defer db.Close() - return db.Ping() -} - func createTestMySQLUser(t *testing.T, connURL, username, password, query string) { t.Helper() db, err := sql.Open("mysql", connURL) diff --git a/sdk/helper/awsutil/region.go b/sdk/helper/awsutil/region.go index 7ab0c21e1cd8..727c3b91044d 100644 --- a/sdk/helper/awsutil/region.go +++ 
b/sdk/helper/awsutil/region.go @@ -14,7 +14,8 @@ import ( // is a widely used region, and is the most common one for some services like STS. const DefaultRegion = "us-east-1" -var ec2MetadataBaseURL = "http://169.254.169.254" +// This is nil by default, but is exposed in case it needs to be changed for tests. +var ec2Endpoint *string /* It's impossible to mimic "normal" AWS behavior here because it's not consistent @@ -54,7 +55,7 @@ func GetRegion(configuredRegion string) (string, error) { } metadata := ec2metadata.New(sess, &aws.Config{ - Endpoint: aws.String(ec2MetadataBaseURL + "/latest"), + Endpoint: ec2Endpoint, EC2MetadataDisableTimeoutOverride: aws.Bool(true), HTTPClient: &http.Client{ Timeout: time.Second, @@ -68,6 +69,5 @@ func GetRegion(configuredRegion string) (string, error) { if err != nil { return "", errwrap.Wrapf("unable to retrieve region from instance metadata: {{err}}", err) } - return region, nil } diff --git a/ui/app/components/auth-jwt.js b/ui/app/components/auth-jwt.js index acaeb98ae011..f349c5cf4060 100644 --- a/ui/app/components/auth-jwt.js +++ b/ui/app/components/auth-jwt.js @@ -109,13 +109,14 @@ export default Component.extend({ }, exchangeOIDC: task(function*(event, oidcWindow) { - if (event.key !== 'oidcState') { + let oidcState = event.storageArea.getItem('oidcState'); + if (oidcState === null || oidcState === undefined) { return; } this.onLoading(true); // get the info from the event fired by the other window and // then remove it from localStorage - let { namespace, path, state, code } = JSON.parse(event.newValue); + let { namespace, path, state, code } = JSON.parse(oidcState); this.getWindow().localStorage.removeItem('oidcState'); // defer closing of the window, but continue executing the task diff --git a/ui/app/templates/partials/secret-form-create.hbs b/ui/app/templates/partials/secret-form-create.hbs index 4f427b823b1b..3470343406df 100644 --- a/ui/app/templates/partials/secret-form-create.hbs +++ b/ui/app/templates/partials/secret-form-create.hbs @@ -1,7 +1,7 @@
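
For reference, the LeaderJoinInfo JSON tags above suggest that a retry_join entry can also carry TLS material for the leader it targets, since JoinConfig decodes each entry and, when any certificate field is set, builds a client TLS config with tlsutil.ClientTLSConfig before the join is attempted. A minimal sketch assuming the same HCL shape as the raft_retry_join.hcl fixture; the node_id, addresses, and PEM contents are placeholders:

storage "raft" {
  path    = "/storage/path/raft"
  node_id = "raft2"
  retry_join = [
    {
      "leader_api_addr"    = "https://127.0.0.1:8200"
      "leader_ca_cert"     = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"
      "leader_client_cert" = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"
      "leader_client_key"  = "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----"
    }
  ]
}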
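The new ca_cert, client_cert, and client_key fields on the Nomad access config can be supplied when the backend is configured. A hedged sketch, assuming the engine is mounted at nomad/ and that path_config_access.go serves config/access; the address, token variable, and file names are placeholders:

vault write nomad/config/access \
    address="https://nomad.example.com:4646" \
    token="$NOMAD_MANAGEMENT_TOKEN" \
    ca_cert=@ca.pem \
    client_cert=@client.pem \
    client_key=@client-key.pem

With those values stored, backend.client sets TLSConfig.CACertPEM, ClientCertPEM, and ClientKeyPEM on the Nomad API client from the saved PEM strings.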
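Finally, the ParseStorage change keeps the storage config as map[string]string by JSON-encoding any non-string HCL value (such as the retry_join list), which JoinConfig later decodes back into leader entries. A self-contained Go sketch of that round trip using only the standard library in place of jsonutil; the literal values are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in for what hcl.DecodeObject yields for a raft storage stanza.
	decoded := map[string]interface{}{
		"path":    "/storage/path/raft",
		"node_id": "raft1",
		"retry_join": []map[string]string{
			{"leader_api_addr": "http://127.0.0.1:8200"},
			{"leader_api_addr": "http://127.0.0.2:8200"},
		},
	}

	// Mirror of ParseStorage: non-string values are stored as JSON strings.
	conf := make(map[string]string)
	for k, v := range decoded {
		if s, ok := v.(string); ok {
			conf[k] = s
			continue
		}
		b, err := json.Marshal(v)
		if err != nil {
			panic(err)
		}
		conf[k] = string(b)
	}

	// Mirror of JoinConfig: decode the stored JSON back into leader entries.
	var leaders []struct {
		LeaderAPIAddr string `json:"leader_api_addr"`
	}
	if err := json.Unmarshal([]byte(conf["retry_join"]), &leaders); err != nil {
		panic(err)
	}
	fmt.Println(len(leaders), leaders[0].LeaderAPIAddr)
}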