From 14e91624000e5e2412880f7fd085afbb5c491e95 Mon Sep 17 00:00:00 2001
From: ncabatoff <ncabatoff@hashicorp.com>
Date: Tue, 9 Jun 2020 16:54:06 -0400
Subject: [PATCH 01/29] Explain what lease tidy does. (#9178)

---
 website/pages/api-docs/system/leases.mdx | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/website/pages/api-docs/system/leases.mdx b/website/pages/api-docs/system/leases.mdx
index fde389d7522b..cc1462af8a6e 100644
--- a/website/pages/api-docs/system/leases.mdx
+++ b/website/pages/api-docs/system/leases.mdx
@@ -218,9 +218,14 @@ $ curl \
 
 ## Tidy Leases
 
-This endpoint cleans up the dangling storage entries for leases. Generally,
-running this is not needed unless upgrade notes or support personnel suggest it.
-This may perform a lot of I/O to the storage method so should be used sparingly.
+This endpoint cleans up the dangling storage entries for leases: for each lease
+entry in storage, Vault will verify that it has an associated valid non-expired
+token in storage, and if not, the lease will be revoked.
+
+Generally, running this is not needed unless upgrade notes or support personnel
+suggest it. This may perform a lot of I/O to the storage method so should be
+used sparingly.
+
 
 | Method | Path               |
 | :----- | :----------------- |

From b792adb9a1178a65637ecd92486aabd83f31a331 Mon Sep 17 00:00:00 2001
From: Theron Voran <tvoran@users.noreply.github.com>
Date: Tue, 9 Jun 2020 16:56:12 -0700
Subject: [PATCH 02/29] AWS: Add iam_groups parameter to role create/update
 (#8811)

Allows vault roles to be associated with IAM groups in the AWS
secrets engine, since IAM groups are a recommended way to manage
IAM user policies. IAM users generated against a vault role will
be added to the IAM Groups. For a credential type of
`assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederationToken) will
be the policies from each group in `iam_groups` combined with the
`policy_document` and `policy_arns` parameters.

Co-authored-by: Jim Kalafut <jkalafut@hashicorp.com>
---
 builtin/logical/aws/backend_test.go         | 374 +++++++++++++++++++-
 builtin/logical/aws/iam_policies.go         | 141 ++++++++
 builtin/logical/aws/iam_policies_test.go    | 255 +++++++++++++
 builtin/logical/aws/path_roles.go           |  19 +
 builtin/logical/aws/path_user.go            |   4 +-
 builtin/logical/aws/secret_access_keys.go   |  69 +++-
 website/pages/api-docs/secret/aws/index.mdx |  36 +-
 website/pages/docs/secrets/aws/index.mdx    |  28 +-
 8 files changed, 893 insertions(+), 33 deletions(-)
 create mode 100644 builtin/logical/aws/iam_policies.go
 create mode 100644 builtin/logical/aws/iam_policies_test.go

diff --git a/builtin/logical/aws/backend_test.go b/builtin/logical/aws/backend_test.go
index 19dad90889b4..9876de1cc154 100644
--- a/builtin/logical/aws/backend_test.go
+++ b/builtin/logical/aws/backend_test.go
@@ -20,6 +20,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/aws/aws-sdk-go/service/iam"
 	"github.com/aws/aws-sdk-go/service/iam/iamiface"
+	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/sts"
 	cleanhttp "github.com/hashicorp/go-cleanhttp"
 	"github.com/hashicorp/vault/helper/testhelpers"
@@ -357,6 +358,49 @@ func createUser(t *testing.T, userName string, accessKey *awsAccessKey) {
 	accessKey.SecretAccessKey = *genAccessKey.SecretAccessKey
 }
 
+// Create an IAM Group and add an inline policy and managed policies if specified
+func createGroup(t *testing.T, groupName string, inlinePolicy string, managedPolicies []string) {
+	awsConfig := &aws.Config{
+		Region:     aws.String("us-east-1"),
+		HTTPClient: cleanhttp.DefaultClient(),
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	svc := iam.New(sess)
+	createGroupInput := &iam.CreateGroupInput{
+		GroupName: aws.String(groupName),
+	}
+	log.Printf("[INFO] AWS CreateGroup: %s", groupName)
+	if _, err := svc.CreateGroup(createGroupInput); err != nil {
+		t.Fatalf("AWS CreateGroup failed: %v", err)
+	}
+
+	if len(inlinePolicy) > 0 {
+		putPolicyInput := &iam.PutGroupPolicyInput{
+			PolicyDocument: aws.String(inlinePolicy),
+			PolicyName:     aws.String("InlinePolicy"),
+			GroupName:      aws.String(groupName),
+		}
+		_, err = svc.PutGroupPolicy(putPolicyInput)
+		if err != nil {
+			t.Fatalf("AWS PutGroupPolicy failed: %v", err)
+		}
+	}
+
+	for _, mp := range managedPolicies {
+		attachGroupPolicyInput := &iam.AttachGroupPolicyInput{
+			PolicyArn: aws.String(mp),
+			GroupName: aws.String(groupName),
+		}
+		_, err = svc.AttachGroupPolicy(attachGroupPolicyInput)
+		if err != nil {
+			t.Fatalf("AWS AttachGroupPolicy failed, %v", err)
+		}
+	}
+}
+
 func deleteTestRole(roleName string) error {
 	awsConfig := &aws.Config{
 		Region:     aws.String("us-east-1"),
@@ -452,6 +496,71 @@ func deleteTestUser(accessKey *awsAccessKey, userName string) error {
 	return nil
 }
 
+func deleteTestGroup(groupName string) error {
+	awsConfig := &aws.Config{
+		Region:     aws.String("us-east-1"),
+		HTTPClient: cleanhttp.DefaultClient(),
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		return err
+	}
+	svc := iam.New(sess)
+
+	// Detach any managed group policies
+	getGroupsInput := &iam.ListAttachedGroupPoliciesInput{
+		GroupName: aws.String(groupName),
+	}
+	getGroupsOutput, err := svc.ListAttachedGroupPolicies(getGroupsInput)
+	if err != nil {
+		log.Printf("[WARN] AWS ListAttachedGroupPolicies failed: %v", err)
+		return err
+	}
+	for _, g := range getGroupsOutput.AttachedPolicies {
+		detachGroupInput := &iam.DetachGroupPolicyInput{
+			GroupName: aws.String(groupName),
+			PolicyArn: g.PolicyArn,
+		}
+		if _, err := svc.DetachGroupPolicy(detachGroupInput); err != nil {
+			log.Printf("[WARN] AWS DetachGroupPolicy failed: %v", err)
+			return err
+		}
+	}
+
+	// Remove any inline policies
+	listGroupPoliciesInput := &iam.ListGroupPoliciesInput{
+		GroupName: aws.String(groupName),
+	}
+	listGroupPoliciesOutput, err := svc.ListGroupPolicies(listGroupPoliciesInput)
+	if err != nil {
+		log.Printf("[WARN] AWS ListGroupPolicies failed: %v", err)
+		return err
+	}
+	for _, g := range listGroupPoliciesOutput.PolicyNames {
+		deleteGroupPolicyInput := &iam.DeleteGroupPolicyInput{
+			GroupName:  aws.String(groupName),
+			PolicyName: g,
+		}
+		if _, err := svc.DeleteGroupPolicy(deleteGroupPolicyInput); err != nil {
+			log.Printf("[WARN] AWS DeleteGroupPolicy failed: %v", err)
+			return err
+		}
+	}
+
+	// Delete the group
+	deleteTestGroupInput := &iam.DeleteGroupInput{
+		GroupName: aws.String(groupName),
+	}
+	log.Printf("[INFO] AWS DeleteGroup: %s", groupName)
+	_, err = svc.DeleteGroup(deleteTestGroupInput)
+	if err != nil {
+		log.Printf("[WARN] AWS DeleteGroup failed: %v", err)
+		return err
+	}
+
+	return nil
+}
+
 func testAccStepConfig(t *testing.T) logicaltest.TestStep {
 	return logicaltest.TestStep{
 		Operation: logical.UpdateOperation,
@@ -674,6 +783,25 @@ func listDynamoTablesTest(accessKey, secretKey, token string) error {
 	})
 }
 
+func listS3BucketsTest(accessKey, secretKey, token string) error {
+	creds := credentials.NewStaticCredentials(accessKey, secretKey, token)
+	awsConfig := &aws.Config{
+		Credentials: creds,
+		Region:      aws.String("us-east-1"),
+		HTTPClient:  cleanhttp.DefaultClient(),
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		return err
+	}
+	client := s3.New(sess)
+	log.Printf("[WARN] Verifying that the generated credentials work with s3:ListBuckets...")
+	return retryUntilSuccess(func() error {
+		_, err := client.ListBuckets(&s3.ListBucketsInput{})
+		return err
+	})
+}
+
 func retryUntilSuccess(op func() error) error {
 	retryCount := 0
 	success := false
@@ -743,6 +871,7 @@ func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest.
 				"max_sts_ttl":              int64(0),
 				"user_path":                "",
 				"permissions_boundary_arn": "",
+				"iam_groups":               []string(nil),
 			}
 			if !reflect.DeepEqual(resp.Data, expected) {
 				return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected)
@@ -769,6 +898,20 @@ const testDynamoPolicy = `{
 }
 `
 
+const testS3Policy = `{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:Get*",
+                "s3:List*"
+            ],
+            "Resource": "*"
+        }
+    ]
+}`
+
 const adminAccessPolicyArn = "arn:aws:iam::aws:policy/AdministratorAccess"
 const ec2PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess"
 const iamPolicyArn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess"
@@ -825,15 +968,17 @@ func TestBackend_basicPolicyArnRef(t *testing.T) {
 	})
 }
 
-func TestBackend_iamUserManagedInlinePolicies(t *testing.T) {
+func TestBackend_iamUserManagedInlinePoliciesGroups(t *testing.T) {
 	t.Parallel()
 	compacted, err := compactJSON(testDynamoPolicy)
 	if err != nil {
 		t.Fatalf("bad: %#v", err)
 	}
+	groupName := generateUniqueName(t.Name())
 	roleData := map[string]interface{}{
 		"policy_document": testDynamoPolicy,
 		"policy_arns":     []string{ec2PolicyArn, iamPolicyArn},
+		"iam_groups":      []string{groupName},
 		"credential_type": iamUserCred,
 		"user_path":       "/path/",
 	}
@@ -846,18 +991,72 @@ func TestBackend_iamUserManagedInlinePolicies(t *testing.T) {
 		"max_sts_ttl":              int64(0),
 		"user_path":                "/path/",
 		"permissions_boundary_arn": "",
+		"iam_groups":               []string{groupName},
 	}
 
 	logicaltest.Test(t, logicaltest.TestCase{
 		AcceptanceTest: true,
-		PreCheck:       func() { testAccPreCheck(t) },
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createGroup(t, groupName, testS3Policy, []string{})
+		},
 		LogicalBackend: getBackend(t),
 		Steps: []logicaltest.TestStep{
 			testAccStepConfig(t),
 			testAccStepWriteRole(t, "test", roleData),
 			testAccStepReadRole(t, "test", expectedRoleData),
-			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, assertCreatedIAMUser}),
-			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, assertCreatedIAMUser, listS3BucketsTest}),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, listS3BucketsTest}),
+		},
+		Teardown: func() error {
+			return deleteTestGroup(groupName)
+		},
+	})
+}
+
+// Similar to TestBackend_iamUserManagedInlinePoliciesGroups() but managing
+// policies only with groups
+func TestBackend_iamUserGroups(t *testing.T) {
+	t.Parallel()
+	group1Name := generateUniqueName(t.Name())
+	group2Name := generateUniqueName(t.Name())
+	roleData := map[string]interface{}{
+		"iam_groups":      []string{group1Name, group2Name},
+		"credential_type": iamUserCred,
+		"user_path":       "/path/",
+	}
+	expectedRoleData := map[string]interface{}{
+		"policy_document":          "",
+		"policy_arns":              []string(nil),
+		"credential_type":          iamUserCred,
+		"role_arns":                []string(nil),
+		"default_sts_ttl":          int64(0),
+		"max_sts_ttl":              int64(0),
+		"user_path":                "/path/",
+		"permissions_boundary_arn": "",
+		"iam_groups":               []string{group1Name, group2Name},
+	}
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createGroup(t, group1Name, testS3Policy, []string{ec2PolicyArn, iamPolicyArn})
+			createGroup(t, group2Name, testDynamoPolicy, []string{})
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepReadRole(t, "test", expectedRoleData),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, assertCreatedIAMUser, listS3BucketsTest}),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, listS3BucketsTest}),
+		},
+		Teardown: func() error {
+			if err := deleteTestGroup(group1Name); err != nil {
+				return err
+			}
+			return deleteTestGroup(group2Name)
 		},
 	})
 }
@@ -948,6 +1147,63 @@ func TestBackend_AssumedRoleWithPolicyARN(t *testing.T) {
 	})
 }
 
+func TestBackend_AssumedRoleWithGroups(t *testing.T) {
+	t.Parallel()
+	roleName := generateUniqueName(t.Name())
+	groupName := generateUniqueName(t.Name())
+	// This looks a bit curious. The policy document and the role document act
+	// as a logical intersection of policies. The role allows ec2:Describe*
+	// (among other permissions). This policy allows everything BUT
+	// ec2:DescribeAvailabilityZones. Thus, the logical intersection of the two
+	// is all ec2:Describe* EXCEPT ec2:DescribeAvailabilityZones, and so the
+	// describeAZs call should fail
+	allowAllButDescribeAzs := `{
+	"Version": "2012-10-17",
+	"Statement": [
+		{
+			"Effect": "Allow",
+			"NotAction": "ec2:DescribeAvailabilityZones",
+			"Resource": "*"
+		}
+	]
+}`
+	awsAccountID, err := getAccountID()
+	if err != nil {
+	t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+
+	roleData := map[string]interface{}{
+		"iam_groups":      []string{groupName},
+		"role_arns":       []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)},
+		"credential_type": assumedRoleCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn})
+			createGroup(t, groupName, allowAllButDescribeAzs, []string{})
+			// Sleep sometime because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+		},
+		Teardown: func() error {
+			if err := deleteTestGroup(groupName); err != nil {
+				return err
+			}
+			return deleteTestRole(roleName)
+		},
+	})
+}
+
 func TestBackend_FederationTokenWithPolicyARN(t *testing.T) {
 	t.Parallel()
 	userName := generateUniqueName(t.Name())
@@ -979,6 +1235,56 @@ func TestBackend_FederationTokenWithPolicyARN(t *testing.T) {
 	})
 }
 
+func TestBackend_FederationTokenWithGroups(t *testing.T) {
+	t.Parallel()
+	userName := generateUniqueName(t.Name())
+	groupName := generateUniqueName(t.Name())
+	accessKey := &awsAccessKey{}
+
+	// IAM policy where Statement is a single element, not a list
+	iamSingleStatementPolicy := `{
+		"Version": "2012-10-17",
+		"Statement": {
+			"Effect": "Allow",
+			"Action": [
+				"s3:Get*",
+				"s3:List*"
+			],
+			"Resource": "*"
+		}
+	}`
+
+	roleData := map[string]interface{}{
+		"iam_groups":      []string{groupName},
+		"policy_document": iamSingleStatementPolicy,
+		"credential_type": federationTokenCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createUser(t, userName, accessKey)
+			createGroup(t, groupName, "", []string{dynamoPolicyArn})
+			// Sleep sometime because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigWithCreds(t, accessKey),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest, describeAzsTestUnauthorized, listS3BucketsTest}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listDynamoTablesTest, describeAzsTestUnauthorized, listS3BucketsTest}),
+		},
+		Teardown: func() error {
+			if err := deleteTestGroup(groupName); err != nil {
+				return err
+			}
+			return deleteTestUser(accessKey, userName)
+		},
+	})
+}
+
 func TestBackend_RoleDefaultSTSTTL(t *testing.T) {
 	t.Parallel()
 	roleName := generateUniqueName(t.Name())
@@ -1051,6 +1357,7 @@ func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicalte
 				"max_sts_ttl":              int64(0),
 				"user_path":                "",
 				"permissions_boundary_arn": "",
+				"iam_groups":               []string(nil),
 			}
 			if !reflect.DeepEqual(resp.Data, expected) {
 				return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected)
@@ -1071,6 +1378,65 @@ func testAccStepWriteArnRoleRef(t *testing.T, vaultRoleName, awsRoleName, awsAcc
 	}
 }
 
+func TestBackend_iamGroupsCrud(t *testing.T) {
+	t.Parallel()
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteIamGroups(t, "test", []string{"group1", "group2"}),
+			testAccStepReadIamGroups(t, "test", []string{"group1", "group2"}),
+			testAccStepDeletePolicy(t, "test"),
+			testAccStepReadIamGroups(t, "test", []string{}),
+		},
+	})
+}
+
+func testAccStepWriteIamGroups(t *testing.T, name string, groups []string) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/" + name,
+		Data: map[string]interface{}{
+			"credential_type": iamUserCred,
+			"iam_groups":      groups,
+		},
+	}
+}
+
+func testAccStepReadIamGroups(t *testing.T, name string, groups []string) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.ReadOperation,
+		Path:      "roles/" + name,
+		Check: func(resp *logical.Response) error {
+			if resp == nil {
+				if len(groups) == 0 {
+					return nil
+				}
+
+				return fmt.Errorf("bad: %#v", resp)
+			}
+
+			expected := map[string]interface{}{
+				"policy_arns":              []string(nil),
+				"role_arns":                []string(nil),
+				"policy_document":          "",
+				"credential_type":          iamUserCred,
+				"default_sts_ttl":          int64(0),
+				"max_sts_ttl":              int64(0),
+				"user_path":                "",
+				"permissions_boundary_arn": "",
+				"iam_groups":               groups,
+			}
+			if !reflect.DeepEqual(resp.Data, expected) {
+				return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected)
+			}
+
+			return nil
+		},
+	}
+}
+
 func generateUniqueName(prefix string) string {
 	return testhelpers.RandomWithPrefix(prefix)
 }
diff --git a/builtin/logical/aws/iam_policies.go b/builtin/logical/aws/iam_policies.go
new file mode 100644
index 000000000000..a8c134de6081
--- /dev/null
+++ b/builtin/logical/aws/iam_policies.go
@@ -0,0 +1,141 @@
+package aws
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/iam/iamiface"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// PolicyDocument represents an IAM policy document
+type PolicyDocument struct {
+	Version    string           `json:"Version"`
+	Statements StatementEntries `json:"Statement"`
+}
+
+// StatementEntries is a slice of statements that make up a PolicyDocument
+type StatementEntries []interface{}
+
+// UnmarshalJSON is defined here for StatementEntries because the Statement
+// portion of an IAM Policy can either be a list or a single element, so if it's
+// a single element this wraps it in a []interface{} so that it's easy to
+// combine with other policy statements:
+// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_statement.html
+func (se *StatementEntries) UnmarshalJSON(b []byte) error {
+	var out StatementEntries
+
+	var data interface{}
+	if err := json.Unmarshal(b, &data); err != nil {
+		return err
+	}
+
+	switch t := data.(type) {
+	case []interface{}:
+		out = t
+	case interface{}:
+		out = []interface{}{t}
+	default:
+		return fmt.Errorf("unsupported data type %T for StatementEntries", t)
+	}
+	*se = out
+	return nil
+}
+
+// getGroupPolicies takes a list of IAM Group names and returns a list of their
+// inline policy documents, and a list of the attached managed policy ARNs
+func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGroups []string) ([]string, []string, error) {
+	var groupPolicies []string
+	var groupPolicyARNs []string
+	var err error
+	var agp *iam.ListAttachedGroupPoliciesOutput
+	var inlinePolicies *iam.ListGroupPoliciesOutput
+	var inlinePolicyDoc *iam.GetGroupPolicyOutput
+	var iamClient iamiface.IAMAPI
+
+	// Return early if there are no groups, to avoid creating an IAM client
+	// needlessly
+	if len(iamGroups) == 0 {
+		return nil, nil, nil
+	}
+
+	iamClient, err = b.clientIAM(ctx, s)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, g := range iamGroups {
+		// Collect managed policy ARNs from the IAM Group
+		agp, err = iamClient.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{
+			GroupName: aws.String(g),
+		})
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, p := range agp.AttachedPolicies {
+			groupPolicyARNs = append(groupPolicyARNs, *p.PolicyArn)
+		}
+
+		// Collect inline policy names from the IAM Group
+		inlinePolicies, err = iamClient.ListGroupPolicies(&iam.ListGroupPoliciesInput{
+			GroupName: aws.String(g),
+		})
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, iP := range inlinePolicies.PolicyNames {
+			inlinePolicyDoc, err = iamClient.GetGroupPolicy(&iam.GetGroupPolicyInput{
+				GroupName:  &g,
+				PolicyName: iP,
+			})
+			if err != nil {
+				return nil, nil, err
+			}
+			if inlinePolicyDoc != nil && inlinePolicyDoc.PolicyDocument != nil {
+				var policyStr string
+				if policyStr, err = url.QueryUnescape(*inlinePolicyDoc.PolicyDocument); err != nil {
+					return nil, nil, err
+				}
+				groupPolicies = append(groupPolicies, policyStr)
+			}
+		}
+	}
+	return groupPolicies, groupPolicyARNs, nil
+}
+
+// combinePolicyDocuments takes policy strings as input, and combines them into
+// a single policy document string
+func combinePolicyDocuments(policies ...string) (string, error) {
+	var policy string
+	var err error
+	var policyBytes []byte
+	var newPolicy = PolicyDocument{
+		// 2012-10-17 is the current version of the AWS policy language:
+		// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
+		Version: "2012-10-17",
+	}
+	newPolicy.Statements = make(StatementEntries, 0, len(policies))
+
+	for _, p := range policies {
+		if len(p) == 0 {
+			continue
+		}
+		var tmpDoc PolicyDocument
+		err = json.Unmarshal([]byte(p), &tmpDoc)
+		if err != nil {
+			return "", err
+		}
+		newPolicy.Statements = append(newPolicy.Statements, tmpDoc.Statements...)
+	}
+
+	policyBytes, err = json.Marshal(&newPolicy)
+	if err != nil {
+		return "", err
+	}
+	policy = string(policyBytes)
+	return policy, nil
+}
diff --git a/builtin/logical/aws/iam_policies_test.go b/builtin/logical/aws/iam_policies_test.go
new file mode 100644
index 000000000000..d2521b1dc151
--- /dev/null
+++ b/builtin/logical/aws/iam_policies_test.go
@@ -0,0 +1,255 @@
+package aws
+
+import (
+	"context"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/iam/iamiface"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/stretchr/testify/assert"
+)
+
+const ec2DescribePolicy = `{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["ec2:DescribeInstances"], "Resource": "*"}]}`
+
+// ec2AllPolicy also uses a string instead of a list for the Action
+const ec2AllPolicy = `{"Version": "2012-10-17","Statement": [{"Effect": "Allow", "Action": "ec2:*", "Resource": "*"}]}`
+
+// ec2SingleStatement is an example of the Statement portion containing a single statement that's not a list
+const ec2SingleStatement = `{"Version": "2012-10-17", "Statement": {"Effect": "Allow", "Action": ["ec2:DescribeInstances"], "Resource": "*"}}`
+
+type mockGroupIAMClient struct {
+	iamiface.IAMAPI
+	ListAttachedGroupPoliciesResp iam.ListAttachedGroupPoliciesOutput
+	ListGroupPoliciesResp         iam.ListGroupPoliciesOutput
+	GetGroupPolicyResp            iam.GetGroupPolicyOutput
+}
+
+func (m mockGroupIAMClient) ListAttachedGroupPolicies(in *iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) {
+	return &m.ListAttachedGroupPoliciesResp, nil
+}
+
+func (m mockGroupIAMClient) ListGroupPolicies(in *iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) {
+	return &m.ListGroupPoliciesResp, nil
+}
+
+func (m mockGroupIAMClient) GetGroupPolicy(in *iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) {
+	return &m.GetGroupPolicyResp, nil
+}
+
+func Test_getGroupPolicies(t *testing.T) {
+	t.Parallel()
+	testCases := []struct {
+		description         string
+		listAGPResp         iam.ListAttachedGroupPoliciesOutput
+		listGPResp          iam.ListGroupPoliciesOutput
+		getGPResp           iam.GetGroupPolicyOutput
+		iamGroupArg         []string
+		wantGroupPolicies   []string
+		wantGroupPolicyARNs []string
+		wantErr             bool
+	}{
+		{
+			description: "All IAM calls respond with data",
+			listAGPResp: iam.ListAttachedGroupPoliciesOutput{
+				AttachedPolicies: []*iam.AttachedPolicy{
+					{
+						PolicyArn:  aws.String("abcdefghijklmnopqrst"),
+						PolicyName: aws.String("test policy"),
+					},
+				},
+			},
+			listGPResp: iam.ListGroupPoliciesOutput{
+				PolicyNames: []*string{
+					aws.String("inline policy"),
+				},
+			},
+			getGPResp: iam.GetGroupPolicyOutput{
+				GroupName:      aws.String("inline policy"),
+				PolicyDocument: aws.String(ec2DescribePolicy),
+				PolicyName:     aws.String("ec2 describe"),
+			},
+			iamGroupArg:         []string{"testgroup1"},
+			wantGroupPolicies:   []string{ec2DescribePolicy},
+			wantGroupPolicyARNs: []string{"abcdefghijklmnopqrst"},
+			wantErr:             false,
+		},
+		{
+			description: "No managed policies",
+			listAGPResp: iam.ListAttachedGroupPoliciesOutput{},
+			listGPResp: iam.ListGroupPoliciesOutput{
+				PolicyNames: []*string{
+					aws.String("inline policy"),
+				},
+			},
+			getGPResp: iam.GetGroupPolicyOutput{
+				GroupName:      aws.String("inline policy"),
+				PolicyDocument: aws.String(ec2DescribePolicy),
+				PolicyName:     aws.String("ec2 describe"),
+			},
+			iamGroupArg:         []string{"testgroup1", "testgroup2"},
+			wantGroupPolicies:   []string{ec2DescribePolicy, ec2DescribePolicy},
+			wantGroupPolicyARNs: []string(nil),
+			wantErr:             false,
+		},
+		{
+			description: "No inline policies",
+			listAGPResp: iam.ListAttachedGroupPoliciesOutput{
+				AttachedPolicies: []*iam.AttachedPolicy{
+					{
+						PolicyArn:  aws.String("abcdefghijklmnopqrst"),
+						PolicyName: aws.String("test policy"),
+					},
+				},
+			},
+			listGPResp:          iam.ListGroupPoliciesOutput{},
+			getGPResp:           iam.GetGroupPolicyOutput{},
+			iamGroupArg:         []string{"testgroup1"},
+			wantGroupPolicies:   []string(nil),
+			wantGroupPolicyARNs: []string{"abcdefghijklmnopqrst"},
+			wantErr:             false,
+		},
+		{
+			description:         "No policies",
+			listAGPResp:         iam.ListAttachedGroupPoliciesOutput{},
+			listGPResp:          iam.ListGroupPoliciesOutput{},
+			getGPResp:           iam.GetGroupPolicyOutput{},
+			iamGroupArg:         []string{"testgroup1"},
+			wantGroupPolicies:   []string(nil),
+			wantGroupPolicyARNs: []string(nil),
+			wantErr:             false,
+		},
+		{
+			description:         "empty iam_groups arg",
+			listAGPResp:         iam.ListAttachedGroupPoliciesOutput{},
+			listGPResp:          iam.ListGroupPoliciesOutput{},
+			getGPResp:           iam.GetGroupPolicyOutput{},
+			iamGroupArg:         []string{},
+			wantGroupPolicies:   []string(nil),
+			wantGroupPolicyARNs: []string(nil),
+			wantErr:             false,
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.description, func(t *testing.T) {
+			// configure backend and iam client
+			config := logical.TestBackendConfig()
+			config.StorageView = &logical.InmemStorage{}
+
+			b := Backend()
+			if err := b.Setup(context.Background(), config); err != nil {
+				t.Fatal(err)
+			}
+			b.iamClient = &mockGroupIAMClient{
+				ListAttachedGroupPoliciesResp: tc.listAGPResp,
+				ListGroupPoliciesResp:         tc.listGPResp,
+				GetGroupPolicyResp:            tc.getGPResp,
+			}
+
+			// run the test and compare results
+			groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(context.TODO(), config.StorageView, tc.iamGroupArg)
+			assert.Equal(t, tc.wantGroupPolicies, groupPolicies)
+			assert.Equal(t, tc.wantGroupPolicyARNs, groupPolicyARNs)
+			assert.Equal(t, tc.wantErr, err != nil)
+		})
+	}
+}
+
+func Test_combinePolicyDocuments(t *testing.T) {
+	t.Parallel()
+	testCases := []struct {
+		description    string
+		input          []string
+		expectedOutput string
+		expectedErr    bool
+	}{
+		{
+			description: "one policy",
+			input: []string{
+				ec2AllPolicy,
+			},
+			expectedOutput: `{"Version":"2012-10-17","Statement":[{"Action":"ec2:*","Effect":"Allow","Resource":"*"}]}`,
+			expectedErr:    false,
+		},
+		{
+			description: "two policies",
+			input: []string{
+				ec2AllPolicy,
+				ec2DescribePolicy,
+			},
+			expectedOutput: `{"Version": "2012-10-17", "Statement":[
+				{"Action": "ec2:*", "Effect": "Allow", "Resource": "*"},
+				{"Action": ["ec2:DescribeInstances"], "Effect": "Allow", "Resource": "*"}]}`,
+			expectedErr: false,
+		},
+		{
+			description: "two policies, one with empty statement",
+			input: []string{
+				ec2AllPolicy,
+				`{"Version": "2012-10-17", "Statement": []}`,
+			},
+			expectedOutput: `{"Version": "2012-10-17", "Statement": [{"Action": "ec2:*", "Effect": "Allow", "Resource": "*"}]}`,
+			expectedErr:    false,
+		},
+		{
+			description: "malformed json",
+			input: []string{
+				`"Version": "2012-10-17","Statement": [{"Effect": "Allow", "Action": "ec2:*", "Resource": "*"}]}`,
+				`{"Version": "2012-10-17", "Statement": []}`,
+			},
+			expectedOutput: ``,
+			expectedErr:    true,
+		},
+		{
+			description: "not action",
+			input: []string{
+				`{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`,
+			},
+			expectedOutput: `{"Version": "2012-10-17","Statement":[{"Effect": "Allow","NotAction": "ec2:DescribeAvailabilityZones",	"Resource": "*"}]}`,
+			expectedErr: false,
+		},
+		{
+			description: "one blank policy",
+			input: []string{
+				"",
+				`{"Version": "2012-10-17", "Statement": []}`,
+			},
+			expectedOutput: `{"Version": "2012-10-17", "Statement": []}`,
+			expectedErr:    false,
+		},
+		{
+			description: "when statement is not a list",
+			input: []string{
+				ec2SingleStatement,
+			},
+			expectedOutput: `{"Version": "2012-10-17", "Statement": [{"Action": ["ec2:DescribeInstances"], "Effect": "Allow", "Resource": "*"}]}`,
+			expectedErr:    false,
+		},
+		{
+			description: "statement is malformed json",
+			input: []string{
+				`{"Version": "2012-10-17", "Statement": {true}`,
+			},
+			expectedOutput: "",
+			expectedErr:    true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.description, func(t *testing.T) {
+			policyOut, err := combinePolicyDocuments(tc.input...)
+			if (err != nil) != tc.expectedErr {
+				t.Fatalf("got unexpected error: %s", err)
+			}
+			if (err != nil) != tc.expectedErr {
+				t.Fatalf("got unexpected error: %s", err)
+			}
+			// remove whitespace
+			tc.expectedOutput, err = compactJSON(tc.expectedOutput)
+			if policyOut != tc.expectedOutput {
+				t.Fatalf("did not receive expected output: want %s, got %s", tc.expectedOutput, policyOut)
+			}
+		})
+	}
+}
diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go
index 6633c48a62ac..a5f225479a87 100644
--- a/builtin/logical/aws/path_roles.go
+++ b/builtin/logical/aws/path_roles.go
@@ -80,6 +80,19 @@ will be passed in as the Policy parameter to the AssumeRole or
 GetFederationToken API call, acting as a filter on permissions available.`,
 			},
 
+			"iam_groups": &framework.FieldSchema{
+				Type: framework.TypeCommaStringSlice,
+				Description: `Names of IAM groups that generated IAM users will be added to. For a credential
+type of assumed_role or federation_token, the policies sent to the
+corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
+policies from each group in iam_groups combined with the policy_document
+and policy_arns parameters.`,
+				DisplayAttrs: &framework.DisplayAttributes{
+					Name:  "IAM Groups",
+					Value: "group1,group2",
+				},
+			},
+
 			"default_sts_ttl": &framework.FieldSchema{
 				Type:        framework.TypeDurationSecond,
 				Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred),
@@ -284,6 +297,10 @@ func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *f
 		roleEntry.PermissionsBoundaryARN = permissionsBoundaryARNRaw.(string)
 	}
 
+	if iamGroups, ok := d.GetOk("iam_groups"); ok {
+		roleEntry.IAMGroups = iamGroups.([]string)
+	}
+
 	if legacyRole != "" {
 		roleEntry = upgradeLegacyPolicyEntry(legacyRole)
 		if roleEntry.InvalidData != "" {
@@ -468,6 +485,7 @@ type awsRoleEntry struct {
 	PolicyArns               []string      `json:"policy_arns"`                           // ARNs of managed policies to attach to an IAM user
 	RoleArns                 []string      `json:"role_arns"`                             // ARNs of roles to assume for AssumedRole credentials
 	PolicyDocument           string        `json:"policy_document"`                       // JSON-serialized inline policy to attach to IAM users and/or to specify as the Policy parameter in AssumeRole calls
+	IAMGroups                []string      `json:"iam_groups"`                            // Names of IAM groups that generated IAM users will be added to
 	InvalidData              string        `json:"invalid_data,omitempty"`                // Invalid role data. Exists to support converting the legacy role data into the new format
 	ProhibitFlexibleCredPath bool          `json:"prohibit_flexible_cred_path,omitempty"` // Disallow accessing STS credentials via the creds path and vice verse
 	Version                  int           `json:"version"`                               // Version number of the role format
@@ -483,6 +501,7 @@ func (r *awsRoleEntry) toResponseData() map[string]interface{} {
 		"policy_arns":              r.PolicyArns,
 		"role_arns":                r.RoleArns,
 		"policy_document":          r.PolicyDocument,
+		"iam_groups":               r.IAMGroups,
 		"default_sts_ttl":          int64(r.DefaultSTSTTL.Seconds()),
 		"max_sts_ttl":              int64(r.MaxSTSTTL.Seconds()),
 		"user_path":                r.UserPath,
diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go
index 6c1f89ad15eb..c9b43e97ef68 100644
--- a/builtin/logical/aws/path_user.go
+++ b/builtin/logical/aws/path_user.go
@@ -126,9 +126,9 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr
 		case !strutil.StrListContains(role.RoleArns, roleArn):
 			return logical.ErrorResponse(fmt.Sprintf("role_arn %q not in allowed role arns for Vault role %q", roleArn, roleName)), nil
 		}
-		return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, ttl)
+		return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl)
 	case federationTokenCred:
-		return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, ttl)
+		return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl)
 	default:
 		return logical.ErrorResponse(fmt.Sprintf("unknown credential_type: %q", credentialType)), nil
 	}
diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go
index 36198196263b..bbc4011c43fc 100644
--- a/builtin/logical/aws/secret_access_keys.go
+++ b/builtin/logical/aws/secret_access_keys.go
@@ -67,7 +67,23 @@ func genUsername(displayName, policyName, userType string) (ret string, warning
 
 func (b *backend) getFederationToken(ctx context.Context, s logical.Storage,
 	displayName, policyName, policy string, policyARNs []string,
-	lifeTimeInSeconds int64) (*logical.Response, error) {
+	iamGroups []string, lifeTimeInSeconds int64) (*logical.Response, error) {
+
+	groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(ctx, s, iamGroups)
+	if err != nil {
+		return logical.ErrorResponse(err.Error()), nil
+	}
+	if groupPolicies != nil {
+		groupPolicies = append(groupPolicies, policy)
+		policy, err = combinePolicyDocuments(groupPolicies...)
+		if err != nil {
+			return logical.ErrorResponse(err.Error()), nil
+		}
+	}
+	if len(groupPolicyARNs) > 0 {
+		policyARNs = append(policyARNs, groupPolicyARNs...)
+	}
+
 	stsClient, err := b.clientSTS(ctx, s)
 	if err != nil {
 		return logical.ErrorResponse(err.Error()), nil
@@ -91,14 +107,13 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage,
 	// that by default; the behavior can be explicitly opted in to by associating the Vault role
 	// with a policy ARN or document that allows the appropriate permissions.
 	if policy == "" && len(policyARNs) == 0 {
-		return logical.ErrorResponse(fmt.Sprintf("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred)), nil
+		return logical.ErrorResponse("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred), nil
 	}
 
 	tokenResp, err := stsClient.GetFederationToken(getTokenInput)
 
 	if err != nil {
-		return logical.ErrorResponse(fmt.Sprintf(
-			"Error generating STS keys: %s", err)), awsutil.CheckAWSError(err)
+		return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err)
 	}
 
 	resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
@@ -126,7 +141,25 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage,
 
 func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
 	displayName, roleName, roleArn, policy string, policyARNs []string,
-	lifeTimeInSeconds int64) (*logical.Response, error) {
+	iamGroups []string, lifeTimeInSeconds int64) (*logical.Response, error) {
+
+	// grab any IAM group policies associated with the vault role, both inline
+	// and managed
+	groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(ctx, s, iamGroups)
+	if err != nil {
+		return logical.ErrorResponse(err.Error()), nil
+	}
+	if len(groupPolicies) > 0 {
+		groupPolicies = append(groupPolicies, policy)
+		policy, err = combinePolicyDocuments(groupPolicies...)
+		if err != nil {
+			return logical.ErrorResponse(err.Error()), nil
+		}
+	}
+	if len(groupPolicyARNs) > 0 {
+		policyARNs = append(policyARNs, groupPolicyARNs...)
+	}
+
 	stsClient, err := b.clientSTS(ctx, s)
 	if err != nil {
 		return logical.ErrorResponse(err.Error()), nil
@@ -148,8 +181,7 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
 	tokenResp, err := stsClient.AssumeRole(assumeRoleInput)
 
 	if err != nil {
-		return logical.ErrorResponse(fmt.Sprintf(
-			"Error assuming role: %s", err)), awsutil.CheckAWSError(err)
+		return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err)
 	}
 
 	resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
@@ -217,8 +249,7 @@ func (b *backend) secretAccessKeysCreate(
 			iamErr := errwrap.Wrapf("error creating IAM user: {{err}}", err)
 			return nil, errwrap.Wrap(errwrap.Wrapf("failed to delete WAL entry: {{err}}", walErr), iamErr)
 		}
-		return logical.ErrorResponse(fmt.Sprintf(
-			"Error creating IAM user: %s", err)), awsutil.CheckAWSError(err)
+		return logical.ErrorResponse("Error creating IAM user: %s", err), awsutil.CheckAWSError(err)
 	}
 
 	for _, arn := range role.PolicyArns {
@@ -228,8 +259,7 @@ func (b *backend) secretAccessKeysCreate(
 			PolicyArn: aws.String(arn),
 		})
 		if err != nil {
-			return logical.ErrorResponse(fmt.Sprintf(
-				"Error attaching user policy: %s", err)), awsutil.CheckAWSError(err)
+			return logical.ErrorResponse("Error attaching user policy: %s", err), awsutil.CheckAWSError(err)
 		}
 
 	}
@@ -241,8 +271,18 @@ func (b *backend) secretAccessKeysCreate(
 			PolicyDocument: aws.String(role.PolicyDocument),
 		})
 		if err != nil {
-			return logical.ErrorResponse(fmt.Sprintf(
-				"Error putting user policy: %s", err)), awsutil.CheckAWSError(err)
+			return logical.ErrorResponse("Error putting user policy: %s", err), awsutil.CheckAWSError(err)
+		}
+	}
+
+	for _, group := range role.IAMGroups {
+		// Add user to IAM groups
+		_, err = iamClient.AddUserToGroup(&iam.AddUserToGroupInput{
+			UserName:  aws.String(username),
+			GroupName: aws.String(group),
+		})
+		if err != nil {
+			return logical.ErrorResponse("Error adding user to group: %s", err), awsutil.CheckAWSError(err)
 		}
 	}
 
@@ -251,8 +291,7 @@ func (b *backend) secretAccessKeysCreate(
 		UserName: aws.String(username),
 	})
 	if err != nil {
-		return logical.ErrorResponse(fmt.Sprintf(
-			"Error creating access keys: %s", err)), awsutil.CheckAWSError(err)
+		return logical.ErrorResponse("Error creating access keys: %s", err), awsutil.CheckAWSError(err)
 	}
 
 	// Remove the WAL entry, we succeeded! If we fail, we don't return
diff --git a/website/pages/api-docs/secret/aws/index.mdx b/website/pages/api-docs/secret/aws/index.mdx
index 52787cb36c3c..2c3da63e3661 100644
--- a/website/pages/api-docs/secret/aws/index.mdx
+++ b/website/pages/api-docs/secret/aws/index.mdx
@@ -254,6 +254,13 @@ updated with the new attributes.
   user has. With `assumed_role` and `federation_token`, the policy document will
   act as a filter on what the credentials can do, similar to `policy_arns`.
 
+- `iam_groups` `(list: [])` - A list of IAM group names. IAM users generated
+  against this Vault role will be added to these IAM groups. For a credential
+  type of `assumed_role` or `federation_token`, the policies sent to the
+  corresponding AWS call (sts:AssumeRole or sts:GetFederationToken) will be the
+  policies from each group in `iam_groups` combined with the `policy_document`
+  and `policy_arns` parameters.
+
 - `default_sts_ttl` `(string)` - The default TTL for STS credentials. When a TTL is not
   specified when STS credentials are requested, and a default TTL is specified
   on the role, then this default TTL will be used. Valid only when
@@ -313,6 +320,15 @@ Using an ARN:
 }
 ```
 
+Using groups:
+
+```json
+{
+  "credential_type": "assumed_role",
+  "iam_groups": ["group1", "group2"]
+}
+```
+
 ## Read Role
 
 This endpoint queries an existing role by the given name. If the role does not
@@ -348,7 +364,8 @@ For an inline IAM policy:
     "policy_document": "{\"Version\": \"...\"}",
     "policy_arns": [],
     "credential_types": ["assumed_role"],
-    "role_arns": []
+    "role_arns": [],
+    "iam_groups": []
   }
 }
 ```
@@ -361,7 +378,22 @@ For a role ARN:
     "policy_document": "",
     "policy_arns": [],
     "credential_types": ["assumed_role"],
-    "role_arns": ["arn:aws:iam::123456789012:role/example-role"]
+    "role_arns": ["arn:aws:iam::123456789012:role/example-role"],
+    "iam_groups": []
+  }
+}
+```
+
+For IAM groups:
+
+```json
+{
+  "data": {
+    "policy_document": "",
+    "policy_arns": [],
+    "credential_types": ["assumed_role"],
+    "role_arns": [],
+    "iam_groups": ["group1", "group2"]
   }
 }
 ```
diff --git a/website/pages/docs/secrets/aws/index.mdx b/website/pages/docs/secrets/aws/index.mdx
index 566a94ad4573..fdceedfb6d47 100644
--- a/website/pages/docs/secrets/aws/index.mdx
+++ b/website/pages/docs/secrets/aws/index.mdx
@@ -98,11 +98,12 @@ management tool.
     document to the IAM user. Vault will then create an access key and secret
     key for the IAM user and return these credentials. You supply a
     user inline policy and/or provide references to an existing AWS policy's full
-    ARN:
+    ARN and/or a list of IAM groups:
 
     ```text
     $ vault write aws/roles/my-other-role \
         policy_arns=arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess,arn:aws:iam::aws:policy/IAMReadOnlyAccess \
+        iam_groups=group1,group2 \
         credential_type=iam_user \
         policy_document=-<<EOF
     {
@@ -177,6 +178,7 @@ permissions Vault needs:
         "iam:ListGroupsForUser",
         "iam:ListUserPolicies",
         "iam:PutUserPolicy",
+        "iam:AddUserToGroup",
         "iam:RemoveUserFromGroup"
       ],
       "Resource": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:user/vault-*"]
@@ -203,6 +205,7 @@ user, you can use a policy like:
         "iam:ListAttachedUserPolicies",
         "iam:ListGroupsForUser",
         "iam:ListUserPolicies",
+        "iam:AddUserToGroup",
         "iam:RemoveUserFromGroup"
       ],
       "Resource": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:user/vault-*"]
@@ -254,8 +257,9 @@ An STS federation token inherits a set of permissions that are the combination
 3. The managed policy ARNs configured in the Vault role
 4. An implicit deny policy on IAM or STS operations.
 
-Roles with a `credential_type` of `federation_token` can specify both a
-`policy_document` and `policy_arns` parameter in the Vault role.
+Roles with a `credential_type` of `federation_token` can specify one or more of
+the `policy_document`, `policy_arns`, and `iam_groups` parameters in the Vault
+role.
 
 The `aws/config/root` credentials require IAM permissions for
 `sts:GetFederationToken` and the permissions to delegate to the STS
@@ -373,20 +377,22 @@ specify more than one IAM role ARN. If you do so, Vault clients can select which
 role ARN they would like to assume when retrieving credentials from that role.
 
 Further, you can specify both a `policy_document` and `policy_arns` parameters;
-if specified, each acts as a
-filter on the IAM permissions granted to the assumed role. For an action to be
+if specified, each acts as a filter on the IAM permissions granted to the
+assumed role. If `iam_groups` is specified, the inline and attached policies for
+each IAM group will be added to the `policy_document` and `policy_arns`
+parameters, respectively, when calling [sts:AssumeRole]. For an action to be
 allowed, it must be permitted by both the IAM policy on the AWS role that is
 assumed, the `policy_document` specified on the Vault role (if specified), and
 the managed policies specified by the `policy_arns` parameter. (The
 `policy_document` parameter is passed in as the `Policy` parameter to the
-[sts:AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-API call, while the `policy_arns` parameter is passed in as the `PolicyArns`
-parameter to the same call.)
+[sts:AssumeRole] API call, while the `policy_arns` parameter is passed in as the
+`PolicyArns` parameter to the same call.)
 
 Note: When multiple `role_arns` are specified, clients requesting credentials
 can specify any of the role ARNs that are defined on the Vault role in order to
-retrieve credentials. However, when a `policy_document` is specified, that will
-apply to ALL role credentials retrieved from AWS.
+retrieve credentials. However, when `policy_document`, `policy_arns`, or
+`iam_groups` are specified, that will apply to ALL role credentials retrieved
+from AWS.
 
 Let's create a "deploy" policy using the arn of our role to assume:
 
@@ -410,6 +416,8 @@ secret_key     	HSs0DYYYYYY9W81DXtI0K7X84H+OVZXK5BXXXX
 security_token 	AQoDYXdzEEwasAKwQyZUtZaCjVNDiXXXXXXXXgUgBBVUUbSyujLjsw6jYzboOQ89vUVIehUw/9MreAifXFmfdbjTr3g6zc0me9M+dB95DyhetFItX5QThw0lEsVQWSiIeIotGmg7mjT1//e7CJc4LpxbW707loFX1TYD1ilNnblEsIBKGlRNXZ+QJdguY4VkzXxv2urxIH0Sl14xtqsRPboV7eYruSEZlAuP3FLmqFbmA0AFPCT37cLf/vUHinSbvw49C4c9WQLH7CeFPhDub7/rub/QU/lCjjJ43IqIRo9jYgcEvvdRkQSt70zO8moGCc7pFvmL7XGhISegQpEzudErTE/PdhjlGpAKGR3d5qKrHpPYK/k480wk1Ai/t1dTa/8/3jUYTUeIkaJpNBnupQt7qoaXXXXXXXXXX
 ```
 
+[sts:AssumeRole]: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+
 ## Troubleshooting
 
 ### Dynamic IAM user errors

From af38517cf5e2d099b35713b2ee763096773a2cfa Mon Sep 17 00:00:00 2001
From: Theron Voran <tvoran@users.noreply.github.com>
Date: Tue, 9 Jun 2020 17:01:42 -0700
Subject: [PATCH 03/29] changelog++

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 454284635d7c..068f3b8c624d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@ IMPROVEMENTS:
 * plugin: Add SDK method, `Sys.ReloadPlugin`, and CLI command, `vault plugin reload`, 
   for reloading plugins. [[GH-8777](https://github.com/hashicorp/vault/pull/8777)]
 * sdk/framework: Support accepting TypeFloat parameters over the API [[GH-8923](https://github.com/hashicorp/vault/pull/8923)]
+* secrets/aws: Add iam_groups parameter to role create/update [[GH-8811](https://github.com/hashicorp/vault/pull/8811)]
 * secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-11](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/11)]
 * secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
 * secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]

From ca2309c321a85eb203697c85090f61f51dfcf608 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?F=C3=A9lix=20Mattrat?= <dysosmus@users.noreply.github.com>
Date: Wed, 10 Jun 2020 19:31:46 +0200
Subject: [PATCH 04/29] Improving transit batch encrypt and decrypt latencies
 (#8775)

Optimized batch items decoder bypassing mapstructure
---
 builtin/logical/transit/backend_test.go       |   8 +-
 builtin/logical/transit/path_decrypt.go       |   3 +-
 .../transit/path_decrypt_bench_test.go        |  88 ++++++++++++
 builtin/logical/transit/path_encrypt.go       |  95 +++++++++++--
 .../transit/path_encrypt_bench_test.go        |  68 ++++++++++
 builtin/logical/transit/path_encrypt_test.go  | 127 ++++++++++++++++++
 6 files changed, 374 insertions(+), 15 deletions(-)
 create mode 100644 builtin/logical/transit/path_decrypt_bench_test.go
 create mode 100644 builtin/logical/transit/path_encrypt_bench_test.go

diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go
index 17d44f021da3..562b6d3b9055 100644
--- a/builtin/logical/transit/backend_test.go
+++ b/builtin/logical/transit/backend_test.go
@@ -27,7 +27,7 @@ const (
 	testPlaintext = "the quick brown fox"
 )
 
-func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+func createBackendWithStorage(t testing.TB) (*backend, logical.Storage) {
 	config := logical.TestBackendConfig()
 	config.StorageView = &logical.InmemStorage{}
 
@@ -42,7 +42,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
 	return b, config.StorageView
 }
 
-func createBackendWithSysView(t *testing.T) (*backend, logical.Storage) {
+func createBackendWithSysView(t testing.TB) (*backend, logical.Storage) {
 	sysView := logical.TestSystemView()
 	storage := &logical.InmemStorage{}
 
@@ -64,7 +64,7 @@ func createBackendWithSysView(t *testing.T) (*backend, logical.Storage) {
 	return b, storage
 }
 
-func createBackendWithSysViewWithStorage(t *testing.T, s logical.Storage) *backend {
+func createBackendWithSysViewWithStorage(t testing.TB, s logical.Storage) *backend {
 	sysView := logical.TestSystemView()
 
 	conf := &logical.BackendConfig{
@@ -85,7 +85,7 @@ func createBackendWithSysViewWithStorage(t *testing.T, s logical.Storage) *backe
 	return b
 }
 
-func createBackendWithForceNoCacheWithSysViewWithStorage(t *testing.T, s logical.Storage) *backend {
+func createBackendWithForceNoCacheWithSysViewWithStorage(t testing.TB, s logical.Storage) *backend {
 	sysView := logical.TestSystemView()
 	sysView.CachingDisabledVal = true
 
diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go
index bd3d82541ac2..77d77f5176d1 100644
--- a/builtin/logical/transit/path_decrypt.go
+++ b/builtin/logical/transit/path_decrypt.go
@@ -9,7 +9,6 @@ import (
 	"github.com/hashicorp/vault/sdk/helper/errutil"
 	"github.com/hashicorp/vault/sdk/helper/keysutil"
 	"github.com/hashicorp/vault/sdk/logical"
-	"github.com/mitchellh/mapstructure"
 )
 
 func (b *backend) pathDecrypt() *framework.Path {
@@ -57,7 +56,7 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d
 	var batchInputItems []BatchRequestItem
 	var err error
 	if batchInputRaw != nil {
-		err = mapstructure.Decode(batchInputRaw, &batchInputItems)
+		err = decodeBatchRequestItems(batchInputRaw, &batchInputItems)
 		if err != nil {
 			return nil, errwrap.Wrapf("failed to parse batch input: {{err}}", err)
 		}
diff --git a/builtin/logical/transit/path_decrypt_bench_test.go b/builtin/logical/transit/path_decrypt_bench_test.go
new file mode 100644
index 000000000000..bc93fc5c4049
--- /dev/null
+++ b/builtin/logical/transit/path_decrypt_bench_test.go
@@ -0,0 +1,88 @@
+package transit
+
+import (
+	"context"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func BenchmarkTransit_BatchDecryption1(b *testing.B) {
+	BTransit_BatchDecryption(b, 1)
+}
+
+func BenchmarkTransit_BatchDecryption10(b *testing.B) {
+	BTransit_BatchDecryption(b, 10)
+}
+
+func BenchmarkTransit_BatchDecryption50(b *testing.B) {
+	BTransit_BatchDecryption(b, 50)
+}
+
+func BenchmarkTransit_BatchDecryption100(b *testing.B) {
+	BTransit_BatchDecryption(b, 100)
+}
+
+func BenchmarkTransit_BatchDecryption1000(b *testing.B) {
+	BTransit_BatchDecryption(b, 1_000)
+}
+
+func BenchmarkTransit_BatchDecryption10000(b *testing.B) {
+	BTransit_BatchDecryption(b, 10_000)
+}
+
+func BTransit_BatchDecryption(b *testing.B, bsize int) {
+	b.StopTimer()
+
+	var resp *logical.Response
+	var err error
+
+	backend, s := createBackendWithStorage(b)
+
+	batchEncryptionInput := make([]interface{}, 0, bsize)
+	for i := 0; i < bsize; i++ {
+		batchEncryptionInput = append(
+			batchEncryptionInput,
+			map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+		)
+	}
+
+	batchEncryptionData := map[string]interface{}{
+		"batch_input": batchEncryptionInput,
+	}
+
+	batchEncryptionReq := &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "encrypt/upserted_key",
+		Storage:   s,
+		Data:      batchEncryptionData,
+	}
+	resp, err = backend.HandleRequest(context.Background(), batchEncryptionReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		b.Fatalf("err:%v resp:%#v", err, resp)
+	}
+
+	batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
+	batchDecryptionInput := make([]interface{}, len(batchResponseItems))
+	for i, item := range batchResponseItems {
+		batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext}
+	}
+	batchDecryptionData := map[string]interface{}{
+		"batch_input": batchDecryptionInput,
+	}
+
+	batchDecryptionReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "decrypt/upserted_key",
+		Storage:   s,
+		Data:      batchDecryptionData,
+	}
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		resp, err = backend.HandleRequest(context.Background(), batchDecryptionReq)
+		if err != nil || (resp != nil && resp.IsError()) {
+			b.Fatalf("err:%v resp:%#v", err, resp)
+		}
+	}
+}
diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go
index f085307620dc..414a3cbdc7d7 100644
--- a/builtin/logical/transit/path_encrypt.go
+++ b/builtin/logical/transit/path_encrypt.go
@@ -59,22 +59,22 @@ func (b *backend) pathEncrypt() *framework.Path {
 	return &framework.Path{
 		Pattern: "encrypt/" + framework.GenericNameRegex("name"),
 		Fields: map[string]*framework.FieldSchema{
-			"name": &framework.FieldSchema{
+			"name": {
 				Type:        framework.TypeString,
 				Description: "Name of the policy",
 			},
 
-			"plaintext": &framework.FieldSchema{
+			"plaintext": {
 				Type:        framework.TypeString,
 				Description: "Base64 encoded plaintext value to be encrypted",
 			},
 
-			"context": &framework.FieldSchema{
+			"context": {
 				Type:        framework.TypeString,
 				Description: "Base64 encoded context for key derivation. Required if key derivation is enabled",
 			},
 
-			"nonce": &framework.FieldSchema{
+			"nonce": {
 				Type: framework.TypeString,
 				Description: `
 Base64 encoded nonce value. Must be provided if convergent encryption is
@@ -85,7 +85,7 @@ encryption key) this nonce value is **never reused**.
 `,
 			},
 
-			"type": &framework.FieldSchema{
+			"type": {
 				Type:    framework.TypeString,
 				Default: "aes256-gcm96",
 				Description: `
@@ -94,7 +94,7 @@ When performing an upsert operation, the type of key to create. Currently,
 "aes128-gcm96" (symmetric) and "aes256-gcm96" (symmetric) are the only types supported. Defaults to "aes256-gcm96".`,
 			},
 
-			"convergent_encryption": &framework.FieldSchema{
+			"convergent_encryption": {
 				Type: framework.TypeBool,
 				Description: `
 This parameter will only be used when a key is expected to be created.  Whether
@@ -107,7 +107,7 @@ you ensure that all nonces are unique for a given context.  Failing to do so
 will severely impact the ciphertext's security.`,
 			},
 
-			"key_version": &framework.FieldSchema{
+			"key_version": {
 				Type: framework.TypeInt,
 				Description: `The version of the key to use for encryption.
 Must be 0 (for latest) or a value greater than or equal
@@ -127,6 +127,84 @@ to the min_encryption_version configured on the key.`,
 	}
 }
 
+// decodeBatchRequestItems is a fast path alternative to mapstructure.Decode to decode []BatchRequestItem.
+// It aims to behave as closely as possible to the original mapstructure.Decode and will return the same errors.
+// https://github.com/hashicorp/vault/pull/8775/files#r437709722
+func decodeBatchRequestItems(src interface{}, dst *[]BatchRequestItem) error {
+	if src == nil || dst == nil {
+		return nil
+	}
+
+	items, ok := src.([]interface{})
+	if !ok {
+		return fmt.Errorf("source data must be an array or slice, got %T", src)
+	}
+
+	// Ideally we would return early, before allocating, when the batch is empty.
+	// However, to match mapstructure's output we must still allocate an empty slice.
+	sitems := len(items)
+	*dst = make([]BatchRequestItem, sitems)
+	if sitems == 0 {
+		return nil
+	}
+
+	// To comply with mapstructure output the same error type is needed.
+	var errs mapstructure.Error
+
+	for i, iitem := range items {
+		item, ok := iitem.(map[string]interface{})
+		if !ok {
+			return fmt.Errorf("[%d] expected a map, got '%T'", i, iitem)
+		}
+
+		if v, has := item["context"]; has {
+			if casted, ok := v.(string); ok {
+				(*dst)[i].Context = casted
+			} else {
+				errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].context' expected type 'string', got unconvertible type '%T'", i, item["context"]))
+			}
+		}
+
+		if v, has := item["ciphertext"]; has {
+			if casted, ok := v.(string); ok {
+				(*dst)[i].Ciphertext = casted
+			} else {
+				errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].ciphertext' expected type 'string', got unconvertible type '%T'", i, item["ciphertext"]))
+			}
+		}
+
+		if v, has := item["plaintext"]; has {
+			if casted, ok := v.(string); ok {
+				(*dst)[i].Plaintext = casted
+			} else {
+				errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].plaintext' expected type 'string', got unconvertible type '%T'", i, item["plaintext"]))
+			}
+		}
+
+		if v, has := item["nonce"]; has {
+			if casted, ok := v.(string); ok {
+				(*dst)[i].Nonce = casted
+			} else {
+				errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].nonce' expected type 'string', got unconvertible type '%T'", i, item["nonce"]))
+			}
+		}
+
+		if v, has := item["key_version"]; has {
+			if casted, ok := v.(int); ok {
+				(*dst)[i].KeyVersion = casted
+			} else {
+				errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].key_version' expected type 'int', got unconvertible type '%T'", i, item["key_version"]))
+			}
+		}
+	}
+
+	if len(errs.Errors) > 0 {
+		return &errs
+	}
+
+	return nil
+}
+
 func (b *backend) pathEncryptExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) {
 	name := d.Get("name").(string)
 	p, _, err := b.lm.GetPolicy(ctx, keysutil.PolicyRequest{
@@ -146,11 +224,10 @@ func (b *backend) pathEncryptExistenceCheck(ctx context.Context, req *logical.Re
 func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
 	name := d.Get("name").(string)
 	var err error
-
 	batchInputRaw := d.Raw["batch_input"]
 	var batchInputItems []BatchRequestItem
 	if batchInputRaw != nil {
-		err = mapstructure.Decode(batchInputRaw, &batchInputItems)
+		err = decodeBatchRequestItems(batchInputRaw, &batchInputItems)
 		if err != nil {
 			return nil, errwrap.Wrapf("failed to parse batch input: {{err}}", err)
 		}
diff --git a/builtin/logical/transit/path_encrypt_bench_test.go b/builtin/logical/transit/path_encrypt_bench_test.go
new file mode 100644
index 000000000000..e648c6e02fc3
--- /dev/null
+++ b/builtin/logical/transit/path_encrypt_bench_test.go
@@ -0,0 +1,68 @@
+package transit
+
+import (
+	"context"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func BenchmarkTransit_BatchEncryption1(b *testing.B) {
+	BTransit_BatchEncryption(b, 1)
+}
+
+func BenchmarkTransit_BatchEncryption10(b *testing.B) {
+	BTransit_BatchEncryption(b, 10)
+}
+
+func BenchmarkTransit_BatchEncryption50(b *testing.B) {
+	BTransit_BatchEncryption(b, 50)
+}
+
+func BenchmarkTransit_BatchEncryption100(b *testing.B) {
+	BTransit_BatchEncryption(b, 100)
+}
+
+func BenchmarkTransit_BatchEncryption1000(b *testing.B) {
+	BTransit_BatchEncryption(b, 1_000)
+}
+
+func BenchmarkTransit_BatchEncryption10000(b *testing.B) {
+	BTransit_BatchEncryption(b, 10_000)
+}
+
+func BTransit_BatchEncryption(b *testing.B, bsize int) {
+	b.StopTimer()
+
+	var resp *logical.Response
+	var err error
+
+	backend, s := createBackendWithStorage(b)
+
+	batchEncryptionInput := make([]interface{}, 0, bsize)
+	for i := 0; i < bsize; i++ {
+		batchEncryptionInput = append(
+			batchEncryptionInput,
+			map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
+		)
+	}
+
+	batchEncryptionData := map[string]interface{}{
+		"batch_input": batchEncryptionInput,
+	}
+
+	batchEncryptionReq := &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "encrypt/upserted_key",
+		Storage:   s,
+		Data:      batchEncryptionData,
+	}
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		resp, err = backend.HandleRequest(context.Background(), batchEncryptionReq)
+		if err != nil || (resp != nil && resp.IsError()) {
+			b.Fatalf("err:%v resp:%#v", err, resp)
+		}
+	}
+}
diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go
index 283d06fad016..0b1a65846a59 100644
--- a/builtin/logical/transit/path_encrypt_test.go
+++ b/builtin/logical/transit/path_encrypt_test.go
@@ -2,6 +2,7 @@ package transit
 
 import (
 	"context"
+	"reflect"
 	"testing"
 
 	"github.com/hashicorp/vault/sdk/logical"
@@ -573,3 +574,129 @@ func TestTransit_BatchEncryptionCase12(t *testing.T) {
 		t.Fatalf("expected an error")
 	}
 }
+
+// Test that the fast-path function decodeBatchRequestItems behaves like mapstructure.Decode() when decoding []BatchRequestItem.
+func TestTransit_decodeBatchRequestItems(t *testing.T) {
+	tests := []struct {
+		name string
+		src  interface{}
+		dest []BatchRequestItem
+	}{
+		// basic edge cases of nil values
+		{name: "nil-nil", src: nil, dest: nil},
+		{name: "nil-empty", src: nil, dest: []BatchRequestItem{}},
+		{name: "empty-nil", src: []interface{}{}, dest: nil},
+		{
+			name: "src-nil",
+			src:  []interface{}{map[string]interface{}{}},
+			dest: nil,
+		},
+		// empty src & dest
+		{
+			name: "src-dest",
+			src:  []interface{}{map[string]interface{}{}},
+			dest: []BatchRequestItem{},
+		},
+		// empty src but with already populated dest, mapstructure discards pre-populated data.
+		{
+			name: "src-dest_pre_filled",
+			src:  []interface{}{map[string]interface{}{}},
+			dest: []BatchRequestItem{{}},
+		},
+		// two tests per property, to cover valid and invalid input
+		{
+			name: "src_plaintext-dest",
+			src:  []interface{}{map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_plaintext_invalid-dest",
+			src:  []interface{}{map[string]interface{}{"plaintext": 666}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_ciphertext-dest",
+			src:  []interface{}{map[string]interface{}{"ciphertext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_ciphertext_invalid-dest",
+			src:  []interface{}{map[string]interface{}{"ciphertext": 666}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_key_version-dest",
+			src:  []interface{}{map[string]interface{}{"key_version": 1}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_key_version_invalid-dest",
+			src:  []interface{}{map[string]interface{}{"key_version": "666"}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_nonce-dest",
+			src:  []interface{}{map[string]interface{}{"nonce": "dGVzdGNvbnRleHQ="}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_nonce_invalid-dest",
+			src:  []interface{}{map[string]interface{}{"nonce": 666}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_context-dest",
+			src:  []interface{}{map[string]interface{}{"context": "dGVzdGNvbnRleHQ="}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_context_invalid-dest",
+			src:  []interface{}{map[string]interface{}{"context": 666}},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_multi_order-dest",
+			src: []interface{}{
+				map[string]interface{}{"context": "1"},
+				map[string]interface{}{"context": "2"},
+				map[string]interface{}{"context": "3"},
+			},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_multi_with_invalid-dest",
+			src: []interface{}{
+				map[string]interface{}{"context": "1"},
+				map[string]interface{}{"context": "2", "key_version": "666"},
+				map[string]interface{}{"context": "3"},
+			},
+			dest: []BatchRequestItem{},
+		},
+		{
+			name: "src_multi_with_multi_invalid-dest",
+			src: []interface{}{
+				map[string]interface{}{"context": "1"},
+				map[string]interface{}{"context": "2", "key_version": "666"},
+				map[string]interface{}{"context": "3", "key_version": "1337"},
+			},
+			dest: []BatchRequestItem{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			expectedDest := append(tt.dest[:0:0], tt.dest...) // copy of the dest state
+			expectedErr := mapstructure.Decode(tt.src, &expectedDest)
+
+			gotErr := decodeBatchRequestItems(tt.src, &tt.dest)
+			gotDest := tt.dest
+
+			if !reflect.DeepEqual(expectedErr, gotErr) {
+				t.Errorf("decodeBatchRequestItems unexpected error value, want: '%v', got: '%v'", expectedErr, gotErr)
+			}
+
+			if !reflect.DeepEqual(expectedDest, gotDest) {
+				t.Errorf("decodeBatchRequestItems unexpected dest value, want: '%v', got: '%v'", expectedDest, gotDest)
+			}
+		})
+	}
+}

From fd6169bd43ad58e97acbdd131b394f1096effb46 Mon Sep 17 00:00:00 2001
From: ncabatoff <ncabatoff@hashicorp.com>
Date: Wed, 10 Jun 2020 13:33:51 -0400
Subject: [PATCH 05/29] changelog++

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 068f3b8c624d..c57a868f52ba 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,7 +14,6 @@ CHANGES:
 
 IMPROVEMENTS:
 
-* secrets/transit: Transit requests that make use of keys now include a new field  `key_version` in their responses [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
 * core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
 * core: Added Password Policies for user-configurable password generation [[GH-8637](https://github.com/hashicorp/vault/pull/8637)]
 * cli: Support reading TLS parameters from file for the `vault operator raft join` command. [[GH-9060](https://github.com/hashicorp/vault/pull/9060)]
@@ -26,6 +25,8 @@ IMPROVEMENTS:
 * secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
 * secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
 * secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)]
+* secrets/transit: Transit requests that make use of keys now include a new field `key_version` in their responses [[GH-8775](https://github.com/hashicorp/vault/pull/8775)]
+* secrets/transit: Improving transit batch encrypt and decrypt latencies [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
 * sentinel: Add a sentinel config section, and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults.
 * ui: Update TTL picker styling on SSH secret engine [[GH-8891](https://github.com/hashicorp/vault/pull/8891)]
 * ui: Only render the JWT input field of the Vault login form on mounts configured for JWT auth [[GH-8952](https://github.com/hashicorp/vault/pull/8952)]

From 51c1e0a5aab26eceb88d749200e3c54316364bd1 Mon Sep 17 00:00:00 2001
From: Mike Wickett <mwickett@hashicorp.com>
Date: Wed, 10 Jun 2020 16:01:23 -0400
Subject: [PATCH 06/29] website: remove whitepaper link from subnav (#9190)

---
 website/data/subnav.js | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/website/data/subnav.js b/website/data/subnav.js
index 3d94346cab67..7525c4f099b5 100644
--- a/website/data/subnav.js
+++ b/website/data/subnav.js
@@ -7,22 +7,17 @@ export default [
       { text: 'Data Encryption', url: '/use-cases/data-encryption' },
       {
         text: 'Identity-based Access',
-        url: '/use-cases/identity-based-access'
-      }
-    ]
+        url: '/use-cases/identity-based-access',
+      },
+    ],
   },
   {
     text: 'Enterprise',
-    url: 'https://www.hashicorp.com/products/vault/enterprise'
-  },
-  {
-    text: 'Whitepaper',
-    url:
-      'https://www.hashicorp.com/resources/unlocking-the-cloud-operating-model-security?utm_source=vaultsubnav'
+    url: 'https://www.hashicorp.com/products/vault/enterprise',
   },
   'divider',
   { text: 'Learn', url: 'https://learn.hashicorp.com/vault' },
   { text: 'Docs', url: '/docs' },
   { text: 'API', url: '/api-docs' },
-  { text: 'Community', url: '/community' }
+  { text: 'Community', url: '/community' },
 ]

From 164bc169b95b036202bcbbe554d9acb0dfe717a6 Mon Sep 17 00:00:00 2001
From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com>
Date: Wed, 10 Jun 2020 20:53:48 -0700
Subject: [PATCH 07/29] fix: invalidate cached clients after a config change in
 the aws secrets backend (#9186)

---
 builtin/logical/aws/backend.go | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/builtin/logical/aws/backend.go b/builtin/logical/aws/backend.go
index 95ecaba8fdbb..f73256099f21 100644
--- a/builtin/logical/aws/backend.go
+++ b/builtin/logical/aws/backend.go
@@ -12,6 +12,11 @@ import (
 	"github.com/hashicorp/vault/sdk/logical"
 )
 
+const (
+	rootConfigPath        = "config/root"
+	minAwsUserRollbackAge = 5 * time.Minute
+)
+
 func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
 	b := Backend()
 	if err := b.Setup(ctx, conf); err != nil {
@@ -47,6 +52,7 @@ func Backend() *backend {
 			secretAccessKeys(&b),
 		},
 
+		Invalidate:        b.invalidate,
 		WALRollback:       b.walRollback,
 		WALRollbackMinAge: minAwsUserRollbackAge,
 		BackendType:       logical.TypeLogical,
@@ -80,6 +86,21 @@ be configured with the "root" path and policies must be written using
 the "roles/" endpoints before any access keys can be generated.
 `
 
+func (b *backend) invalidate(ctx context.Context, key string) {
+	switch {
+	case key == rootConfigPath:
+		b.clearClients()
+	}
+}
+
+// clearClients clears the backend's IAM and STS clients
+func (b *backend) clearClients() {
+	b.clientMutex.Lock()
+	defer b.clientMutex.Unlock()
+	b.iamClient = nil
+	b.stsClient = nil
+}
+
 // clientIAM returns the configured IAM client. If nil, it constructs a new one
 // and returns it, setting it the internal variable
 func (b *backend) clientIAM(ctx context.Context, s logical.Storage) (iamiface.IAMAPI, error) {
@@ -135,5 +156,3 @@ func (b *backend) clientSTS(ctx context.Context, s logical.Storage) (stsiface.ST
 
 	return b.stsClient, nil
 }
-
-const minAwsUserRollbackAge = 5 * time.Minute

From 270f25e45f255cd1ef72cbdcf0af8fa96d5b9cf3 Mon Sep 17 00:00:00 2001
From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com>
Date: Wed, 10 Jun 2020 21:37:33 -0700
Subject: [PATCH 08/29] changelog++

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c57a868f52ba..d12b0b580c68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ BUG FIXES:
 * secrets/kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
 * ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines.[[GH-8913]](https://github.com/hashicorp/vault/pull/8913)
 * secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
 
 ## 1.4.2 (May 21st, 2020)
 

From 40ca74204064ab7c724e266c79d3acb1bc8028a7 Mon Sep 17 00:00:00 2001
From: Andy Assareh <assareh@hashicorp.com>
Date: Thu, 11 Jun 2020 04:50:31 -0700
Subject: [PATCH 09/29] replacing "a key usage mode" as it is confusing (#9194)

Since the context of this page is transit and encryption keys, using the word "key" to effectively mean "common" seems ill-advised. Proposing an alternative wording.
---
 website/pages/docs/secrets/transit/index.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/pages/docs/secrets/transit/index.mdx b/website/pages/docs/secrets/transit/index.mdx
index 0fae68c818ec..139149930ce7 100644
--- a/website/pages/docs/secrets/transit/index.mdx
+++ b/website/pages/docs/secrets/transit/index.mdx
@@ -84,7 +84,7 @@ derivation function but also by deterministically deriving a nonce. Because
 these properties differ for any combination of plaintext and ciphertext over a
 keyspace the size of 2^256, the risk of nonce reuse is near zero.
 
-This has many practical uses. A key usage mode is to allow values to be stored
+This has many practical uses. One common usage mode is to allow values to be stored
 encrypted in a database, but with limited lookup/query support, so that rows
 with the same value for a specific field can be returned from a query.
 

From de62507bd7f7588fe93269d4c1d4ff60d82aa06d Mon Sep 17 00:00:00 2001
From: ncabatoff <ncabatoff@hashicorp.com>
Date: Thu, 11 Jun 2020 08:10:13 -0400
Subject: [PATCH 10/29] Add ssh signing algorithm as a role option.   (#9096)

---
 builtin/logical/ssh/backend_test.go        | 417 ++++++++++++++++-----
 builtin/logical/ssh/path_config_ca_test.go |   8 +-
 builtin/logical/ssh/path_roles.go          |  26 +-
 builtin/logical/ssh/path_sign.go           |  28 +-
 builtin/logical/ssh/util.go                |   3 +-
 vault/testing.go                           |  95 +----
 6 files changed, 383 insertions(+), 194 deletions(-)

diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go
index 065fdf30ba3c..eb9f0d2d7273 100644
--- a/builtin/logical/ssh/backend_test.go
+++ b/builtin/logical/ssh/backend_test.go
@@ -1,9 +1,10 @@
 package ssh
 
 import (
+	"bytes"
 	"context"
 	"fmt"
-	"os/user"
+	"net"
 	"reflect"
 	"strconv"
 	"testing"
@@ -17,28 +18,27 @@ import (
 	"strings"
 
 	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/helper/testhelpers/docker"
 	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
 	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/hashicorp/vault/vault"
 	"github.com/mitchellh/mapstructure"
+	"github.com/ory/dockertest"
 )
 
-// Before the following tests are run, a username going by the name 'vaultssh' has
-// to be created and its ~/.ssh/authorized_keys file should contain the below key.
-//
-// ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
-
 const (
-	testIP               = "127.0.0.1"
-	testUserName         = "vaultssh"
-	testAdminUser        = "vaultssh"
-	testOTPKeyType       = "otp"
-	testDynamicKeyType   = "dynamic"
-	testCIDRList         = "127.0.0.1/32"
-	testAtRoleName       = "test@RoleName"
-	testDynamicRoleName  = "testDynamicRoleName"
-	testOTPRoleName      = "testOTPRoleName"
-	testKeyName          = "testKeyName"
+	testIP              = "127.0.0.1"
+	testUserName        = "vaultssh"
+	testAdminUser       = "vaultssh"
+	testOTPKeyType      = "otp"
+	testDynamicKeyType  = "dynamic"
+	testCIDRList        = "127.0.0.1/32"
+	testAtRoleName      = "test@RoleName"
+	testDynamicRoleName = "testDynamicRoleName"
+	testOTPRoleName     = "testOTPRoleName"
+	// testKeyName is the name of the entry that will be written to SSHMOUNTPOINT/ssh/keys
+	testKeyName = "testKeyName"
+	// testSharedPrivateKey is the value of the entry that will be written to SSHMOUNTPOINT/ssh/keys
 	testSharedPrivateKey = `
 -----BEGIN RSA PRIVATE KEY-----
 MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
@@ -68,15 +68,15 @@ oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
 +B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
 -----END RSA PRIVATE KEY-----
 `
-	// Public half of `privateKey`, identical to how it would be fed in from a file
-	publicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB dummy@example.com
+	// Public half of `testCAPrivateKey`, identical to how it would be fed in from a file
+	testCAPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB dummy@example.com
 `
 	publicKey2 = `AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB
 `
 
 	publicKey4096 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC54Oj4YCFDYxYv69Q9KfU6rWYtUB1eByQdUW0nXFi/vr98QUIV77sEeUVhaQzZcuCojAi/GrloW7ta0Z2DaEv5jOQMAnGpXBcqLJsz3KdrHbpvl93MPNdmNaGPU0GnUEsjBVuDVn9HdIUa8CNrxShvPu7/VqoaRHKLqphGgzFb37vi4qvnQ+5VYAO/TzyVYMD6qJX6I/9Pw8d74jCfEdOh2yGKkP7rXWOghreyIl8H2zTJKg9KoZuPq9F5M8nNt7Oi3rf+DwQiYvamzIqlDP4s5oFVTZW0E9lwWvYDpyiJnUrkQqksebBK/rcyfiFG3onb4qLo2WVWXeK3si8IhGik/TEzprScyAWIf9RviT8O+l5hTA2/c+ctn3MVCLRNfez2lKpdxCoprv1MbIcySGWblTJEcY6RA+aauVJpu7FMtRxHHtZKtMpep8cLu8GKbiP6Ifq2JXBtXtNxDeIgo2MkNoMh/NHAsACJniE/dqV/+u9HvhvgrTbJ69ell0nE4ivzA7O4kZgbR/4MHlLgLFvaqC8RrWRLY6BdFagPIMxghWha7Qw16zqoIjRnolvRzUWvSXanJVg8Z6ua1VxwgirNaAH1ivmJhUh2+4lNxCX6jmZyR3zjJsWY03gjJTairvI762opjjalF8fH6Xrs15mB14JiAlNbk6+5REQcvXlGqw== dummy@example.com`
 
-	privateKey = `-----BEGIN RSA PRIVATE KEY-----
+	testCAPrivateKey = `-----BEGIN RSA PRIVATE KEY-----
 MIIEowIBAAKCAQEAwK4CtIpUUX5PBOxyLI8+ZdwbsZsKQ3fAx0QlXzoW2eIDklcY
 xDjqgcFho8oHW6ASGcBV5sA5VOrFl9IQSQIKUM9Ao7248bavzY5LwE62D4J611IU
 2hJ9nkDtD7KkTUhR+Okiv6CX06dKvP1oKI8Vidi+xVFfDzvLiJOPQ86gKj1e1l1O
@@ -104,8 +104,95 @@ lObH9Faf0WGdnACZvTz22U9gWhw79S0SpDV31tC5Kl8dXHFiZ09vYUKkYmSd/kms
 SeKWrUkryx46LVf6NMhkyYmRqCEjBwfOozzezi5WbiJy6nn54GQt
 -----END RSA PRIVATE KEY-----
 `
+
+	// testPublicKeyInstall is the public key that is installed in the
+	// admin account's authorized_keys
+	testPublicKeyInstall = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT"
+
+	dockerImageTagSupportsRSA   = "8.1_p1-r0-ls20"
+	dockerImageTagSupportsNoRSA = "8.3_p1-r0-ls21"
 )
 
+func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) {
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		t.Fatalf("Failed to connect to docker: %s", err)
+	}
+
+	signer, err := ssh.ParsePrivateKey([]byte(testSharedPrivateKey))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if tag == "" {
+		tag = dockerImageTagSupportsNoRSA
+	}
+	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
+		Repository: "linuxserver/openssh-server",
+		Tag:        tag,
+		Env: []string{
+			"DOCKER_MODS=linuxserver/mods:openssh-server-openssh-client",
+			"PUBLIC_KEY=" + testPublicKeyInstall,
+			"SUDO_ACCESS=true",
+			"USER_NAME=vaultssh",
+		},
+		ExposedPorts: []string{"2222/tcp"},
+	})
+	if err != nil {
+		t.Fatalf("Could not start local ssh docker container: %s", err)
+	}
+
+	cleanup := func() {
+		docker.CleanupResource(t, pool, resource)
+	}
+
+	sshAddress := fmt.Sprintf("127.0.0.1:%s", resource.GetPort("2222/tcp"))
+
+	// exponential backoff-retry
+	pool.MaxWait = 10 * time.Second
+	if err = pool.Retry(func() error {
+		// Install util-linux for non-busybox flock that supports timeout option
+		return testSSH(t, "vaultssh", sshAddress, ssh.PublicKeys(signer), fmt.Sprintf(`
+			set -e; 
+			sudo ln -s /config /home/vaultssh
+			sudo apk add util-linux;
+			echo "LogLevel DEBUG" | sudo tee -a /config/ssh_host_keys/sshd_config;
+			echo "TrustedUserCAKeys /config/ssh_host_keys/trusted-user-ca-keys.pem" | sudo tee -a /config/ssh_host_keys/sshd_config;
+			kill -HUP $(cat /config/sshd.pid)
+			echo "%s" | sudo tee /config/ssh_host_keys/trusted-user-ca-keys.pem
+		`, caPublicKeyPEM))
+	}); err != nil {
+		cleanup()
+		t.Fatalf("Could not connect to SSH docker container: %s", err)
+	}
+
+	return cleanup, sshAddress
+}
+
+func testSSH(t *testing.T, user, host string, auth ssh.AuthMethod, command string) error {
+	client, err := ssh.Dial("tcp", host, &ssh.ClientConfig{
+		User:            user,
+		Auth:            []ssh.AuthMethod{auth},
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+		Timeout:         5 * time.Second,
+	})
+	if err != nil {
+		return fmt.Errorf("unable to dial sshd to host %q: %v", host, err)
+	}
+	session, err := client.NewSession()
+	if err != nil {
+		return fmt.Errorf("unable to create sshd session to host %q: %v", host, err)
+	}
+	var stderr bytes.Buffer
+	session.Stderr = &stderr
+	defer session.Close()
+	err = session.Run(command)
+	if err != nil {
+		t.Logf("command %v failed, error: %v, stderr: %v", command, err, stderr.String())
+	}
+	return err
+}
+
 func TestBackend_allowed_users(t *testing.T) {
 	config := logical.TestBackendConfig()
 	config.StorageView = &logical.InmemStorage{}
@@ -220,21 +307,19 @@ func TestBackend_allowed_users(t *testing.T) {
 	}
 }
 
-func testingFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
-	_, err := vault.StartSSHHostTestServer()
-	if err != nil {
-		panic(fmt.Sprintf("error starting mock server:%s", err))
-	}
-	defaultLeaseTTLVal := 2 * time.Minute
-	maxLeaseTTLVal := 10 * time.Minute
-	return Factory(context.Background(), &logical.BackendConfig{
-		Logger:      nil,
-		StorageView: &logical.InmemStorage{},
-		System: &logical.StaticSystemView{
-			DefaultLeaseTTLVal: defaultLeaseTTLVal,
-			MaxLeaseTTLVal:     maxLeaseTTLVal,
-		},
-	})
+func newTestingFactory(t *testing.T) func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	return func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+		defaultLeaseTTLVal := 2 * time.Minute
+		maxLeaseTTLVal := 10 * time.Minute
+		return Factory(context.Background(), &logical.BackendConfig{
+			Logger:      vault.NewTestLogger(t),
+			StorageView: &logical.InmemStorage{},
+			System: &logical.StaticSystemView{
+				DefaultLeaseTTLVal: defaultLeaseTTLVal,
+				MaxLeaseTTLVal:     maxLeaseTTLVal,
+			},
+		})
+	}
 }
 
 func TestSSHBackend_Lookup(t *testing.T) {
@@ -259,8 +344,7 @@ func TestSSHBackend_Lookup(t *testing.T) {
 	resp4 := []string{testDynamicRoleName}
 	resp5 := []string{testAtRoleName}
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testLookupRead(t, data, resp1),
 			testRoleWrite(t, testOTPRoleName, testOTPRoleData),
@@ -307,7 +391,7 @@ func TestSSHBackend_RoleList(t *testing.T) {
 		},
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testRoleList(t, resp1),
 			testRoleWrite(t, testOTPRoleName, testOTPRoleData),
@@ -323,27 +407,34 @@ func TestSSHBackend_RoleList(t *testing.T) {
 }
 
 func TestSSHBackend_DynamicKeyCreate(t *testing.T) {
+	cleanup, sshAddress := prepareTestContainer(t, "", "")
+	defer cleanup()
+
+	host, port, err := net.SplitHostPort(sshAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	testDynamicRoleData := map[string]interface{}{
 		"key_type":     testDynamicKeyType,
 		"key":          testKeyName,
 		"admin_user":   testAdminUser,
 		"default_user": testAdminUser,
 		"cidr_list":    testCIDRList,
+		"port":         port,
 	}
 	data := map[string]interface{}{
 		"username": testUserName,
-		"ip":       testIP,
+		"ip":       host,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		PreCheck:       testAccUserPrecheckFunc(t),
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
 			testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
-			testCredsWrite(t, testDynamicRoleName, data, false),
+			testCredsWrite(t, testDynamicRoleName, data, false, sshAddress),
 			testRoleWrite(t, testAtRoleName, testDynamicRoleData),
-			testCredsWrite(t, testAtRoleName, data, false),
+			testCredsWrite(t, testAtRoleName, data, false, sshAddress),
 		},
 	})
 }
@@ -361,8 +452,7 @@ func TestSSHBackend_OTPRoleCrud(t *testing.T) {
 		"cidr_list":    testCIDRList,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testRoleWrite(t, testOTPRoleName, testOTPRoleData),
 			testRoleRead(t, testOTPRoleName, respOTPRoleData),
@@ -395,8 +485,7 @@ func TestSSHBackend_DynamicRoleCrud(t *testing.T) {
 		"key_type":       testDynamicKeyType,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
 			testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
@@ -413,8 +502,7 @@ func TestSSHBackend_DynamicRoleCrud(t *testing.T) {
 
 func TestSSHBackend_NamedKeysCrud(t *testing.T) {
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
 			testNamedKeysDelete(t),
@@ -423,23 +511,33 @@ func TestSSHBackend_NamedKeysCrud(t *testing.T) {
 }
 
 func TestSSHBackend_OTPCreate(t *testing.T) {
+	cleanup, sshAddress := prepareTestContainer(t, "", "")
+	defer func() {
+		if !t.Failed() {
+			cleanup()
+		}
+	}()
+
+	host, port, err := net.SplitHostPort(sshAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	testOTPRoleData := map[string]interface{}{
 		"key_type":     testOTPKeyType,
 		"default_user": testUserName,
 		"cidr_list":    testCIDRList,
+		"port":         port,
 	}
 	data := map[string]interface{}{
 		"username": testUserName,
-		"ip":       testIP,
+		"ip":       host,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testRoleWrite(t, testOTPRoleName, testOTPRoleData),
-			testCredsWrite(t, testOTPRoleName, data, false),
-			testRoleWrite(t, testAtRoleName, testOTPRoleData),
-			testCredsWrite(t, testAtRoleName, data, false),
+			testCredsWrite(t, testOTPRoleName, data, false, sshAddress),
 		},
 	})
 }
@@ -452,8 +550,7 @@ func TestSSHBackend_VerifyEcho(t *testing.T) {
 		"message": api.VerifyEchoResponse,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testVerifyWrite(t, verifyData, expectedData),
 		},
@@ -490,8 +587,7 @@ func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) {
 	}
 
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testRoleWrite(t, testOTPRoleName, testOTPRoleData),
 			testConfigZeroAddressWrite(t, req1),
@@ -522,49 +618,184 @@ func TestSSHBackend_CredsForZeroAddressRoles_otp(t *testing.T) {
 		"roles": testOTPRoleName,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testRoleWrite(t, testOTPRoleName, otpRoleData),
-			testCredsWrite(t, testOTPRoleName, data, true),
+			testCredsWrite(t, testOTPRoleName, data, true, ""),
 			testConfigZeroAddressWrite(t, req1),
-			testCredsWrite(t, testOTPRoleName, data, false),
+			testCredsWrite(t, testOTPRoleName, data, false, ""),
 			testConfigZeroAddressDelete(t),
-			testCredsWrite(t, testOTPRoleName, data, true),
+			testCredsWrite(t, testOTPRoleName, data, true, ""),
 		},
 	})
 }
 
 func TestSSHBackend_CredsForZeroAddressRoles_dynamic(t *testing.T) {
+	cleanup, sshAddress := prepareTestContainer(t, "", "")
+	defer cleanup()
+
+	host, port, err := net.SplitHostPort(sshAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	dynamicRoleData := map[string]interface{}{
 		"key_type":     testDynamicKeyType,
 		"key":          testKeyName,
 		"admin_user":   testAdminUser,
 		"default_user": testAdminUser,
+		"port":         port,
 	}
 	data := map[string]interface{}{
 		"username": testUserName,
-		"ip":       testIP,
+		"ip":       host,
 	}
 	req2 := map[string]interface{}{
 		"roles": testDynamicRoleName,
 	}
 	logicaltest.Test(t, logicaltest.TestCase{
-		PreCheck:       testAccUserPrecheckFunc(t),
-		AcceptanceTest: true,
-		LogicalFactory: testingFactory,
+		LogicalFactory: newTestingFactory(t),
 		Steps: []logicaltest.TestStep{
 			testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
 			testRoleWrite(t, testDynamicRoleName, dynamicRoleData),
-			testCredsWrite(t, testDynamicRoleName, data, true),
+			testCredsWrite(t, testDynamicRoleName, data, true, sshAddress),
 			testConfigZeroAddressWrite(t, req2),
-			testCredsWrite(t, testDynamicRoleName, data, false),
+			testCredsWrite(t, testDynamicRoleName, data, false, sshAddress),
 			testConfigZeroAddressDelete(t),
-			testCredsWrite(t, testDynamicRoleName, data, true),
+			testCredsWrite(t, testDynamicRoleName, data, true, sshAddress),
 		},
 	})
 }
 
+func TestSSHBackend_CA(t *testing.T) {
+	testCases := []struct {
+		name        string
+		tag         string
+		algoSigner  string
+		expectError bool
+	}{
+		{"defaultSignerSSHDSupport", dockerImageTagSupportsRSA, "", false},
+		{"rsaSignerSSHDSupport", dockerImageTagSupportsRSA, ssh.SigAlgoRSA, false},
+		{"rsa2SignerSSHDSupport", dockerImageTagSupportsRSA, ssh.SigAlgoRSASHA2256, false},
+		{"rsa2SignerNoSSHDSupport", dockerImageTagSupportsNoRSA, ssh.SigAlgoRSASHA2256, false},
+		{"defaultSignerNoSSHDSupport", dockerImageTagSupportsNoRSA, "", true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			testSSHBackend_CA(t, tc.tag, tc.algoSigner, tc.expectError)
+		})
+	}
+}
+
+func testSSHBackend_CA(t *testing.T, dockerImageTag, algorithmSigner string, expectError bool) {
+	cleanup, sshAddress := prepareTestContainer(t, dockerImageTag, testCAPublicKey)
+	defer cleanup()
+
+	config := logical.TestBackendConfig()
+
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatalf("Cannot create backend: %s", err)
+	}
+
+	testKeyToSignPrivate := `-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAQEAwn1V2xd/EgJXIY53fBTtc20k/ajekqQngvkpFSwNHW63XNEQK8Ll
+FOCyGXoje9DUGxnYs3F/ohfsBBWkLNfU7fiENdSJL1pbkAgJ+2uhV9sLZjvYhikrXWoyJX
+LDKfY12LjpcBS2HeLMT04laZ/xSJrOBEJHGzHyr2wUO0NUQUQPUODAFhnHKgvvA4Uu79UY
+gcdThF4w83+EAnE4JzBZMKPMjzy4u1C0R/LoD8DuapHwX6NGWdEUvUZZ+XRcIWeCOvR0ne
+qGBRH35k1Mv7k65d7kkE0uvM5Z36erw3tdoszxPYf7AKnO1DpeU2uwMcym6xNwfwynKjhL
+qL/Mgi4uRwAAA8iAsY0zgLGNMwAAAAdzc2gtcnNhAAABAQDCfVXbF38SAlchjnd8FO1zbS
+T9qN6SpCeC+SkVLA0dbrdc0RArwuUU4LIZeiN70NQbGdizcX+iF+wEFaQs19Tt+IQ11Ikv
+WluQCAn7a6FX2wtmO9iGKStdajIlcsMp9jXYuOlwFLYd4sxPTiVpn/FIms4EQkcbMfKvbB
+Q7Q1RBRA9Q4MAWGccqC+8DhS7v1RiBx1OEXjDzf4QCcTgnMFkwo8yPPLi7ULRH8ugPwO5q
+kfBfo0ZZ0RS9Rln5dFwhZ4I69HSd6oYFEffmTUy/uTrl3uSQTS68zlnfp6vDe12izPE9h/
+sAqc7UOl5Ta7AxzKbrE3B/DKcqOEuov8yCLi5HAAAAAwEAAQAAAQABns2yT5XNbpuPOgKg
+1APObGBchKWmDxwNKUpAVOefEScR7OP3mV4TOHQDZlMZWvoJZ8O4av+nOA/NUOjXPs0VVn
+azhBvIezY8EvUSVSk49Cg6J9F7/KfR1WqpiTU7CkQUlCXNuz5xLUyKdJo3MQ/vjOqeenbh
+MR9Wes4IWF1BVe4VOD6lxRsjwuIieIgmScW28FFh2rgsEfO2spzZ3AWOGExw+ih757hFz5
+4A2fhsQXP8m3r8m7iiqcjTLWXdxTUk4zot2kZEjbI4Avk0BL+wVeFq6f/y+G+g5edqSo7j
+uuSgzbUQtA9PMnGxhrhU2Ob7n3VGdya7WbGZkaKP8zJhAAAAgQC3bJurmOSLIi3KVhp7lD
+/FfxwXHwVBFALCgq7EyNlkTz6RDoMFM4eOTRMDvsgWxT+bSB8R8eg1sfgY8rkHOuvTAVI5
+3oEYco3H7NWE9X8Zt0lyhO1uaE49EENNSQ8hY7R3UIw5becyI+7ZZxs9HkBgCQCZzSjzA+
+SIyAoMKM261AAAAIEA+PCkcDRp3J0PaoiuetXSlWZ5WjP3CtwT2xrvEX9x+ZsDgXCDYQ5T
+osxvEKOGSfIrHUUhzZbFGvqWyfrziPe9ypJrtCM7RJT/fApBXnbWFcDZzWamkQvohst+0w
+XHYCmNoJ6/Y+roLv3pzyFUmqRNcrQaohex7TZmsvHJT513UakAAACBAMgBXxH8DyNYdniX
+mIXEto4GqMh4rXdNwCghfpyWdJE6vCyDt7g7bYMq7AQ2ynSKRtQDT/ZgQNfSbilUq3iXz7
+xNZn5U9ndwFs90VmEpBup/PmhfX+Gwt5hQZLbkKZcgQ9XrhSKdMxVm1yy/fk0U457enlz5
+cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA==
+-----END OPENSSH PRIVATE KEY-----
+`
+	testKeyToSignPublic := `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCfVXbF38SAlchjnd8FO1zbST9qN6SpCeC+SkVLA0dbrdc0RArwuUU4LIZeiN70NQbGdizcX+iF+wEFaQs19Tt+IQ11IkvWluQCAn7a6FX2wtmO9iGKStdajIlcsMp9jXYuOlwFLYd4sxPTiVpn/FIms4EQkcbMfKvbBQ7Q1RBRA9Q4MAWGccqC+8DhS7v1RiBx1OEXjDzf4QCcTgnMFkwo8yPPLi7ULRH8ugPwO5qkfBfo0ZZ0RS9Rln5dFwhZ4I69HSd6oYFEffmTUy/uTrl3uSQTS68zlnfp6vDe12izPE9h/sAqc7UOl5Ta7AxzKbrE3B/DKcqOEuov8yCLi5H `
+
+	roleOptions := map[string]interface{}{
+		"allow_user_certificates": true,
+		"allowed_users":           "*",
+		"default_extensions": []map[string]string{
+			{
+				"permit-pty": "",
+			},
+		},
+		"key_type":     "ca",
+		"default_user": testUserName,
+		"ttl":          "30m0s",
+	}
+	if algorithmSigner != "" {
+		roleOptions["algorithm_signer"] = algorithmSigner
+	}
+	testCase := logicaltest.TestCase{
+		LogicalBackend: b,
+		Steps: []logicaltest.TestStep{
+			configCaStep(),
+			testRoleWrite(t, "testcarole", roleOptions),
+			logicaltest.TestStep{
+				Operation: logical.UpdateOperation,
+				Path:      "sign/testcarole",
+				ErrorOk:   expectError,
+				Data: map[string]interface{}{
+					"public_key":       testKeyToSignPublic,
+					"valid_principals": testUserName,
+				},
+
+				Check: func(resp *logical.Response) error {
+
+					signedKey := strings.TrimSpace(resp.Data["signed_key"].(string))
+					if signedKey == "" {
+						return errors.New("no signed key in response")
+					}
+
+					privKey, err := ssh.ParsePrivateKey([]byte(testKeyToSignPrivate))
+					if err != nil {
+						return fmt.Errorf("error parsing private key: %v", err)
+					}
+
+					parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey))
+					if err != nil {
+						return fmt.Errorf("error parsing signed key: %v", err)
+					}
+					certSigner, err := ssh.NewCertSigner(parsedKey.(*ssh.Certificate), privKey)
+					if err != nil {
+						return err
+					}
+
+					err = testSSH(t, testUserName, sshAddress, ssh.PublicKeys(certSigner), "date")
+					if expectError && err == nil {
+						return fmt.Errorf("expected error but got none")
+					}
+					if !expectError && err != nil {
+						return err
+					}
+
+					return nil
+				},
+			},
+		},
+	}
+
+	logicaltest.Test(t, testCase)
+}
+
 func TestBackend_AbleToRetrievePublicKey(t *testing.T) {
 
 	config := logical.TestBackendConfig()
@@ -588,8 +819,8 @@ func TestBackend_AbleToRetrievePublicKey(t *testing.T) {
 
 					key := string(resp.Data["http_raw_body"].([]byte))
 
-					if key != publicKey {
-						return fmt.Errorf("public_key incorrect. Expected %v, actual %v", publicKey, key)
+					if key != testCAPublicKey {
+						return fmt.Errorf("public_key incorrect. Expected %v, actual %v", testCAPublicKey, key)
 					}
 
 					return nil
@@ -610,12 +841,20 @@ func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) {
 		t.Fatalf("Cannot create backend: %s", err)
 	}
 
+	var expectedPublicKey string
 	testCase := logicaltest.TestCase{
 		LogicalBackend: b,
 		Steps: []logicaltest.TestStep{
 			logicaltest.TestStep{
 				Operation: logical.UpdateOperation,
 				Path:      "config/ca",
+				Check: func(resp *logical.Response) error {
+					if resp.Data["public_key"].(string) == "" {
+						return fmt.Errorf("public_key empty")
+					}
+					expectedPublicKey = resp.Data["public_key"].(string)
+					return nil
+				},
 			},
 
 			logicaltest.TestStep{
@@ -630,6 +869,9 @@ func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) {
 					if key == "" {
 						return fmt.Errorf("public_key empty. Expected not empty, actual %s", key)
 					}
+					if key != expectedPublicKey {
+						return fmt.Errorf("public_key mismatch. Expected %s, actual %s", expectedPublicKey, key)
+					}
 
 					return nil
 				},
@@ -753,7 +995,7 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) {
 				Operation: logical.UpdateOperation,
 				Path:      "sign/weakkey",
 				Data: map[string]interface{}{
-					"public_key": publicKey,
+					"public_key": testCAPublicKey,
 				},
 				ErrorOk: true,
 				Check: func(resp *logical.Response) error {
@@ -775,7 +1017,7 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) {
 				Operation: logical.UpdateOperation,
 				Path:      "sign/stdkey",
 				Data: map[string]interface{}{
-					"public_key": publicKey,
+					"public_key": testCAPublicKey,
 				},
 			},
 			// Fail with 4096 key
@@ -892,8 +1134,8 @@ func configCaStep() logicaltest.TestStep {
 		Operation: logical.UpdateOperation,
 		Path:      "config/ca",
 		Data: map[string]interface{}{
-			"public_key":  publicKey,
-			"private_key": privateKey,
+			"public_key":  testCAPublicKey,
+			"private_key": testCAPrivateKey,
 		},
 	}
 }
@@ -992,7 +1234,7 @@ func validateSSHCertificate(cert *ssh.Certificate, keyID string, certType int, v
 }
 
 func getSigningPublicKey() (ssh.PublicKey, error) {
-	key, err := base64.StdEncoding.DecodeString(strings.Split(publicKey, " ")[1])
+	key, err := base64.StdEncoding.DecodeString(strings.Split(testCAPublicKey, " ")[1])
 	if err != nil {
 		return nil, err
 	}
@@ -1167,12 +1409,12 @@ func testRoleDelete(t *testing.T, name string) logicaltest.TestStep {
 	}
 }
 
-func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
+func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, expectError bool, address string) logicaltest.TestStep {
 	return logicaltest.TestStep{
 		Operation: logical.UpdateOperation,
 		Path:      fmt.Sprintf("creds/%s", roleName),
 		Data:      data,
-		ErrorOk:   true,
+		ErrorOk:   expectError,
 		Check: func(resp *logical.Response) error {
 			if resp == nil {
 				return fmt.Errorf("response is nil")
@@ -1192,7 +1434,7 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{},
 				}
 				return nil
 			}
-			if roleName == testDynamicRoleName {
+			if roleName == testDynamicRoleName || roleName == testAtRoleName {
 				var d struct {
 					Key string `mapstructure:"key"`
 				}
@@ -1203,10 +1445,13 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{},
 					return fmt.Errorf("generated key is an empty string")
 				}
 				// Checking only for a parsable key
-				_, err := ssh.ParsePrivateKey([]byte(d.Key))
+				privKey, err := ssh.ParsePrivateKey([]byte(d.Key))
 				if err != nil {
 					return fmt.Errorf("generated key is invalid")
 				}
+				if err := testSSH(t, data["username"].(string), address, ssh.PublicKeys(privKey), "date"); err != nil {
+					return fmt.Errorf("unable to SSH with new key (%s): %w", d.Key, err)
+				}
 			} else {
 				if resp.Data["key_type"] != KeyTypeOTP {
 					return fmt.Errorf("incorrect key_type")
@@ -1219,11 +1464,3 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{},
 		},
 	}
 }
-
-func testAccUserPrecheckFunc(t *testing.T) func() {
-	return func() {
-		if _, err := user.Lookup(testUserName); err != nil {
-			t.Skipf("Acceptance test skipped unless user %q is present", testUserName)
-		}
-	}
-}
diff --git a/builtin/logical/ssh/path_config_ca_test.go b/builtin/logical/ssh/path_config_ca_test.go
index 6fac8eb99043..bd2c967b7f20 100644
--- a/builtin/logical/ssh/path_config_ca_test.go
+++ b/builtin/logical/ssh/path_config_ca_test.go
@@ -26,7 +26,7 @@ func TestSSH_ConfigCAStorageUpgrade(t *testing.T) {
 	// Store at an older path
 	err = config.StorageView.Put(context.Background(), &logical.StorageEntry{
 		Key:   caPrivateKeyStoragePathDeprecated,
-		Value: []byte(privateKey),
+		Value: []byte(testCAPrivateKey),
 	})
 	if err != nil {
 		t.Fatal(err)
@@ -60,7 +60,7 @@ func TestSSH_ConfigCAStorageUpgrade(t *testing.T) {
 	// Store at an older path
 	err = config.StorageView.Put(context.Background(), &logical.StorageEntry{
 		Key:   caPublicKeyStoragePathDeprecated,
-		Value: []byte(publicKey),
+		Value: []byte(testCAPublicKey),
 	})
 	if err != nil {
 		t.Fatal(err)
@@ -133,8 +133,8 @@ func TestSSH_ConfigCAUpdateDelete(t *testing.T) {
 
 	caReq.Operation = logical.UpdateOperation
 	caReq.Data = map[string]interface{}{
-		"public_key":  publicKey,
-		"private_key": privateKey,
+		"public_key":  testCAPublicKey,
+		"private_key": testCAPrivateKey,
 	}
 
 	// Successfully create a new one
diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go
index 0e539b40a390..dd2430f9cfa5 100644
--- a/builtin/logical/ssh/path_roles.go
+++ b/builtin/logical/ssh/path_roles.go
@@ -3,6 +3,7 @@ package ssh
 import (
 	"context"
 	"fmt"
+	"golang.org/x/crypto/ssh"
 	"strings"
 
 	"time"
@@ -53,6 +54,7 @@ type sshRole struct {
 	AllowUserKeyIDs        bool              `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
 	KeyIDFormat            string            `mapstructure:"key_id_format" json:"key_id_format"`
 	AllowedUserKeyLengths  map[string]int    `mapstructure:"allowed_user_key_lengths" json:"allowed_user_key_lengths"`
+	AlgorithmSigner        string            `mapstructure:"algorithm_signer" json:"algorithm_signer"`
 }
 
 func pathListRoles(b *backend) *framework.Path {
@@ -331,6 +333,17 @@ func pathRoles(b *backend) *framework.Path {
                                 If set, allows the enforcement of key types and minimum key sizes to be signed.
                                 `,
 			},
+			"algorithm_signer": &framework.FieldSchema{
+				Type:    framework.TypeString,
+				Default: ssh.SigAlgoRSA,
+				Description: `
+				When supplied, this value specifies a signing algorithm for the key.  Possible values: 
+				ssh-rsa, rsa-sha2-256, rsa-sha2-512.
+				`,
+				DisplayAttrs: &framework.DisplayAttributes{
+					Name: "Signing Algorithm",
+				},
+			},
 		},
 
 		Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -466,7 +479,14 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr
 			KeyOptionSpecs:  keyOptionSpecs,
 		}
 	} else if keyType == KeyTypeCA {
-		role, errorResponse := b.createCARole(allowedUsers, d.Get("default_user").(string), d)
+		algorithmSigner := d.Get("algorithm_signer").(string)
+		switch algorithmSigner {
+		case ssh.SigAlgoRSA, ssh.SigAlgoRSASHA2256, ssh.SigAlgoRSASHA2512:
+		default:
+			return nil, fmt.Errorf("unknown algorithm signer %q", algorithmSigner)
+		}
+
+		role, errorResponse := b.createCARole(allowedUsers, d.Get("default_user").(string), algorithmSigner, d)
 		if errorResponse != nil {
 			return errorResponse, nil
 		}
@@ -486,7 +506,7 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr
 	return nil, nil
 }
 
-func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework.FieldData) (*sshRole, *logical.Response) {
+func (b *backend) createCARole(allowedUsers, defaultUser, signer string, data *framework.FieldData) (*sshRole, *logical.Response) {
 	ttl := time.Duration(data.Get("ttl").(int)) * time.Second
 	maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second
 	role := &sshRole{
@@ -503,6 +523,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework
 		AllowUserKeyIDs:        data.Get("allow_user_key_ids").(bool),
 		KeyIDFormat:            data.Get("key_id_format").(string),
 		KeyType:                KeyTypeCA,
+		AlgorithmSigner:        signer,
 	}
 
 	if !role.AllowUserCertificates && !role.AllowHostCertificates {
@@ -594,6 +615,7 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) {
 			"default_critical_options": role.DefaultCriticalOptions,
 			"default_extensions":       role.DefaultExtensions,
 			"allowed_user_key_lengths": role.AllowedUserKeyLengths,
+			"algorithm_signer":         role.AlgorithmSigner,
 		}
 	case KeyTypeDynamic:
 		result = map[string]interface{}{
diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go
index 090919dc422f..3e7146e11e0c 100644
--- a/builtin/logical/ssh/path_sign.go
+++ b/builtin/logical/ssh/path_sign.go
@@ -9,6 +9,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"io"
 	"regexp"
 	"strconv"
 	"strings"
@@ -497,6 +498,16 @@ func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) {
 
 	now := time.Now()
 
+	sshAlgorithmSigner, ok := b.Signer.(ssh.AlgorithmSigner)
+	if !ok {
+		return nil, fmt.Errorf("failed to generate signed SSH key: signer is not an AlgorithmSigner")
+	}
+
+	// prepare certificate for signing
+	nonce := make([]byte, 32)
+	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+		return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce")
+	}
 	certificate := &ssh.Certificate{
 		Serial:          serialNumber.Uint64(),
 		Key:             b.PublicKey,
@@ -509,12 +520,25 @@ func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) {
 			CriticalOptions: b.CriticalOptions,
 			Extensions:      b.Extensions,
 		},
+		Nonce:        nonce,
+		SignatureKey: sshAlgorithmSigner.PublicKey(),
 	}
 
-	err = certificate.SignCert(rand.Reader, b.Signer)
+	// get bytes to sign; this is based on Certificate.bytesForSigning() from the go ssh lib
+	out := certificate.Marshal()
+	// Drop trailing signature length.
+	certificateBytes := out[:len(out)-4]
+
+	algo := b.Role.AlgorithmSigner
+	if algo == "" {
+		algo = ssh.SigAlgoRSA
+	}
+	sig, err := sshAlgorithmSigner.SignWithAlgorithm(rand.Reader, certificateBytes, algo)
 	if err != nil {
-		return nil, fmt.Errorf("failed to generate signed SSH key")
+		return nil, fmt.Errorf("failed to generate signed SSH key: sign error")
 	}
 
+	certificate.Signature = sig
+
 	return certificate, nil
 }
diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go
index 736b100f6988..b6fc51827315 100644
--- a/builtin/logical/ssh/util.go
+++ b/builtin/logical/ssh/util.go
@@ -102,8 +102,7 @@ func (b *backend) installPublicKeyInTarget(ctx context.Context, adminUser, usern
 	rmCmd := fmt.Sprintf("rm -f %s", scriptFileName)
 	targetCmd := fmt.Sprintf("%s;%s;%s", chmodCmd, scriptCmd, rmCmd)
 
-	session.Run(targetCmd)
-	return nil
+	return session.Run(targetCmd)
 }
 
 // Takes an IP address and role name and checks if the IP is part
diff --git a/vault/testing.go b/vault/testing.go
index 78f7d6a75638..a32236afb030 100644
--- a/vault/testing.go
+++ b/vault/testing.go
@@ -14,7 +14,6 @@ import (
 	"encoding/pem"
 	"errors"
 	"fmt"
-	"github.com/hashicorp/vault/internalshared/configutil"
 	"io"
 	"io/ioutil"
 	"math/big"
@@ -22,7 +21,6 @@ import (
 	"net"
 	"net/http"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"sync"
 	"sync/atomic"
@@ -37,7 +35,6 @@ import (
 	"github.com/mitchellh/copystructure"
 
 	"golang.org/x/crypto/ed25519"
-	"golang.org/x/crypto/ssh"
 	"golang.org/x/net/http2"
 
 	cleanhttp "github.com/hashicorp/go-cleanhttp"
@@ -45,6 +42,7 @@ import (
 	"github.com/hashicorp/vault/audit"
 	"github.com/hashicorp/vault/command/server"
 	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/internalshared/configutil"
 	"github.com/hashicorp/vault/internalshared/reloadutil"
 	dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
 	dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql"
@@ -490,97 +488,6 @@ func TestAddTestPlugin(t testing.T, c *Core, name string, pluginType consts.Plug
 var testLogicalBackends = map[string]logical.Factory{}
 var testCredentialBackends = map[string]logical.Factory{}
 
-// StartSSHHostTestServer starts the test server which responds to SSH
-// authentication. Used to test the SSH secret backend.
-func StartSSHHostTestServer() (string, error) {
-	pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testSharedPublicKey))
-	if err != nil {
-		return "", fmt.Errorf("error parsing public key")
-	}
-	serverConfig := &ssh.ServerConfig{
-		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
-			if bytes.Compare(pubKey.Marshal(), key.Marshal()) == 0 {
-				return &ssh.Permissions{}, nil
-			} else {
-				return nil, fmt.Errorf("key does not match")
-			}
-		},
-	}
-	signer, err := ssh.ParsePrivateKey([]byte(testSharedPrivateKey))
-	if err != nil {
-		panic("Error parsing private key")
-	}
-	serverConfig.AddHostKey(signer)
-
-	soc, err := net.Listen("tcp", "127.0.0.1:0")
-	if err != nil {
-		return "", fmt.Errorf("error listening to connection")
-	}
-
-	go func() {
-		for {
-			conn, err := soc.Accept()
-			if err != nil {
-				panic(fmt.Sprintf("Error accepting incoming connection: %s", err))
-			}
-			defer conn.Close()
-			sshConn, chanReqs, _, err := ssh.NewServerConn(conn, serverConfig)
-			if err != nil {
-				panic(fmt.Sprintf("Handshaking error: %v", err))
-			}
-
-			go func() {
-				for chanReq := range chanReqs {
-					go func(chanReq ssh.NewChannel) {
-						if chanReq.ChannelType() != "session" {
-							chanReq.Reject(ssh.UnknownChannelType, "unknown channel type")
-							return
-						}
-
-						ch, requests, err := chanReq.Accept()
-						if err != nil {
-							panic(fmt.Sprintf("Error accepting channel: %s", err))
-						}
-
-						go func(ch ssh.Channel, in <-chan *ssh.Request) {
-							for req := range in {
-								executeServerCommand(ch, req)
-							}
-						}(ch, requests)
-					}(chanReq)
-				}
-				sshConn.Close()
-			}()
-		}
-	}()
-	return soc.Addr().String(), nil
-}
-
-// This executes the commands requested to be run on the server.
-// Used to test the SSH secret backend.
-func executeServerCommand(ch ssh.Channel, req *ssh.Request) {
-	command := string(req.Payload[4:])
-	cmd := exec.Command("/bin/bash", []string{"-c", command}...)
-	req.Reply(true, nil)
-
-	cmd.Stdout = ch
-	cmd.Stderr = ch
-	cmd.Stdin = ch
-
-	err := cmd.Start()
-	if err != nil {
-		panic(fmt.Sprintf("Error starting the command: '%s'", err))
-	}
-
-	go func() {
-		_, err := cmd.Process.Wait()
-		if err != nil {
-			panic(fmt.Sprintf("Error while waiting for command to finish:'%s'", err))
-		}
-		ch.Close()
-	}()
-}
-
 // This adds a credential backend for the test core. This needs to be
 // invoked before the test core is created.
 func AddTestCredentialBackend(name string, factory logical.Factory) error {

From 0e59e58aa63cefa6e94eb2e2c704feea8e4627b9 Mon Sep 17 00:00:00 2001
From: ncabatoff <ncabatoff@hashicorp.com>
Date: Thu, 11 Jun 2020 08:11:49 -0400
Subject: [PATCH 11/29] changelog++

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d12b0b580c68..1439f9d58963 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ IMPROVEMENTS:
 * secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
 * secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
 * secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)]
+* secrets/ssh: Add a CA-mode role option to specify signing algorithm [[GH-9096](https://github.com/hashicorp/vault/pull/9096)] 
 * secrets/transit: Transit requests that make use of keys now include a new field  `key_version` in their responses [[GH-8775](https://github.com/hashicorp/vault/pull/8775)]
 * secrets/transit: Improving transit batch encrypt and decrypt latencies [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
 * sentinel: Add a sentinel config section, and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults.

From 4d873fe3c05c8e3eaa2727600395bac38e5055bb Mon Sep 17 00:00:00 2001
From: Jim Kalafut <jkalafut@hashicorp.com>
Date: Thu, 11 Jun 2020 09:34:25 -0700
Subject: [PATCH 12/29] Update contribution guidelines

---
 CONTRIBUTING.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d88537722c89..933843b337eb 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -13,8 +13,7 @@ rules to get in the way of that.
 That said, if you want to ensure that a pull request is likely to be merged,
 talk to us! You can find out our thoughts and ensure that your contribution
 won't clash or be obviated by Vault's normal direction. A great way to do this
-is via the [Vault Google Group][2]. Sometimes Vault devs are in `#vault-tool`
-on Freenode, too.
+is via the [Vault Discussion Forum][2].
 
 This document will cover what we're looking for in terms of reporting issues.
 By addressing all the points we're looking for, it raises the chances we can
@@ -69,7 +68,7 @@ following steps listed in the README, under the section [Developing Vault][1].
 
 
 [1]: https://github.com/hashicorp/vault#developing-vault
-[2]: https://groups.google.com/group/vault-tool
+[2]: https://discuss.hashicorp.com/c/vault
 
 ## Contributor License Agreement
 

From b9f05d08164d3af49bad8ca73f269bca714dd7ca Mon Sep 17 00:00:00 2001
From: Jeff Hemmen <com.github@citrusfruit.lu>
Date: Thu, 11 Jun 2020 19:22:49 +0100
Subject: [PATCH 13/29] Simple typos (#9119)

---
 website/pages/api-docs/system/rekey.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/pages/api-docs/system/rekey.mdx b/website/pages/api-docs/system/rekey.mdx
index c2a8f33ea949..147d9f2c721d 100644
--- a/website/pages/api-docs/system/rekey.mdx
+++ b/website/pages/api-docs/system/rekey.mdx
@@ -90,7 +90,7 @@ and starting a new rekey, which will also provide a new nonce.
   key is not actually rotated. The new keys must be provided to authorize the
   actual rotation of the master key. This ensures that the new keys have been
   successfully saved and protects against a risk of the keys being lost after
-  rotation but before they can be persisted. This can be used with without
+  rotation but before they can be persisted. This can be used with or without
   `pgp_keys`, and when used with it, it allows ensuring that the returned keys
   can be successfully decrypted before committing to the new shares, which the
   backup functionality does not provide.
@@ -282,7 +282,7 @@ The `nonce` for the current rekey operation is also displayed.
 This endpoint cancels any in-progress rekey verification operation. This clears
 any progress made and resets the nonce. Unlike a `DELETE` against
 `sys/rekey/init`, this only resets the current verification operation, not the
-entire rekey atttempt. The return value is the same as `GET` along with the new
+entire rekey attempt. The return value is the same as `GET` along with the new
 nonce.
 
 | Method   | Path                |

From 3d02fb4b86385666c2f2e43b48c79119402731a4 Mon Sep 17 00:00:00 2001
From: Mike Jarmy <mjarmy@gmail.com>
Date: Thu, 11 Jun 2020 15:07:59 -0400
Subject: [PATCH 14/29] Test pre-1.4 seal migration  (#9085)

* enable seal wrap in all seal migration tests

* move adjustForSealMigration to vault package

* fix adjustForSealMigration

* begin working on new seal migration test

* create shamir seal migration test

* refactor testhelpers

* add VerifyRaftConfiguration to testhelpers

* stub out TestTransit

* Revert "refactor testhelpers"

This reverts commit 39593defd0d4c6fd79aedfd37df6298391abb9db.

* get shamir test working again

* stub out transit join

* work on transit join

* Revert "move resuable storage test to avoid creating import cycle"

This reverts commit b3ff2317381a5af12a53117f87d1c6fbb093af6b.

* remove debug code

* initTransit now works with raft join

* runTransit works with inmem

* work on runTransit with raft

* runTransit works with raft

* get rid of dis-used test

* cleanup tests

* TestSealMigration_TransitToShamir_Pre14

* TestSealMigration_ShamirToTransit_Pre14

* split for pre-1.4 testing

* add simple tests for transit and shamir

* fix typo in test suite

* debug wrapper type

* test debug

* test-debug

* refactor core migration

* Revert "refactor core migration"

This reverts commit a776452d32a9dca7a51e3df4a76b9234d8c0c7ce.

* begin refactor of adjustForSealMigration

* fix bug in adjustForSealMigration

* clean up tests

* clean up core refactoring

* fix bug in shamir->transit migration

* remove unnecessary lock from setSealsForMigration()

* rename sealmigration test package

* use ephemeral ports below 30000

* simplify use of numTestCores
---
 command/seal_migration_test.go                | 750 ------------------
 command/server.go                             |   7 +-
 command/server_util.go                        | 111 ---
 helper/testhelpers/testhelpers.go             |  99 ++-
 .../teststorage/teststorage_reusable_test.go  | 220 -----
 vault/core.go                                 | 118 ++-
 .../seal_migration_pre14_test.go              | 134 ++++
 .../sealmigration/seal_migration_test.go      | 517 ++++++++++++
 vault/testing.go                              |  35 +-
 9 files changed, 880 insertions(+), 1111 deletions(-)
 delete mode 100644 command/seal_migration_test.go
 delete mode 100644 helper/testhelpers/teststorage/teststorage_reusable_test.go
 create mode 100644 vault/external_tests/sealmigration/seal_migration_pre14_test.go
 create mode 100644 vault/external_tests/sealmigration/seal_migration_test.go

diff --git a/command/seal_migration_test.go b/command/seal_migration_test.go
deleted file mode 100644
index a0bb9df69bea..000000000000
--- a/command/seal_migration_test.go
+++ /dev/null
@@ -1,750 +0,0 @@
-package command
-
-import (
-	"context"
-	"encoding/base64"
-	"testing"
-
-	wrapping "github.com/hashicorp/go-kms-wrapping"
-	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
-	"github.com/hashicorp/vault/api"
-	"github.com/hashicorp/vault/helper/testhelpers"
-	sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
-	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
-	vaulthttp "github.com/hashicorp/vault/http"
-	"github.com/hashicorp/vault/vault"
-	vaultseal "github.com/hashicorp/vault/vault/seal"
-)
-
-func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
-	t.Helper()
-	if cfg.Type != sealType {
-		t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType)
-	}
-	if cfg.SecretShares != shares {
-		t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares)
-	}
-	if cfg.SecretThreshold != threshold {
-		t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold)
-	}
-	if cfg.StoredShares != stored {
-		t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored)
-	}
-}
-
-func TestSealMigration_ShamirToTransit(t *testing.T) {
-	t.Parallel()
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTransit(t, teststorage.InmemBackendSetup)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTransit(t, teststorage.FileBackendSetup)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTransit(t, teststorage.ConsulBackendSetup)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTransit(t, teststorage.RaftBackendSetup)
-	})
-}
-
-func testSealMigrationShamirToTransit(t *testing.T, setup teststorage.ClusterSetupMutator) {
-
-	// Create a cluster that uses shamir.
-	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
-		DisableSealWrap: true,
-	}, &vault.TestClusterOptions{
-		HandlerFunc: vaulthttp.Handler,
-		SkipInit:    true,
-		NumCores:    3,
-	},
-		setup,
-	)
-	opts.SetupFunc = nil
-	cluster := vault.NewTestCluster(t, conf, opts)
-	cluster.Start()
-	defer cluster.Cleanup()
-
-	// Initialize the cluster, and unseal it using the shamir keys.
-	client := cluster.Cores[0].Client
-	initResp, err := client.Sys().Init(&api.InitRequest{
-		SecretShares:    5,
-		SecretThreshold: 3,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var resp *api.SealStatusResponse
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	rootToken := initResp.RootToken
-	client.SetToken(rootToken)
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create the transit server.
-	tcluster := sealhelper.NewTransitSealServer(t)
-	defer tcluster.Cleanup()
-	tcluster.MakeKey(t, "key1")
-	transitSeal := tcluster.MakeSeal(t, "key1")
-
-	// Transition to transit seal.
-	if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, transitSeal, nil); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal and migrate to transit.
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Seal the cluster.
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Seal the transit cluster; we expect the unseal of our main cluster
-	// to fail as a result.
-	tcluster.EnsureCoresSealed(t)
-
-	// Verify that we cannot unseal.  Now the barrier unseal keys are actually
-	// the recovery keys.
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			break
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if err == nil || resp != nil {
-		t.Fatalf("expected sealed state; got %#v", resp)
-	}
-
-	// Unseal the transit server; we expect the unseal to work now on our main
-	// cluster.
-	tcluster.UnsealCores(t)
-
-	// Verify that we can unseal.
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	// Make sure the seal configs were updated correctly.
-	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
-	verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
-}
-
-func TestSealMigration_ShamirToTestSeal(t *testing.T) {
-	t.Parallel()
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTestSeal(t, teststorage.InmemBackendSetup)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTestSeal(t, teststorage.FileBackendSetup)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTestSeal(t, teststorage.ConsulBackendSetup)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationShamirToTestSeal(t, teststorage.RaftBackendSetup)
-	})
-}
-
-func testSealMigrationShamirToTestSeal(t *testing.T, setup teststorage.ClusterSetupMutator) {
-
-	// Create a cluster that uses shamir.
-	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
-		DisableSealWrap: true,
-	}, &vault.TestClusterOptions{
-		HandlerFunc: vaulthttp.Handler,
-		SkipInit:    true,
-		NumCores:    3,
-	},
-		setup,
-	)
-	opts.SetupFunc = nil
-	cluster := vault.NewTestCluster(t, conf, opts)
-	cluster.Start()
-	defer cluster.Cleanup()
-
-	// Initialize the cluster, and unseal it using the shamir keys.
-	client := cluster.Cores[0].Client
-	initResp, err := client.Sys().Init(&api.InitRequest{
-		SecretShares:    5,
-		SecretThreshold: 3,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var resp *api.SealStatusResponse
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	rootToken := initResp.RootToken
-	client.SetToken(rootToken)
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a test seal
-	testSeal := vault.NewAutoSeal(vaultseal.NewTestSeal(&vaultseal.TestSealOpts{}))
-
-	// Transition to test seal.
-	if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, testSeal, nil); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal and migrate to test seal.
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Seal the cluster.
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Verify that we can unseal.
-	for _, key := range initResp.KeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	// Make sure the seal configs were updated correctly.
-	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
-	verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
-}
-
-func TestSealMigration_TransitToTestSeal(t *testing.T) {
-	t.Parallel()
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTestSeal(t, teststorage.InmemBackendSetup)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTestSeal(t, teststorage.FileBackendSetup)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTestSeal(t, teststorage.ConsulBackendSetup)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTestSeal(t, teststorage.RaftBackendSetup)
-	})
-}
-
-func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterSetupMutator) {
-
-	// Create the transit server.
-	tcluster := sealhelper.NewTransitSealServer(t)
-	defer func() {
-		if tcluster != nil {
-			tcluster.Cleanup()
-		}
-	}()
-	tcluster.MakeKey(t, "key1")
-	var transitSeal vault.Seal
-
-	// Create a cluster that uses transit.
-	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
-		DisableSealWrap: true,
-	}, &vault.TestClusterOptions{
-		HandlerFunc: vaulthttp.Handler,
-		SkipInit:    true,
-		NumCores:    3,
-		SealFunc: func() vault.Seal {
-			transitSeal = tcluster.MakeSeal(t, "key1")
-			return transitSeal
-		},
-	},
-		setup,
-	)
-	opts.SetupFunc = nil
-	cluster := vault.NewTestCluster(t, conf, opts)
-	cluster.Start()
-	defer cluster.Cleanup()
-
-	// Initialize the cluster, and fetch the recovery keys.
-	client := cluster.Cores[0].Client
-	initResp, err := client.Sys().Init(&api.InitRequest{
-		RecoveryShares:    5,
-		RecoveryThreshold: 3,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, k := range initResp.RecoveryKeysB64 {
-		b, _ := base64.RawStdEncoding.DecodeString(k)
-		cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	rootToken := initResp.RootToken
-	client.SetToken(rootToken)
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a test seal
-	testSeal := vault.NewAutoSeal(vaultseal.NewTestSeal(&vaultseal.TestSealOpts{}))
-
-	// Transition to test seal.
-	if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, testSeal, transitSeal); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal and migrate to Test Seal.
-	// Although we're unsealing using the recovery keys, this is still an
-	// autounseal; if we stopped the transit cluster this would fail.
-	var resp *api.SealStatusResponse
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Seal the cluster.
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal the cluster. Now the recovery keys are actually the barrier
-	// unseal keys.
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Make sure the seal configs were updated correctly.
-	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
-	verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
-
-	// Now that migration is done, we can stop the transit cluster, since we
-	// can seal/unseal without it.
-	tcluster.Cleanup()
-	tcluster = nil
-
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-}
-
-func TestSealMigration_TransitToShamir(t *testing.T) {
-	t.Parallel()
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.InmemBackendSetup)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup)
-	})
-}
-
-func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
-
-	// Create the transit server.
-	tcluster := sealhelper.NewTransitSealServer(t)
-	defer func() {
-		if tcluster != nil {
-			tcluster.Cleanup()
-		}
-	}()
-	tcluster.MakeKey(t, "key1")
-	var transitSeal vault.Seal
-
-	// Create a cluster that uses transit.
-	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
-		DisableSealWrap: true,
-	}, &vault.TestClusterOptions{
-		HandlerFunc: vaulthttp.Handler,
-		SkipInit:    true,
-		NumCores:    3,
-		SealFunc: func() vault.Seal {
-			transitSeal = tcluster.MakeSeal(t, "key1")
-			return transitSeal
-		},
-	},
-		setup,
-	)
-	opts.SetupFunc = nil
-	cluster := vault.NewTestCluster(t, conf, opts)
-	cluster.Start()
-	defer cluster.Cleanup()
-
-	// Initialize the cluster, and fetch the recovery keys.
-	client := cluster.Cores[0].Client
-	initResp, err := client.Sys().Init(&api.InitRequest{
-		RecoveryShares:    5,
-		RecoveryThreshold: 3,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, k := range initResp.RecoveryKeysB64 {
-		b, _ := base64.RawStdEncoding.DecodeString(k)
-		cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	rootToken := initResp.RootToken
-	client.SetToken(rootToken)
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a Shamir seal.
-	logger := cluster.Logger.Named("shamir")
-	shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{
-		Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
-			Logger: logger,
-		}),
-	})
-
-	// Transition to Shamir seal.
-	if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, transitSeal); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal and migrate to Shamir.
-	// Although we're unsealing using the recovery keys, this is still an
-	// autounseal; if we stopped the transit cluster this would fail.
-	var resp *api.SealStatusResponse
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Seal the cluster.
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal the cluster. Now the recovery keys are actually the barrier
-	// unseal keys.
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Make sure the seal configs were updated correctly.
-	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
-	if r != nil {
-		t.Fatalf("expected nil recovery config, got: %#v", r)
-	}
-
-	// Now that migration is done, we can stop the transit cluster, since we
-	// can seal/unseal without it.
-	tcluster.Cleanup()
-	tcluster = nil
-
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-}
-
-/*
-func TestSealMigration_TransitToTransit(t *testing.T) {
-	t.Parallel()
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTransit(t, teststorage.InmemBackendSetup)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTransit(t, teststorage.FileBackendSetup)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTransit(t, teststorage.ConsulBackendSetup)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToTransit(t, teststorage.RaftBackendSetup)
-	})
-}
-
-func testSealMigrationTransitToTransit(t *testing.T, setup teststorage.ClusterSetupMutator) {
-	tcluster := sealhelper.NewTransitSealServer(t)
-	defer tcluster.Cleanup()
-	tcluster.MakeKey(t, "key1")
-	tcluster.MakeKey(t, "key2")
-	var seals []vault.Seal
-
-	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
-		DisableSealWrap: true,
-	}, &vault.TestClusterOptions{
-		HandlerFunc: vaulthttp.Handler,
-		SkipInit:    true,
-		NumCores:    3,
-		SealFunc: func() vault.Seal {
-			tseal := tcluster.MakeSeal(t, "key1")
-			seals = append(seals, tseal)
-			return tseal
-		},
-	},
-		setup,
-	)
-	opts.SetupFunc = nil
-	cluster := vault.NewTestCluster(t, conf, opts)
-	cluster.Start()
-	defer cluster.Cleanup()
-
-	client := cluster.Cores[0].Client
-	initResp, err := client.Sys().Init(&api.InitRequest{
-		RecoveryShares:    5,
-		RecoveryThreshold: 3,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	rootToken := initResp.RootToken
-	client.SetToken(rootToken)
-	for _, k := range initResp.RecoveryKeysB64 {
-		b, _ := base64.RawStdEncoding.DecodeString(k)
-		cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
-	}
-
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	logger := cluster.Logger.Named("shamir")
-	autoSeal2 := tcluster.MakeSeal(t, "key2")
-	if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, autoSeal2, seals[0]); err != nil {
-		t.Fatal(err)
-	}
-
-	// Although we're unsealing using the recovery keys, this is still an
-	// autounseal; if we stopped the transit cluster this would fail.
-	var resp *api.SealStatusResponse
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Seal and unseal again to verify that things are working fine
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Delete the original seal's transit key.
-	_, err = tcluster.Cores[0].Client.Logical().Delete(path.Join("transit", "keys", "key1"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = cluster.Cores[0].Core.UnsealWithStoredKeys(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-*/
diff --git a/command/server.go b/command/server.go
index 7290662bb6c0..9d2a038af38d 100644
--- a/command/server.go
+++ b/command/server.go
@@ -1120,6 +1120,7 @@ func (c *ServerCommand) Run(args []string) int {
 		HAPhysical:                nil,
 		ServiceRegistration:       configSR,
 		Seal:                      barrierSeal,
+		UnwrapSeal:                unwrapSeal,
 		AuditBackends:             c.AuditBackends,
 		CredentialBackends:        c.CredentialBackends,
 		LogicalBackends:           c.LogicalBackends,
@@ -1528,12 +1529,6 @@ CLUSTER_SYNTHESIS_COMPLETE:
 		Core: core,
 	}))
 
-	// Before unsealing with stored keys, setup seal migration if needed
-	if err := adjustCoreForSealMigration(c.logger, core, barrierSeal, unwrapSeal); err != nil {
-		c.UI.Error(err.Error())
-		return 1
-	}
-
 	// Attempt unsealing in a background goroutine. This is needed for when a
 	// Vault cluster with multiple servers is configured with auto-unseal but is
 	// uninitialized. Once one server initializes the storage backend, this
diff --git a/command/server_util.go b/command/server_util.go
index 1f3819a34bc8..dd95e72a9437 100644
--- a/command/server_util.go
+++ b/command/server_util.go
@@ -1,16 +1,8 @@
 package command
 
 import (
-	"context"
-	"fmt"
-
-	log "github.com/hashicorp/go-hclog"
-	wrapping "github.com/hashicorp/go-kms-wrapping"
-	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
 	"github.com/hashicorp/vault/command/server"
 	"github.com/hashicorp/vault/vault"
-	vaultseal "github.com/hashicorp/vault/vault/seal"
-	"github.com/pkg/errors"
 )
 
 var (
@@ -19,106 +11,3 @@ var (
 
 func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) {
 }
-
-func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error {
-	existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		return fmt.Errorf("Error checking for existing seal: %s", err)
-	}
-
-	// If we don't have an existing config or if it's the deprecated auto seal
-	// which needs an upgrade, skip out
-	if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
-		return nil
-	}
-
-	if unwrapSeal == nil {
-		// We have the same barrier type and the unwrap seal is nil so we're not
-		// migrating from same to same, IOW we assume it's not a migration
-		if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
-			return nil
-		}
-
-		// If we're not coming from Shamir, and the existing type doesn't match
-		// the barrier type, we need both the migration seal and the new seal
-		if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir {
-			return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`)
-		}
-	} else {
-		if unwrapSeal.BarrierType() == wrapping.Shamir {
-			return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
-		}
-	}
-
-	if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
-		return errors.New(`Recovery seal configuration not found for existing seal`)
-	}
-
-	var migrationSeal vault.Seal
-	var newSeal vault.Seal
-
-	// Determine the migrationSeal. This is either going to be an instance of
-	// shamir or the unwrapSeal.
-	switch existBarrierSealConfig.Type {
-	case wrapping.Shamir:
-		// The value reflected in config is what we're going to
-		migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{
-			Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
-				Logger: logger.Named("shamir"),
-			}),
-		})
-
-	default:
-		// If we're not coming from Shamir we expect the previous seal to be
-		// in the config and disabled.
-		migrationSeal = unwrapSeal
-	}
-
-	// newSeal will be the barrierSeal
-	newSeal = barrierSeal
-
-	if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
-		return errors.New("Migrating between same seal types is currently not supported")
-	}
-
-	if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
-		// In this case our migration seal is set so we are using it
-		// (potentially) for unwrapping. Set it on core for that purpose then
-		// exit.
-		core.SetSealsForMigration(nil, nil, unwrapSeal)
-		return nil
-	}
-
-	// Set the appropriate barrier and recovery configs.
-	switch {
-	case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
-		// Migrating from auto->auto, copy the configs over
-		newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
-		newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
-	case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported():
-		// Migrating from auto->shamir, clone auto's recovery config and set
-		// stored keys to 1.
-		newSealConfig := existRecoverySealConfig.Clone()
-		newSealConfig.StoredShares = 1
-		newSeal.SetCachedBarrierConfig(newSealConfig)
-	case newSeal != nil && newSeal.RecoveryKeySupported():
-		// Migrating from shamir->auto, set a new barrier config and set
-		// recovery config to a clone of shamir's barrier config with stored
-		// keys set to 0.
-		newBarrierSealConfig := &vault.SealConfig{
-			Type:            newSeal.BarrierType(),
-			SecretShares:    1,
-			SecretThreshold: 1,
-			StoredShares:    1,
-		}
-		newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
-
-		newRecoveryConfig := existBarrierSealConfig.Clone()
-		newRecoveryConfig.StoredShares = 0
-		newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
-	}
-
-	core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal)
-
-	return nil
-}
diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go
index 25b2be8f0346..b9aff79f3b14 100644
--- a/helper/testhelpers/testhelpers.go
+++ b/helper/testhelpers/testhelpers.go
@@ -412,42 +412,79 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib
 }
 
 func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
-	addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
+	raftClusterJoinNodes(t, cluster, false)
+}
+
+func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) {
+	raftClusterJoinNodes(t, cluster, true)
+}
+
+func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) {
 
-	leaderCore := cluster.Cores[0]
-	leaderAPI := leaderCore.Client.Address()
+	addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
 	atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)
 
+	leader := cluster.Cores[0]
+
 	// Seal the leader so we can install an address provider
 	{
-		EnsureCoreSealed(t, leaderCore)
-		leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
-		cluster.UnsealCore(t, leaderCore)
-		vault.TestWaitActive(t, leaderCore.Core)
+		EnsureCoreSealed(t, leader)
+		leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
+		if useStoredKeys {
+			cluster.UnsealCoreWithStoredKeys(t, leader)
+		} else {
+			cluster.UnsealCore(t, leader)
+		}
+		vault.TestWaitActive(t, leader.Core)
 	}
 
-	leaderInfo := &raft.LeaderJoinInfo{
-		LeaderAPIAddr: leaderAPI,
-		TLSConfig:     leaderCore.TLSConfig,
+	leaderInfos := []*raft.LeaderJoinInfo{
+		&raft.LeaderJoinInfo{
+			LeaderAPIAddr: leader.Client.Address(),
+			TLSConfig:     leader.TLSConfig,
+		},
 	}
 
+	// Join followers
 	for i := 1; i < len(cluster.Cores); i++ {
 		core := cluster.Cores[i]
 		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
-		leaderInfos := []*raft.LeaderJoinInfo{
-			leaderInfo,
-		}
 		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		cluster.UnsealCore(t, core)
+		if useStoredKeys {
+			// For autounseal, the raft backend is not initialized right away
+			// after the join.  We need to wait briefly before we can unseal.
+			awaitUnsealWithStoredKeys(t, core)
+		} else {
+			cluster.UnsealCore(t, core)
+		}
 	}
 
 	WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
 }
 
+func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) {
+
+	timeout := time.Now().Add(30 * time.Second)
+	for {
+		if time.Now().After(timeout) {
+			t.Fatal("raft join: timeout waiting for core to unseal")
+		}
+		// Its actually ok for an error to happen here the first couple of
+		// times -- it means the raft join hasn't gotten around to initializing
+		// the backend yet.
+		err := core.UnsealWithStoredKeys(context.Background())
+		if err == nil {
+			return
+		}
+		core.Logger().Warn("raft join: failed to unseal core", "error", err)
+		time.Sleep(time.Second)
+	}
+}
+
 // HardcodedServerAddressProvider is a ServerAddressProvider that uses
 // a hardcoded map of raft node addresses.
 //
@@ -494,6 +531,40 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r
 	}
 }
 
+// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e.
+// the correct number of servers, having the correct NodeIDs, and exactly one
+// leader.
+func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error {
+
+	backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
+	ctx := namespace.RootContext(context.Background())
+	config, err := backend.GetConfiguration(ctx)
+	if err != nil {
+		return err
+	}
+
+	servers := config.Servers
+	if len(servers) != numCores {
+		return fmt.Errorf("Found %d servers, not %d", len(servers), numCores)
+	}
+
+	leaders := 0
+	for i, s := range servers {
+		if s.NodeID != fmt.Sprintf("core-%d", i) {
+			return fmt.Errorf("Found unexpected node ID %q", s.NodeID)
+		}
+		if s.Leader {
+			leaders++
+		}
+	}
+
+	if leaders != 1 {
+		return fmt.Errorf("Found %d leaders", leaders)
+	}
+
+	return nil
+}
+
 func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
 	t.Helper()
 
diff --git a/helper/testhelpers/teststorage/teststorage_reusable_test.go b/helper/testhelpers/teststorage/teststorage_reusable_test.go
deleted file mode 100644
index 8d6869027652..000000000000
--- a/helper/testhelpers/teststorage/teststorage_reusable_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package teststorage
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/go-test/deep"
-
-	"github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/vault/helper/namespace"
-	"github.com/hashicorp/vault/helper/testhelpers"
-	vaulthttp "github.com/hashicorp/vault/http"
-	"github.com/hashicorp/vault/physical/raft"
-	"github.com/hashicorp/vault/sdk/helper/logging"
-	"github.com/hashicorp/vault/vault"
-)
-
-const numTestCores = 5
-
-func TestReusableStorage(t *testing.T) {
-
-	logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name())
-
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-
-		logger := logger.Named("inmem")
-		storage, cleanup := MakeReusableStorage(
-			t, logger, MakeInmemBackend(t, logger))
-		defer cleanup()
-		testReusableStorage(t, logger, storage, 51000)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-
-		logger := logger.Named("file")
-		storage, cleanup := MakeReusableStorage(
-			t, logger, MakeFileBackend(t, logger))
-		defer cleanup()
-		testReusableStorage(t, logger, storage, 52000)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-
-		logger := logger.Named("consul")
-		storage, cleanup := MakeReusableStorage(
-			t, logger, MakeConsulBackend(t, logger))
-		defer cleanup()
-		testReusableStorage(t, logger, storage, 53000)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-
-		logger := logger.Named("raft")
-		storage, cleanup := MakeReusableRaftStorage(t, logger, numTestCores)
-		defer cleanup()
-		testReusableStorage(t, logger, storage, 54000)
-	})
-}
-
-func testReusableStorage(
-	t *testing.T, logger hclog.Logger,
-	storage ReusableStorage, basePort int) {
-
-	rootToken, keys := initializeStorage(t, logger, storage, basePort)
-	reuseStorage(t, logger, storage, basePort, rootToken, keys)
-}
-
-// initializeStorage initializes a brand new backend storage.
-func initializeStorage(
-	t *testing.T, logger hclog.Logger,
-	storage ReusableStorage, basePort int) (string, [][]byte) {
-
-	var baseClusterPort = basePort + 10
-
-	// Start the cluster
-	var conf = vault.CoreConfig{
-		Logger: logger.Named("initializeStorage"),
-	}
-	var opts = vault.TestClusterOptions{
-		HandlerFunc:           vaulthttp.Handler,
-		NumCores:              numTestCores,
-		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
-		BaseClusterListenPort: baseClusterPort,
-	}
-	storage.Setup(&conf, &opts)
-	cluster := vault.NewTestCluster(t, &conf, &opts)
-	cluster.Start()
-	defer func() {
-		storage.Cleanup(t, cluster)
-		cluster.Cleanup()
-	}()
-
-	leader := cluster.Cores[0]
-	client := leader.Client
-
-	if storage.IsRaft {
-		// Join raft cluster
-		testhelpers.RaftClusterJoinNodes(t, cluster)
-		time.Sleep(15 * time.Second)
-		verifyRaftConfiguration(t, leader)
-	} else {
-		// Unseal
-		cluster.UnsealCores(t)
-	}
-
-	// Wait until unsealed
-	testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores)
-
-	// Write a secret that we will read back out later.
-	_, err := client.Logical().Write(
-		"secret/foo",
-		map[string]interface{}{"zork": "quux"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Seal the cluster
-	cluster.EnsureCoresSealed(t)
-
-	return cluster.RootToken, cluster.BarrierKeys
-}
-
-// reuseStorage uses a pre-populated backend storage.
-func reuseStorage(
-	t *testing.T, logger hclog.Logger,
-	storage ReusableStorage, basePort int,
-	rootToken string, keys [][]byte) {
-
-	var baseClusterPort = basePort + 10
-
-	// Start the cluster
-	var conf = vault.CoreConfig{
-		Logger: logger.Named("reuseStorage"),
-	}
-	var opts = vault.TestClusterOptions{
-		HandlerFunc:           vaulthttp.Handler,
-		NumCores:              numTestCores,
-		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
-		BaseClusterListenPort: baseClusterPort,
-		SkipInit:              true,
-	}
-	storage.Setup(&conf, &opts)
-	cluster := vault.NewTestCluster(t, &conf, &opts)
-	cluster.Start()
-	defer func() {
-		storage.Cleanup(t, cluster)
-		cluster.Cleanup()
-	}()
-
-	leader := cluster.Cores[0]
-	client := leader.Client
-	client.SetToken(rootToken)
-
-	cluster.BarrierKeys = keys
-	if storage.IsRaft {
-		// Set hardcoded Raft address providers
-		provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort)
-		testhelpers.SetRaftAddressProviders(t, cluster, provider)
-
-		// Unseal cores
-		for _, core := range cluster.Cores {
-			cluster.UnsealCore(t, core)
-		}
-		time.Sleep(15 * time.Second)
-		verifyRaftConfiguration(t, leader)
-	} else {
-		// Unseal
-		cluster.UnsealCores(t)
-	}
-
-	// Wait until unsealed
-	testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores)
-
-	// Read the secret
-	secret, err := client.Logical().Read("secret/foo")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
-		t.Fatal(diff)
-	}
-
-	// Seal the cluster
-	cluster.EnsureCoresSealed(t)
-}
-
-func verifyRaftConfiguration(t *testing.T, core *vault.TestClusterCore) {
-
-	backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
-	ctx := namespace.RootContext(context.Background())
-	config, err := backend.GetConfiguration(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	servers := config.Servers
-
-	if len(servers) != numTestCores {
-		t.Fatalf("Found %d servers, not %d", len(servers), numTestCores)
-	}
-
-	leaders := 0
-	for i, s := range servers {
-		if diff := deep.Equal(s.NodeID, fmt.Sprintf("core-%d", i)); len(diff) > 0 {
-			t.Fatal(diff)
-		}
-		if s.Leader {
-			leaders++
-		}
-	}
-
-	if leaders != 1 {
-		t.Fatalf("Found %d leaders, not 1", leaders)
-	}
-}
diff --git a/vault/core.go b/vault/core.go
index 6fc6d850cf80..98c0241d9dca 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -546,7 +546,8 @@ type CoreConfig struct {
 
 	ServiceRegistration sr.ServiceRegistration
 
-	Seal Seal
+	Seal       Seal
+	UnwrapSeal Seal
 
 	SecureRandomReader io.Reader
 
@@ -942,6 +943,11 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 
 	c.clusterListener.Store((*cluster.Listener)(nil))
 
+	err = c.adjustForSealMigration(conf.UnwrapSeal)
+	if err != nil {
+		return nil, err
+	}
+
 	return c, nil
 }
 
@@ -2224,9 +2230,113 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi
 	return barrierConf, recoveryConf, nil
 }
 
-func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
+func (c *Core) adjustForSealMigration(unwrapSeal Seal) error {
+
+	barrierSeal := c.seal
+
+	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background())
+	if err != nil {
+		return fmt.Errorf("Error checking for existing seal: %s", err)
+	}
+
+	// If we don't have an existing config or if it's the deprecated auto seal
+	// which needs an upgrade, skip out
+	if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
+		return nil
+	}
+
+	if unwrapSeal == nil {
+		// We have the same barrier type and the unwrap seal is nil so we're not
+		// migrating from same to same, IOW we assume it's not a migration
+		if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
+			return nil
+		}
+
+		// If we're not coming from Shamir, and the existing type doesn't match
+		// the barrier type, we need both the migration seal and the new seal
+		if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir {
+			return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`)
+		}
+	} else {
+		if unwrapSeal.BarrierType() == wrapping.Shamir {
+			return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
+		}
+	}
+
+	if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
+		return errors.New(`Recovery seal configuration not found for existing seal`)
+	}
+
+	var migrationSeal Seal
+	var newSeal Seal
+
+	// Determine the migrationSeal. This is either going to be an instance of
+	// shamir or the unwrapSeal.
+	switch existBarrierSealConfig.Type {
+	case wrapping.Shamir:
+		// The value reflected in config is what we're going to
+		migrationSeal = NewDefaultSeal(&vaultseal.Access{
+			Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
+				Logger: c.logger.Named("shamir"),
+			}),
+		})
+
+	default:
+		// If we're not coming from Shamir we expect the previous seal to be
+		// in the config and disabled.
+		migrationSeal = unwrapSeal
+	}
+
+	// newSeal will be the barrierSeal
+	newSeal = barrierSeal
+
+	if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
+		return errors.New("Migrating between same seal types is currently not supported")
+	}
+
+	if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
+		// In this case our migration seal is set so we are using it
+		// (potentially) for unwrapping. Set it on core for that purpose then
+		// exit.
+		c.setSealsForMigration(nil, nil, unwrapSeal)
+		return nil
+	}
+
+	// Set the appropriate barrier and recovery configs.
+	switch {
+	case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
+		// Migrating from auto->auto, copy the configs over
+		newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
+		newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
+	case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported():
+		// Migrating from auto->shamir, clone auto's recovery config and set
+		// stored keys to 1.
+		newSealConfig := existRecoverySealConfig.Clone()
+		newSealConfig.StoredShares = 1
+		newSeal.SetCachedBarrierConfig(newSealConfig)
+	case newSeal != nil && newSeal.RecoveryKeySupported():
+		// Migrating from shamir->auto, set a new barrier config and set
+		// recovery config to a clone of shamir's barrier config with stored
+		// keys set to 0.
+		newBarrierSealConfig := &SealConfig{
+			Type:            newSeal.BarrierType(),
+			SecretShares:    1,
+			SecretThreshold: 1,
+			StoredShares:    1,
+		}
+		newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
+
+		newRecoveryConfig := existBarrierSealConfig.Clone()
+		newRecoveryConfig.StoredShares = 0
+		newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
+	}
+
+	c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal)
+
+	return nil
+}
+
+func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) {
 	c.unwrapSeal = unwrapSeal
 	if c.unwrapSeal != nil {
 		c.unwrapSeal.SetCore(c)
diff --git a/vault/external_tests/sealmigration/seal_migration_pre14_test.go b/vault/external_tests/sealmigration/seal_migration_pre14_test.go
new file mode 100644
index 000000000000..6f72449da118
--- /dev/null
+++ b/vault/external_tests/sealmigration/seal_migration_pre14_test.go
@@ -0,0 +1,134 @@
+// +build !enterprise
+
+package sealmigration
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+
+	"github.com/hashicorp/go-hclog"
+	wrapping "github.com/hashicorp/go-kms-wrapping"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
+	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/vault"
+)
+
+// TestSealMigration_TransitToShamir_Pre14 tests transit-to-shamir seal
+// migration, using the pre-1.4 method of bringing down the whole cluster to do
+// the migration.
+func TestSealMigration_TransitToShamir_Pre14(t *testing.T) {
+	// Note that we do not test integrated raft storage since this is
+	// a pre-1.4 test.
+	testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false)
+}
+
+func testSealMigrationTransitToShamir_Pre14(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int) {
+
+	// Create the transit server.
+	tss := sealhelper.NewTransitSealServer(t)
+	defer func() {
+		if tss != nil {
+			tss.Cleanup()
+		}
+	}()
+	tss.MakeKey(t, "transit-seal-key")
+
+	// Initialize the backend with transit.
+	rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
+
+	// Migrate the backend from transit to shamir
+	migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys)
+
+	// Now that migration is done, we can nuke the transit server, since we
+	// can unseal without it.
+	tss.Cleanup()
+	tss = nil
+
+	// Run the backend with shamir.  Note that the recovery keys are now the
+	// barrier keys.
+	runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
+}
+
+func migrateFromTransitToShamir_Pre14(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int,
+	tss *sealhelper.TransitSealServer, transitSeal vault.Seal,
+	rootToken string, recoveryKeys [][]byte) {
+
+	var baseClusterPort = basePort + 10
+
+	var conf = vault.CoreConfig{
+		Logger: logger.Named("migrateFromTransitToShamir"),
+		// N.B. Providing an UnwrapSeal puts us in migration mode. This is the
+		// equivalent of doing the following in HCL:
+		//     seal "transit" {
+		//       // ...
+		//       disabled = "true"
+		//     }
+		UnwrapSeal: transitSeal,
+	}
+	var opts = vault.TestClusterOptions{
+		HandlerFunc:           vaulthttp.Handler,
+		NumCores:              numTestCores,
+		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
+		BaseClusterListenPort: baseClusterPort,
+		SkipInit:              true,
+	}
+	storage.Setup(&conf, &opts)
+	cluster := vault.NewTestCluster(t, &conf, &opts)
+	cluster.Start()
+	defer func() {
+		storage.Cleanup(t, cluster)
+		cluster.Cleanup()
+	}()
+
+	leader := cluster.Cores[0]
+	client := leader.Client
+	client.SetToken(rootToken)
+
+	// Attempt to unseal while the transit server is unreachable.  Although
+	// we're unsealing using the recovery keys, this is still an
+	// autounseal, so it should fail.
+	tss.EnsureCoresSealed(t)
+	unsealMigrate(t, client, recoveryKeys, false)
+	tss.UnsealCores(t)
+	testhelpers.WaitForActiveNode(t, tss.TestCluster)
+
+	// Unseal and migrate to Shamir. Although we're unsealing using the
+	// recovery keys, this is still an autounseal.
+	unsealMigrate(t, client, recoveryKeys, true)
+	testhelpers.WaitForActiveNode(t, cluster)
+
+	// Wait for migration to finish.  Sadly there is no callback, and the
+	// test will fail later on if we don't do this.
+	time.Sleep(10 * time.Second)
+
+	// Read the secret
+	secret, err := client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
+		t.Fatal(diff)
+	}
+
+	// Make sure the seal configs were updated correctly.
+	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
+	if r != nil {
+		t.Fatalf("expected nil recovery config, got: %#v", r)
+	}
+
+	cluster.EnsureCoresSealed(t)
+}
diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go
new file mode 100644
index 000000000000..78506b3023ea
--- /dev/null
+++ b/vault/external_tests/sealmigration/seal_migration_test.go
@@ -0,0 +1,517 @@
+package sealmigration
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+
+	"github.com/hashicorp/go-hclog"
+	wrapping "github.com/hashicorp/go-kms-wrapping"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
+	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/vault"
+)
+
+const (
+	numTestCores = 5
+	keyShares    = 3
+	keyThreshold = 3
+)
+
+type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int)
+
+func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) {
+
+	logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name())
+
+	t.Run("inmem", func(t *testing.T) {
+		t.Parallel()
+
+		logger := logger.Named("inmem")
+		storage, cleanup := teststorage.MakeReusableStorage(
+			t, logger, teststorage.MakeInmemBackend(t, logger))
+		defer cleanup()
+		tf(t, logger, storage, 20000)
+	})
+
+	t.Run("file", func(t *testing.T) {
+		t.Parallel()
+
+		logger := logger.Named("file")
+		storage, cleanup := teststorage.MakeReusableStorage(
+			t, logger, teststorage.MakeFileBackend(t, logger))
+		defer cleanup()
+		tf(t, logger, storage, 20020)
+	})
+
+	t.Run("consul", func(t *testing.T) {
+		t.Parallel()
+
+		logger := logger.Named("consul")
+		storage, cleanup := teststorage.MakeReusableStorage(
+			t, logger, teststorage.MakeConsulBackend(t, logger))
+		defer cleanup()
+		tf(t, logger, storage, 20040)
+	})
+
+	if includeRaft {
+		t.Run("raft", func(t *testing.T) {
+			t.Parallel()
+
+			logger := logger.Named("raft")
+			storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores)
+			defer cleanup()
+			tf(t, logger, storage, 20060)
+		})
+	}
+}
+
+// TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal
+// migration, using the pre-1.4 method of bringing down the whole cluster to do
+// the migration.
+func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) {
+	// Note that we do not test integrated raft storage since this is
+	// a pre-1.4 test.
+	testVariousBackends(t, testSealMigrationShamirToTransit_Pre14, false)
+}
+
+func testSealMigrationShamirToTransit_Pre14(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int) {
+
+	// Initialize the backend using shamir
+	rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort)
+
+	// Create the transit server.
+	tss := sealhelper.NewTransitSealServer(t)
+	defer func() {
+		tss.EnsureCoresSealed(t)
+		tss.Cleanup()
+	}()
+	tss.MakeKey(t, "transit-seal-key")
+
+	// Migrate the backend from shamir to transit.  Note that the barrier keys
+	// are now the recovery keys.
+	transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)
+
+	// Run the backend with transit.
+	runTransit(t, logger, storage, basePort, rootToken, transitSeal)
+}
+
+func migrateFromShamirToTransit_Pre14(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int,
+	tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte,
+) vault.Seal {
+
+	var baseClusterPort = basePort + 10
+
+	var transitSeal vault.Seal
+
+	var conf = vault.CoreConfig{
+		Logger: logger.Named("migrateFromShamirToTransit"),
+	}
+	var opts = vault.TestClusterOptions{
+		HandlerFunc:           vaulthttp.Handler,
+		NumCores:              numTestCores,
+		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
+		BaseClusterListenPort: baseClusterPort,
+		SkipInit:              true,
+		// N.B. Providing a transit seal puts us in migration mode.
+		SealFunc: func() vault.Seal {
+			// Each core will create its own transit seal here.  Later
+			// on it won't matter which one of these we end up using, since
+			// they were all created from the same transit key.
+			transitSeal = tss.MakeSeal(t, "transit-seal-key")
+			return transitSeal
+		},
+	}
+	storage.Setup(&conf, &opts)
+	cluster := vault.NewTestCluster(t, &conf, &opts)
+	cluster.Start()
+	defer func() {
+		storage.Cleanup(t, cluster)
+		cluster.Cleanup()
+	}()
+
+	leader := cluster.Cores[0]
+	client := leader.Client
+	client.SetToken(rootToken)
+
+	// Unseal and migrate to Transit.
+	unsealMigrate(t, client, recoveryKeys, true)
+
+	// Wait for migration to finish.  Sadly there is no callback, and the
+	// test will fail later on if we don't do this.
+	time.Sleep(10 * time.Second)
+
+	// Read the secret
+	secret, err := client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
+		t.Fatal(diff)
+	}
+
+	// Make sure the seal configs were updated correctly.
+	b, r, err := leader.Core.PhysicalSealConfigs(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
+	verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
+
+	cluster.EnsureCoresSealed(t)
+
+	return transitSeal
+}
+
+func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) {
+
+	for i, key := range keys {
+
+		// Try to unseal with missing "migrate" parameter
+		_, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
+			Key: base64.StdEncoding.EncodeToString(key),
+		})
+		if err == nil {
+			t.Fatal("expected error due to lack of migrate parameter")
+		}
+
+		// Unseal with "migrate" parameter
+		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
+			Key:     base64.StdEncoding.EncodeToString(key),
+			Migrate: true,
+		})
+
+		if i < keyThreshold-1 {
+			// Not enough keys have been provided yet.
+			if err != nil {
+				t.Fatal(err)
+			}
+		} else {
+			if transitServerAvailable {
+				// The transit server is running.
+				if err != nil {
+					t.Fatal(err)
+				}
+				if resp == nil || resp.Sealed {
+					t.Fatalf("expected unsealed state; got %#v", resp)
+				}
+			} else {
+				// The transit server is stopped.
+				if err == nil {
+					t.Fatal("expected error due to transit server being stopped.")
+				}
+			}
+			break
+		}
+	}
+}
+
+// verifyBarrierConfig verifies that a barrier configuration is correct.
+func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
+	t.Helper()
+	if cfg.Type != sealType {
+		t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType)
+	}
+	if cfg.SecretShares != shares {
+		t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares)
+	}
+	if cfg.SecretThreshold != threshold {
+		t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold)
+	}
+	if cfg.StoredShares != stored {
+		t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored)
+	}
+}
+
+// initializeShamir initializes a brand new backend storage with Shamir.
+func initializeShamir(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int) (string, [][]byte) {
+
+	var baseClusterPort = basePort + 10
+
+	// Start the cluster
+	var conf = vault.CoreConfig{
+		Logger: logger.Named("initializeShamir"),
+	}
+	var opts = vault.TestClusterOptions{
+		HandlerFunc:           vaulthttp.Handler,
+		NumCores:              numTestCores,
+		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
+		BaseClusterListenPort: baseClusterPort,
+	}
+	storage.Setup(&conf, &opts)
+	cluster := vault.NewTestCluster(t, &conf, &opts)
+	cluster.Start()
+	defer func() {
+		storage.Cleanup(t, cluster)
+		cluster.Cleanup()
+	}()
+
+	leader := cluster.Cores[0]
+	client := leader.Client
+
+	// Unseal
+	if storage.IsRaft {
+		testhelpers.RaftClusterJoinNodes(t, cluster)
+		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
+			t.Fatal(err)
+		}
+	} else {
+		cluster.UnsealCores(t)
+	}
+	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
+
+	// Write a secret that we will read back out later.
+	_, err := client.Logical().Write(
+		"secret/foo",
+		map[string]interface{}{"zork": "quux"})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Seal the cluster
+	cluster.EnsureCoresSealed(t)
+
+	return cluster.RootToken, cluster.BarrierKeys
+}
+
+// runShamir uses a pre-populated backend storage with Shamir.
+func runShamir(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int,
+	rootToken string, barrierKeys [][]byte) {
+
+	var baseClusterPort = basePort + 10
+
+	// Start the cluster
+	var conf = vault.CoreConfig{
+		Logger: logger.Named("runShamir"),
+	}
+	var opts = vault.TestClusterOptions{
+		HandlerFunc:           vaulthttp.Handler,
+		NumCores:              numTestCores,
+		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
+		BaseClusterListenPort: baseClusterPort,
+		SkipInit:              true,
+	}
+	storage.Setup(&conf, &opts)
+	cluster := vault.NewTestCluster(t, &conf, &opts)
+	cluster.Start()
+	defer func() {
+		storage.Cleanup(t, cluster)
+		cluster.Cleanup()
+	}()
+
+	leader := cluster.Cores[0]
+	client := leader.Client
+	client.SetToken(rootToken)
+
+	// Unseal
+	cluster.BarrierKeys = barrierKeys
+	if storage.IsRaft {
+		provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort)
+		testhelpers.SetRaftAddressProviders(t, cluster, provider)
+
+		for _, core := range cluster.Cores {
+			cluster.UnsealCore(t, core)
+		}
+
+		// This is apparently necessary for the raft cluster to get itself
+		// situated.
+		time.Sleep(15 * time.Second)
+
+		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
+			t.Fatal(err)
+		}
+	} else {
+		cluster.UnsealCores(t)
+	}
+	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
+
+	// Read the secret
+	secret, err := client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
+		t.Fatal(diff)
+	}
+
+	// Seal the cluster
+	cluster.EnsureCoresSealed(t)
+}
+
+// initializeTransit initializes a brand new backend storage with Transit.
+func initializeTransit(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int,
+	tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) {
+
+	var transitSeal vault.Seal
+
+	var baseClusterPort = basePort + 10
+
+	// Start the cluster
+	var conf = vault.CoreConfig{
+		Logger: logger.Named("initializeTransit"),
+	}
+	var opts = vault.TestClusterOptions{
+		HandlerFunc:           vaulthttp.Handler,
+		NumCores:              numTestCores,
+		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
+		BaseClusterListenPort: baseClusterPort,
+		SealFunc: func() vault.Seal {
+			// Each core will create its own transit seal here.  Later
+			// on it won't matter which one of these we end up using, since
+			// they were all created from the same transit key.
+			transitSeal = tss.MakeSeal(t, "transit-seal-key")
+			return transitSeal
+		},
+	}
+	storage.Setup(&conf, &opts)
+	cluster := vault.NewTestCluster(t, &conf, &opts)
+	cluster.Start()
+	defer func() {
+		storage.Cleanup(t, cluster)
+		cluster.Cleanup()
+	}()
+
+	leader := cluster.Cores[0]
+	client := leader.Client
+
+	// Join raft
+	if storage.IsRaft {
+		testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster)
+		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
+			t.Fatal(err)
+		}
+	}
+	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
+
+	// Write a secret that we will read back out later.
+	_, err := client.Logical().Write(
+		"secret/foo",
+		map[string]interface{}{"zork": "quux"})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Seal the cluster
+	cluster.EnsureCoresSealed(t)
+
+	return cluster.RootToken, cluster.RecoveryKeys, transitSeal
+}
+
+func runTransit(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int,
+	rootToken string, transitSeal vault.Seal) {
+
+	var baseClusterPort = basePort + 10
+
+	// Start the cluster
+	var conf = vault.CoreConfig{
+		Logger: logger.Named("runTransit"),
+		Seal:   transitSeal,
+	}
+	var opts = vault.TestClusterOptions{
+		HandlerFunc:           vaulthttp.Handler,
+		NumCores:              numTestCores,
+		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
+		BaseClusterListenPort: baseClusterPort,
+		SkipInit:              true,
+	}
+	storage.Setup(&conf, &opts)
+	cluster := vault.NewTestCluster(t, &conf, &opts)
+	cluster.Start()
+	defer func() {
+		storage.Cleanup(t, cluster)
+		cluster.Cleanup()
+	}()
+
+	leader := cluster.Cores[0]
+	client := leader.Client
+	client.SetToken(rootToken)
+
+	// Unseal.  Even though we are using autounseal, we have to unseal
+	// explicitly because we are using SkipInit.
+	if storage.IsRaft {
+		provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort)
+		testhelpers.SetRaftAddressProviders(t, cluster, provider)
+
+		for _, core := range cluster.Cores {
+			cluster.UnsealCoreWithStoredKeys(t, core)
+		}
+
+		// This is apparently necessary for the raft cluster to get itself
+		// situated.
+		time.Sleep(15 * time.Second)
+
+		if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
+			t.Fatal(err)
+		}
+	} else {
+		if err := cluster.UnsealCoresWithError(true); err != nil {
+			t.Fatal(err)
+		}
+	}
+	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
+
+	// Read the secret
+	secret, err := client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
+		t.Fatal(diff)
+	}
+
+	// Seal the cluster
+	cluster.EnsureCoresSealed(t)
+}
+
+// TestShamir is a temporary test that exercises the reusable raft storage.
+// It will be replaced once we do the post-1.4 migration testing.
+func TestShamir(t *testing.T) {
+	testVariousBackends(t, testShamir, true)
+}
+
+func testShamir(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int) {
+
+	rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort)
+	runShamir(t, logger, storage, basePort, rootToken, barrierKeys)
+}
+
+// TestTransit is a temporary test that exercises the reusable raft storage.
+// It will be replaced once we do the post-1.4 migration testing.
+func TestTransit(t *testing.T) {
+	testVariousBackends(t, testTransit, true)
+}
+
+func testTransit(
+	t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int) {
+
+	// Create the transit server.
+	tss := sealhelper.NewTransitSealServer(t)
+	defer tss.Cleanup()
+	tss.MakeKey(t, "transit-seal-key")
+
+	rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
+	runTransit(t, logger, storage, basePort, rootToken, transitSeal)
+}
diff --git a/vault/testing.go b/vault/testing.go
index a32236afb030..b09c5b4503bd 100644
--- a/vault/testing.go
+++ b/vault/testing.go
@@ -26,6 +26,8 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/hashicorp/vault/internalshared/configutil"
+
 	"github.com/armon/go-metrics"
 	hclog "github.com/hashicorp/go-hclog"
 	log "github.com/hashicorp/go-hclog"
@@ -816,6 +818,12 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
 	}
 }
 
+func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
+	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
 func (c *TestCluster) EnsureCoresSealed(t testing.T) {
 	t.Helper()
 	if err := c.ensureCoresSealed(); err != nil {
@@ -959,14 +967,28 @@ type TestClusterOptions struct {
 	HandlerFunc              func(*HandlerProperties) http.Handler
 	DefaultHandlerProperties HandlerProperties
 
-	// BaseListenAddress is used to assign ports in sequence to the listener
-	// of each core.  It shoud be a string of the form "127.0.0.1:50000"
+	// BaseListenAddress is used to explicitly assign ports in sequence to the
+	// listener of each core.  It should be a string of the form
+	// "127.0.0.1:20000"
+	//
+	// WARNING: Using an explicitly assigned port above 30000 may clash with
+	// ephemeral ports that have been assigned by the OS in other tests.  The
+	// use of explicitly assigned ports below 30000 is strongly recommended.
+	// In addition, you should be careful to use explicitly assigned ports that
+	// do not clash with any other explicitly assigned ports in other tests.
 	BaseListenAddress string
 
-	// BaseClusterListenPort is used to assign ports in sequence to the
-	// cluster listener of each core.  If BaseClusterListenPort is specified,
-	// then BaseListenAddress must also be specified.  Each cluster listener
-	// will use the same host as the one specified in BaseListenAddress.
+	// BaseClusterListenPort is used to explicitly assign ports in sequence to
+	// the cluster listener of each core.  If BaseClusterListenPort is
+	// specified, then BaseListenAddress must also be specified.  Each cluster
+	// listener will use the same host as the one specified in
+	// BaseListenAddress.
+	//
+	// WARNING: Using an explicitly assigned port above 30000 may clash with
+	// ephemeral ports that have been assigned by the OS in other tests.  The
+	// use of explicitly assigned ports below 30000 is strongly recommended.
+	// In addition, you should be careful to use explicitly assigned ports that
+	// do not clash with any other explicitly assigned ports in other tests.
 	BaseClusterListenPort int
 
 	NumCores int
@@ -1338,6 +1360,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
 		coreConfig.CacheSize = base.CacheSize
 		coreConfig.PluginDirectory = base.PluginDirectory
 		coreConfig.Seal = base.Seal
+		coreConfig.UnwrapSeal = base.UnwrapSeal
 		coreConfig.DevToken = base.DevToken
 		coreConfig.EnableRaw = base.EnableRaw
 		coreConfig.DisableSealWrap = base.DisableSealWrap

From 8cabb11b48117157d8ed858b5fe8158edce3498e Mon Sep 17 00:00:00 2001
From: ncabatoff <ncabatoff@hashicorp.com>
Date: Thu, 11 Jun 2020 16:20:36 -0400
Subject: [PATCH 15/29] Clarify cache setting. (#9204)

---
 website/pages/docs/configuration/index.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/pages/docs/configuration/index.mdx b/website/pages/docs/configuration/index.mdx
index 299d3a1d4b78..471e37aa5581 100644
--- a/website/pages/docs/configuration/index.mdx
+++ b/website/pages/docs/configuration/index.mdx
@@ -61,7 +61,7 @@ to specify where the configuration is.
   Vault cluster. If omitted, Vault will generate a value. When connecting to
   Vault Enterprise, this value will be used in the interface.
 
-- `cache_size` `(string: "32000")` – Specifies the size of the read cache used
+- `cache_size` `(string: "131072")` – Specifies the size of the read cache used
   by the physical storage subsystem. The value is in number of entries, so the
   total cache size depends on the size of stored entries.
 

From b2441efd377b6bb56a84c256612920718cacb22c Mon Sep 17 00:00:00 2001
From: Michael Golowka <72365+pcman312@users.noreply.github.com>
Date: Thu, 11 Jun 2020 16:08:20 -0600
Subject: [PATCH 16/29] Integrate password policies into RabbitMQ secret engine
 (#9143)

* Add password policies to RabbitMQ & update docs
* Also updates some parts of the password policies to aid/fix testing
---
 builtin/logical/rabbitmq/backend.go           | 11 +--
 builtin/logical/rabbitmq/backend_test.go      | 56 ++++++++---
 builtin/logical/rabbitmq/passwords.go         | 14 +++
 .../rabbitmq/path_config_connection.go        | 55 +++++++++--
 builtin/logical/rabbitmq/path_role_create.go  |  7 +-
 sdk/helper/random/string_generator.go         |  2 +-
 sdk/helper/random/string_generator_test.go    | 67 ++++++-------
 sdk/logical/system_view.go                    | 13 +++
 vault/logical_system_test.go                  |  2 +-
 .../sdk/helper/random/string_generator.go     |  2 +-
 .../vault/sdk/logical/system_view.go          | 13 +++
 .../pages/api-docs/secret/rabbitmq/index.mdx  | 93 +++++++++++++++++--
 website/pages/docs/secrets/rabbitmq/index.mdx |  6 +-
 13 files changed, 262 insertions(+), 79 deletions(-)
 create mode 100644 builtin/logical/rabbitmq/passwords.go

diff --git a/builtin/logical/rabbitmq/backend.go b/builtin/logical/rabbitmq/backend.go
index 95c99919e562..b813d128f5f6 100644
--- a/builtin/logical/rabbitmq/backend.go
+++ b/builtin/logical/rabbitmq/backend.go
@@ -2,7 +2,6 @@ package rabbitmq
 
 import (
 	"context"
-	"fmt"
 	"strings"
 	"sync"
 
@@ -73,18 +72,10 @@ func (b *backend) Client(ctx context.Context, s logical.Storage) (*rabbithole.Cl
 	b.lock.RUnlock()
 
 	// Otherwise, attempt to make connection
-	entry, err := s.Get(ctx, "config/connection")
+	connConfig, err := readConfig(ctx, s)
 	if err != nil {
 		return nil, err
 	}
-	if entry == nil {
-		return nil, fmt.Errorf("configure the client connection with config/connection first")
-	}
-
-	var connConfig connectionConfig
-	if err := entry.DecodeJSON(&connConfig); err != nil {
-		return nil, err
-	}
 
 	b.lock.Lock()
 	defer b.lock.Unlock()
diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go
index 552f5ca704b6..2b45945d2dd8 100644
--- a/builtin/logical/rabbitmq/backend_test.go
+++ b/builtin/logical/rabbitmq/backend_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/vault/helper/testhelpers/docker"
 	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
 	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/random"
 	"github.com/hashicorp/vault/sdk/logical"
 	rabbithole "github.com/michaelklishin/rabbit-hole"
 	"github.com/mitchellh/mapstructure"
@@ -27,6 +28,8 @@ const (
 	testTags        = "administrator"
 	testVHosts      = `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}`
 	testVHostTopics = `{"/": {"amq.topic": {"write": ".*", "read": ".*"}}}`
+
+	roleName = "web"
 )
 
 func prepareRabbitMQTestContainer(t *testing.T) (func(), string, int) {
@@ -89,9 +92,9 @@ func TestBackend_basic(t *testing.T) {
 		PreCheck:       testAccPreCheckFunc(t, uri),
 		LogicalBackend: b,
 		Steps: []logicaltest.TestStep{
-			testAccStepConfig(t, uri),
+			testAccStepConfig(t, uri, ""),
 			testAccStepRole(t),
-			testAccStepReadCreds(t, b, uri, "web"),
+			testAccStepReadCreds(t, b, uri, roleName),
 		},
 	})
 
@@ -111,10 +114,10 @@ func TestBackend_returnsErrs(t *testing.T) {
 		PreCheck:       testAccPreCheckFunc(t, uri),
 		LogicalBackend: b,
 		Steps: []logicaltest.TestStep{
-			testAccStepConfig(t, uri),
+			testAccStepConfig(t, uri, ""),
 			{
 				Operation: logical.CreateOperation,
-				Path:      "roles/web",
+				Path:      fmt.Sprintf("roles/%s", roleName),
 				Data: map[string]interface{}{
 					"tags":         testTags,
 					"vhosts":       `{"invalid":{"write": ".*", "read": ".*"}}`,
@@ -123,7 +126,7 @@ func TestBackend_returnsErrs(t *testing.T) {
 			},
 			{
 				Operation: logical.ReadOperation,
-				Path:      "creds/web",
+				Path:      fmt.Sprintf("creds/%s", roleName),
 				ErrorOk:   true,
 			},
 		},
@@ -144,11 +147,35 @@ func TestBackend_roleCrud(t *testing.T) {
 		PreCheck:       testAccPreCheckFunc(t, uri),
 		LogicalBackend: b,
 		Steps: []logicaltest.TestStep{
-			testAccStepConfig(t, uri),
+			testAccStepConfig(t, uri, ""),
+			testAccStepRole(t),
+			testAccStepReadRole(t, roleName, testTags, testVHosts, testVHostTopics),
+			testAccStepDeleteRole(t, roleName),
+			testAccStepReadRole(t, roleName, "", "", ""),
+		},
+	})
+}
+
+func TestBackend_roleWithPasswordPolicy(t *testing.T) {
+	if os.Getenv(logicaltest.TestEnvVar) == "" {
+		t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
+		return
+	}
+
+	backendConfig := logical.TestBackendConfig()
+	backendConfig.System.(*logical.StaticSystemView).SetPasswordPolicy("testpolicy", random.DefaultStringGenerator)
+	b, _ := Factory(context.Background(), backendConfig)
+
+	cleanup, uri, _ := prepareRabbitMQTestContainer(t)
+	defer cleanup()
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		PreCheck:       testAccPreCheckFunc(t, uri),
+		LogicalBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t, uri, "testpolicy"),
 			testAccStepRole(t),
-			testAccStepReadRole(t, "web", testTags, testVHosts, testVHostTopics),
-			testAccStepDeleteRole(t, "web"),
-			testAccStepReadRole(t, "web", "", "", ""),
+			testAccStepReadCreds(t, b, uri, roleName),
 		},
 	})
 }
@@ -161,7 +188,7 @@ func testAccPreCheckFunc(t *testing.T, uri string) func() {
 	}
 }
 
-func testAccStepConfig(t *testing.T, uri string) logicaltest.TestStep {
+func testAccStepConfig(t *testing.T, uri string, passwordPolicy string) logicaltest.TestStep {
 	username := os.Getenv(envRabbitMQUsername)
 	if len(username) == 0 {
 		username = "guest"
@@ -175,9 +202,10 @@ func testAccStepConfig(t *testing.T, uri string) logicaltest.TestStep {
 		Operation: logical.UpdateOperation,
 		Path:      "config/connection",
 		Data: map[string]interface{}{
-			"connection_uri": uri,
-			"username":       username,
-			"password":       password,
+			"connection_uri":  uri,
+			"username":        username,
+			"password":        password,
+			"password_policy": passwordPolicy,
 		},
 	}
 }
@@ -185,7 +213,7 @@ func testAccStepConfig(t *testing.T, uri string) logicaltest.TestStep {
 func testAccStepRole(t *testing.T) logicaltest.TestStep {
 	return logicaltest.TestStep{
 		Operation: logical.UpdateOperation,
-		Path:      "roles/web",
+		Path:      fmt.Sprintf("roles/%s", roleName),
 		Data: map[string]interface{}{
 			"tags":         testTags,
 			"vhosts":       testVHosts,
diff --git a/builtin/logical/rabbitmq/passwords.go b/builtin/logical/rabbitmq/passwords.go
new file mode 100644
index 000000000000..cb660bc5c29b
--- /dev/null
+++ b/builtin/logical/rabbitmq/passwords.go
@@ -0,0 +1,14 @@
+package rabbitmq
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/helper/base62"
+)
+
+func (b *backend) generatePassword(ctx context.Context, policyName string) (password string, err error) {
+	if policyName != "" {
+		return b.System().GeneratePasswordFromPolicy(ctx, policyName)
+	}
+	return base62.Random(36)
+}
diff --git a/builtin/logical/rabbitmq/path_config_connection.go b/builtin/logical/rabbitmq/path_config_connection.go
index 11536aad0537..cd41b73c8a4f 100644
--- a/builtin/logical/rabbitmq/path_config_connection.go
+++ b/builtin/logical/rabbitmq/path_config_connection.go
@@ -9,6 +9,10 @@ import (
 	rabbithole "github.com/michaelklishin/rabbit-hole"
 )
 
+const (
+	storageKey = "config/connection"
+)
+
 func pathConfigConnection(b *backend) *framework.Path {
 	return &framework.Path{
 		Pattern: "config/connection",
@@ -30,6 +34,10 @@ func pathConfigConnection(b *backend) *framework.Path {
 				Default:     true,
 				Description: `If set, connection_uri is verified by actually connecting to the RabbitMQ management API`,
 			},
+			"password_policy": &framework.FieldSchema{
+				Type:        framework.TypeString,
+				Description: "Name of the password policy to use to generate passwords for dynamic credentials.",
+			},
 		},
 
 		Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -57,6 +65,8 @@ func (b *backend) pathConnectionUpdate(ctx context.Context, req *logical.Request
 		return logical.ErrorResponse("missing password"), nil
 	}
 
+	passwordPolicy := data.Get("password_policy").(string)
+
 	// Don't check the connection_url if verification is disabled
 	verifyConnection := data.Get("verify_connection").(bool)
 	if verifyConnection {
@@ -73,15 +83,14 @@ func (b *backend) pathConnectionUpdate(ctx context.Context, req *logical.Request
 	}
 
 	// Store it
-	entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
-		URI:      uri,
-		Username: username,
-		Password: password,
-	})
-	if err != nil {
-		return nil, err
+	config := connectionConfig{
+		URI:            uri,
+		Username:       username,
+		Password:       password,
+		PasswordPolicy: passwordPolicy,
 	}
-	if err := req.Storage.Put(ctx, entry); err != nil {
+	err := writeConfig(ctx, req.Storage, config)
+	if err != nil {
 		return nil, err
 	}
 
@@ -91,6 +100,33 @@ func (b *backend) pathConnectionUpdate(ctx context.Context, req *logical.Request
 	return nil, nil
 }
 
+func readConfig(ctx context.Context, storage logical.Storage) (connectionConfig, error) {
+	entry, err := storage.Get(ctx, storageKey)
+	if err != nil {
+		return connectionConfig{}, err
+	}
+	if entry == nil {
+		return connectionConfig{}, nil
+	}
+
+	var connConfig connectionConfig
+	if err := entry.DecodeJSON(&connConfig); err != nil {
+		return connectionConfig{}, err
+	}
+	return connConfig, nil
+}
+
+func writeConfig(ctx context.Context, storage logical.Storage, config connectionConfig) error {
+	entry, err := logical.StorageEntryJSON(storageKey, config)
+	if err != nil {
+		return err
+	}
+	if err := storage.Put(ctx, entry); err != nil {
+		return err
+	}
+	return nil
+}
+
 // connectionConfig contains the information required to make a connection to a RabbitMQ node
 type connectionConfig struct {
 	// URI of the RabbitMQ server
@@ -101,6 +137,9 @@ type connectionConfig struct {
 
 	// Password for the Username
 	Password string `json:"password"`
+
+	// PasswordPolicy for generating passwords for dynamic credentials
+	PasswordPolicy string `json:"password_policy"`
 }
 
 const pathConfigConnectionHelpSyn = `
diff --git a/builtin/logical/rabbitmq/path_role_create.go b/builtin/logical/rabbitmq/path_role_create.go
index 63c09f6d6d4e..77f30d2dc6d8 100644
--- a/builtin/logical/rabbitmq/path_role_create.go
+++ b/builtin/logical/rabbitmq/path_role_create.go
@@ -53,7 +53,12 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr
 	}
 	username := fmt.Sprintf("%s-%s", req.DisplayName, uuidVal)
 
-	password, err := uuid.GenerateUUID()
+	config, err := readConfig(ctx, req.Storage)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read configuration: %w", err)
+	}
+
+	password, err := b.generatePassword(ctx, config.PasswordPolicy)
 	if err != nil {
 		return nil, err
 	}
diff --git a/sdk/helper/random/string_generator.go b/sdk/helper/random/string_generator.go
index 761577455a22..621930eb66f2 100644
--- a/sdk/helper/random/string_generator.go
+++ b/sdk/helper/random/string_generator.go
@@ -37,7 +37,7 @@ var (
 	AlphaNumericFullSymbolRuneset  = []rune(AlphaNumericFullSymbolCharset)
 
 	// DefaultStringGenerator has reasonable default rules for generating strings
-	DefaultStringGenerator = StringGenerator{
+	DefaultStringGenerator = &StringGenerator{
 		Length: 20,
 		Rules: []Rule{
 			CharsetRule{
diff --git a/sdk/helper/random/string_generator_test.go b/sdk/helper/random/string_generator_test.go
index 55b252a4f9eb..af4e7da14962 100644
--- a/sdk/helper/random/string_generator_test.go
+++ b/sdk/helper/random/string_generator_test.go
@@ -106,7 +106,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 	type testCase struct {
 		timeout   time.Duration
 		generator *StringGenerator
-		rng io.Reader
+		rng       io.Reader
 	}
 
 	tests := map[string]testCase{
@@ -121,7 +121,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 				},
 				charset: AlphaNumericShortSymbolRuneset,
 			},
-				rng:     rand.Reader,
+			rng: rand.Reader,
 		},
 		"impossible rules": {
 			timeout: 10 * time.Millisecond, // Keep this short so the test doesn't take too long
@@ -134,7 +134,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 				},
 				charset: AlphaNumericShortSymbolRuneset,
 			},
-				rng:     rand.Reader,
+			rng: rand.Reader,
 		},
 		"bad RNG reader": {
 			timeout: 10 * time.Millisecond, // Keep this short so the test doesn't take too long
@@ -143,7 +143,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 				Rules:   []Rule{},
 				charset: AlphaNumericShortSymbolRuneset,
 			},
-				rng:     badReader{},
+			rng: badReader{},
 		},
 		"0 length": {
 			timeout: 10 * time.Millisecond,
@@ -157,7 +157,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 				},
 				charset: []rune("abcde"),
 			},
-				rng:     rand.Reader,
+			rng: rand.Reader,
 		},
 		"-1 length": {
 			timeout: 10 * time.Millisecond,
@@ -171,7 +171,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 				},
 				charset: []rune("abcde"),
 			},
-				rng:     rand.Reader,
+			rng: rand.Reader,
 		},
 		"no charset": {
 			timeout: 10 * time.Millisecond,
@@ -179,7 +179,7 @@ func TestStringGenerator_Generate_errors(t *testing.T) {
 				Length: 20,
 				Rules:  []Rule{},
 			},
-			rng:    rand.Reader,
+			rng: rand.Reader,
 		},
 	}
 
@@ -333,8 +333,8 @@ func TestRandomRunes_errors(t *testing.T) {
 				"šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠" +
 				"Σ",
 			),
-			length:20,
-			rng: rand.Reader,
+			length: 20,
+			rng:    rand.Reader,
 		},
 		"length is zero": {
 			charset: []rune("abcde"),
@@ -372,22 +372,24 @@ func BenchmarkStringGenerator_Generate(b *testing.B) {
 	}
 
 	type testCase struct {
-		generator StringGenerator
+		generator *StringGenerator
 	}
 
 	benches := map[string]testCase{
-		"no rules": {
-			generator: StringGenerator{
-				charset: AlphaNumericFullSymbolRuneset,
-				Rules:   []Rule{},
+		"no restrictions": {
+			generator: &StringGenerator{
+				Rules: []Rule{
+					CharsetRule{
+						Charset: AlphaNumericFullSymbolRuneset,
+					},
+				},
 			},
 		},
 		"default generator": {
 			generator: DefaultStringGenerator,
 		},
 		"large symbol set": {
-			generator: StringGenerator{
-				charset: AlphaNumericFullSymbolRuneset,
+			generator: &StringGenerator{
 				Rules: []Rule{
 					CharsetRule{
 						Charset:  LowercaseRuneset,
@@ -409,13 +411,14 @@ func BenchmarkStringGenerator_Generate(b *testing.B) {
 			},
 		},
 		"max symbol set": {
-			generator: StringGenerator{
-				charset: []rune(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" +
-					"`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ" +
-					"ġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠ" +
-					"šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠",
-				),
+			generator: &StringGenerator{
 				Rules: []Rule{
+					CharsetRule{
+						Charset: []rune(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" +
+							"`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ" +
+							"ġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠ" +
+							"šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠"),
+					},
 					CharsetRule{
 						Charset:  LowercaseRuneset,
 						MinChars: 1,
@@ -432,9 +435,11 @@ func BenchmarkStringGenerator_Generate(b *testing.B) {
 			},
 		},
 		"restrictive charset rules": {
-			generator: StringGenerator{
-				charset: AlphaNumericShortSymbolRuneset,
+			generator: &StringGenerator{
 				Rules: []Rule{
+					CharsetRule{
+						Charset: AlphaNumericShortSymbolRuneset,
+					},
 					CharsetRule{
 						Charset:  []rune("A"),
 						MinChars: 1,
@@ -551,7 +556,7 @@ func (badReader) Read([]byte) (int, error) {
 
 func TestValidate(t *testing.T) {
 	type testCase struct {
-		generator StringGenerator
+		generator *StringGenerator
 		expectErr bool
 	}
 
@@ -561,33 +566,33 @@ func TestValidate(t *testing.T) {
 			expectErr: false,
 		},
 		"length is 0": {
-			generator: StringGenerator{
+			generator: &StringGenerator{
 				Length: 0,
 			},
 			expectErr: true,
 		},
 		"length is negative": {
-			generator: StringGenerator{
+			generator: &StringGenerator{
 				Length: -2,
 			},
 			expectErr: true,
 		},
 		"nil charset, no rules": {
-			generator: StringGenerator{
+			generator: &StringGenerator{
 				Length:  5,
 				charset: nil,
 			},
 			expectErr: true,
 		},
 		"zero length charset, no rules": {
-			generator: StringGenerator{
+			generator: &StringGenerator{
 				Length:  5,
 				charset: []rune{},
 			},
 			expectErr: true,
 		},
 		"rules require password longer than length": {
-			generator: StringGenerator{
+			generator: &StringGenerator{
 				Length:  5,
 				charset: []rune("abcde"),
 				Rules: []Rule{
@@ -600,7 +605,7 @@ func TestValidate(t *testing.T) {
 			expectErr: true,
 		},
 		"charset has non-printable characters": {
-			generator: StringGenerator{
+			generator: &StringGenerator{
 				Length: 0,
 				charset: []rune{
 					'a',
diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go
index 41f82f36c329..b68e1bbc29b8 100644
--- a/sdk/logical/system_view.go
+++ b/sdk/logical/system_view.go
@@ -194,3 +194,16 @@ func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policy
 	}
 	return policy.Generate(ctx, nil)
 }
+
+func (d *StaticSystemView) SetPasswordPolicy(name string, policy PasswordPolicy) {
+	if d.PasswordPolicies == nil {
+		d.PasswordPolicies = map[string]PasswordPolicy{}
+	}
+	d.PasswordPolicies[name] = policy
+}
+
+func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) {
+	_, existed = d.PasswordPolicies[name]
+	delete(d.PasswordPolicies, name)
+	return existed
+}
diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go
index 7f13ba1f68b9..1ff14558e75d 100644
--- a/vault/logical_system_test.go
+++ b/vault/logical_system_test.go
@@ -3188,7 +3188,7 @@ func TestHandlePoliciesPasswordGenerate(t *testing.T) {
 	})
 
 	t.Run("success", func(t *testing.T) {
-		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 		defer cancel()
 
 		policyEntry := storageEntry(t, "testpolicy",
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/random/string_generator.go b/vendor/github.com/hashicorp/vault/sdk/helper/random/string_generator.go
index 761577455a22..621930eb66f2 100644
--- a/vendor/github.com/hashicorp/vault/sdk/helper/random/string_generator.go
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/random/string_generator.go
@@ -37,7 +37,7 @@ var (
 	AlphaNumericFullSymbolRuneset  = []rune(AlphaNumericFullSymbolCharset)
 
 	// DefaultStringGenerator has reasonable default rules for generating strings
-	DefaultStringGenerator = StringGenerator{
+	DefaultStringGenerator = &StringGenerator{
 		Length: 20,
 		Rules: []Rule{
 			CharsetRule{
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go
index 41f82f36c329..b68e1bbc29b8 100644
--- a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go
@@ -194,3 +194,16 @@ func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policy
 	}
 	return policy.Generate(ctx, nil)
 }
+
+func (d *StaticSystemView) SetPasswordPolicy(name string, policy PasswordPolicy) {
+	if d.PasswordPolicies == nil {
+		d.PasswordPolicies = map[string]PasswordPolicy{}
+	}
+	d.PasswordPolicies[name] = policy
+}
+
+func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) {
+	_, existed = d.PasswordPolicies[name]
+	delete(d.PasswordPolicies, name)
+	return existed
+}
diff --git a/website/pages/api-docs/secret/rabbitmq/index.mdx b/website/pages/api-docs/secret/rabbitmq/index.mdx
index d754c666ee1d..c9ab5cc0579f 100644
--- a/website/pages/api-docs/secret/rabbitmq/index.mdx
+++ b/website/pages/api-docs/secret/rabbitmq/index.mdx
@@ -26,17 +26,16 @@ RabbitMQ.
 
 ### Parameters
 
-- `connection_uri` `(string: <required>)` – Specifies the RabbitMQ connection
-  URI.
+- `connection_uri` `(string: <required>)` – Specifies the RabbitMQ connection URI.
 
-- `username` `(string: <required>)` – Specifies the RabbitMQ management
-  administrator username.
+- `username` `(string: <required>)` – Specifies the RabbitMQ management administrator username.
 
-- `password` `(string: <required>)` – Specifies the RabbitMQ management
-  administrator password.
+- `password` `(string: <required>)` – Specifies the RabbitMQ management administrator password.
 
-- `verify_connection` `(bool: true)` – Specifies whether to verify connection
-  URI, username, and password.
+- `verify_connection` `(bool: true)` – Specifies whether to verify connection URI, username, and password.
+
+- `password_policy` `(string: "")` - Specifies a [password policy](/docs/concepts/password-policies) to
+  use when creating dynamic credentials. Defaults to generating an alphanumeric password if not set.
 
 ### Sample Payload
 
@@ -44,12 +43,16 @@ RabbitMQ.
 {
   "connection_uri": "https://...",
   "username": "user",
-  "password": "password"
+  "password": "password",
+  "password_policy": "rabbitmq_policy"
 }
 ```
 
 ### Sample Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
@@ -57,6 +60,18 @@ $ curl \
     --data @payload.json \
     http://127.0.0.1:8200/v1/rabbitmq/config/connection
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault write rabbitmq/config/connection \
+    connection_uri="http://localhost:8080" \
+    username="user" \
+    password="password" \
+    password_policy="rabbitmq_policy"
+```
+</Tab>
+</Tabs>
 
 ## Configure Lease
 
@@ -83,6 +98,9 @@ This endpoint configures the lease settings for generated credentials.
 
 ### Sample Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
@@ -90,6 +108,16 @@ $ curl \
     --data @payload.json \
     http://127.0.0.1:8200/v1/rabbitmq/config/lease
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault write rabbitmq/config/lease \
+    ttl=1800 \
+    max_ttl=3600
+```
+</Tab>
+</Tabs>
 
 ## Create Role
 
@@ -124,6 +152,9 @@ This endpoint creates or updates the role definition.
 
 ### Sample Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
@@ -131,6 +162,17 @@ $ curl \
     --data @payload.json \
     http://127.0.0.1:8200/v1/rabbitmq/roles/my-role
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault write rabbitmq/roles/my-role \
+    tags="tag1,tag2" \
+    vhosts="..." \
+    vhost_topics="..."
+```
+</Tab>
+</Tabs>
 
 ## Read Role
 
@@ -147,11 +189,22 @@ This endpoint queries the role definition.
 
 ### Sample Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
     http://127.0.0.1:8200/v1/rabbitmq/roles/my-role
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault read rabbitmq/roles/my-role
+```
+</Tab>
+</Tabs>
 
 ### Sample Response
 
@@ -180,12 +233,23 @@ This endpoint deletes the role definition.
 
 ### Sample Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
     --request DELETE \
     http://127.0.0.1:8200/v1/rabbitmq/roles/my-role
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+vault delete rabbitmq/roles/my-role
+```
+</Tab>
+</Tabs>
 
 ## Generate Credentials
 
@@ -203,11 +267,22 @@ role.
 
 ### Sample Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
     http://127.0.0.1:8200/v1/rabbitmq/creds/my-role
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault read rabbitmq/creds/my-role
+```
+</Tab>
+</Tabs>
 
 ### Sample Response
 
diff --git a/website/pages/docs/secrets/rabbitmq/index.mdx b/website/pages/docs/secrets/rabbitmq/index.mdx
index 007c9ec7d008..f8c0b0fd8458 100644
--- a/website/pages/docs/secrets/rabbitmq/index.mdx
+++ b/website/pages/docs/secrets/rabbitmq/index.mdx
@@ -81,11 +81,11 @@ the proper permission, it can generate credentials.
     $ vault read rabbitmq/creds/my-role
     Key                Value
     ---                -----
-    lease_id           rabbitmq/creds/my-role/37d70d04-f24d-760a-e06e-b9b21087f0f4
+    lease_id           rabbitmq/creds/my-role/I39Hu8XXOombof4wiK5bKMn9
     lease_duration     768h
     lease_renewable    true
-    password           a98af72b-b6c9-b4b1-fe37-c73a572befed
-    username           token-590f1fe2-1094-a4d6-01a7-9d4ff756a085
+    password           3yNDBikgQvrkx2VA2zhq5IdSM7IWk1RyMYJr
+    username           root-39669250-3894-8032-c420-3d58483ebfc4
     ```
 
     Using ACLs, it is possible to restrict using the rabbitmq secrets engine

From 0c535a53ac5c633d805acda909d2dc9fb62709c0 Mon Sep 17 00:00:00 2001
From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com>
Date: Thu, 11 Jun 2020 18:08:46 -0700
Subject: [PATCH 17/29] fix: configutil redeclared as imported package name
 (#9211)

---
 vault/testing.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/vault/testing.go b/vault/testing.go
index b09c5b4503bd..0c73b1f29ae5 100644
--- a/vault/testing.go
+++ b/vault/testing.go
@@ -26,8 +26,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/hashicorp/vault/internalshared/configutil"
-
 	"github.com/armon/go-metrics"
 	hclog "github.com/hashicorp/go-hclog"
 	log "github.com/hashicorp/go-hclog"

From 0223f597d31a88095b4fd8a042f91b1d6cd92247 Mon Sep 17 00:00:00 2001
From: Roman Iuvshyn <roman.iuvshyn@transferwise.com>
Date: Fri, 12 Jun 2020 05:02:20 +0300
Subject: [PATCH 18/29] add disable_iss_validation option to k8s auth docs
 (#9142)

---
 website/pages/api-docs/auth/kubernetes/index.mdx | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/website/pages/api-docs/auth/kubernetes/index.mdx b/website/pages/api-docs/auth/kubernetes/index.mdx
index 3b38f1164bae..634b8a328608 100644
--- a/website/pages/api-docs/auth/kubernetes/index.mdx
+++ b/website/pages/api-docs/auth/kubernetes/index.mdx
@@ -40,6 +40,8 @@ access the Kubernetes API.
   keys.
 - `issuer` `(string: "")` - Optional JWT issuer. If no issuer is specified, then this plugin will
   use `kubernetes.io/serviceaccount` as the default issuer.
+- `disable_iss_validation` `(bool: false)` - Disable JWT issuer validation. Allows to skip ISS validation.
+
 
 ### Sample Payload
 
@@ -84,7 +86,8 @@ $ curl \
   "data":{
     "kubernetes_host": "https://192.168.99.100:8443",
     "kubernetes_ca_cert": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----",
-    "pem_keys": ["-----BEGIN CERTIFICATE-----.....", .....]
+    "pem_keys": ["-----BEGIN CERTIFICATE-----.....", .....],
+    "disable_iss_validation": false
   }
 }
 ```

From 342318aca4e2ce0b3ca3ebfcddd32ebb68a6277c Mon Sep 17 00:00:00 2001
From: Michael Golowka <72365+pcman312@users.noreply.github.com>
Date: Fri, 12 Jun 2020 11:08:56 -0600
Subject: [PATCH 19/29] Validate physical MySQL database and table config
 values before using them (#9189)

* Validate database & table names prior to using it in SQL
---
 physical/mysql/mysql.go      |  79 +++++++++++++++++++++--
 physical/mysql/mysql_test.go | 121 ++++++++++++++++++++++++++++-------
 2 files changed, 173 insertions(+), 27 deletions(-)

diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go
index fce3f026488f..eb83923dd97f 100644
--- a/physical/mysql/mysql.go
+++ b/physical/mysql/mysql.go
@@ -15,8 +15,10 @@ import (
 	"strings"
 	"sync"
 	"time"
+	"unicode"
 
 	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
 
 	metrics "github.com/armon/go-metrics"
 	mysql "github.com/go-sql-driver/mysql"
@@ -59,15 +61,21 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
 		return nil, err
 	}
 
-	database, ok := conf["database"]
-	if !ok {
+	database := conf["database"]
+	if database == "" {
 		database = "vault"
 	}
-	table, ok := conf["table"]
-	if !ok {
+	table := conf["table"]
+	if table == "" {
 		table = "vault"
 	}
-	dbTable := "`" + database + "`.`" + table + "`"
+
+	err = validateDBTable(database, table)
+	if err != nil {
+		return nil, err
+	}
+
+	dbTable := fmt.Sprintf("`%s`.`%s`", database, table)
 
 	maxParStr, ok := conf["max_parallel"]
 	var maxParInt int
@@ -193,6 +201,67 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
 	return m, nil
 }
 
+// validateDBTable to prevent SQL injection attacks. This ensures that the database and table names only have valid
+// characters in them. MySQL allows for more characters that this will allow, but there isn't an easy way of
+// representing the full Unicode Basic Multilingual Plane to check against.
+// https://dev.mysql.com/doc/refman/5.7/en/identifiers.html
+func validateDBTable(db, table string) (err error) {
+	merr := &multierror.Error{}
+	merr = multierror.Append(merr, wrapErr("invalid database: %w", validate(db)))
+	merr = multierror.Append(merr, wrapErr("invalid table: %w", validate(table)))
+	return merr.ErrorOrNil()
+}
+
+func validate(name string) (err error) {
+	if name == "" {
+		return fmt.Errorf("missing name")
+	}
+	// From: https://dev.mysql.com/doc/refman/5.7/en/identifiers.html
+	// - Permitted characters in quoted identifiers include the full Unicode Basic Multilingual Plane (BMP), except U+0000:
+	//    ASCII: U+0001 .. U+007F
+	//    Extended: U+0080 .. U+FFFF
+	// - ASCII NUL (U+0000) and supplementary characters (U+10000 and higher) are not permitted in quoted or unquoted identifiers.
+	// - Identifiers may begin with a digit but unless quoted may not consist solely of digits.
+	// - Database, table, and column names cannot end with space characters.
+	//
+	// We are explicitly excluding all space characters (it's easier to deal with)
+	// The name will be quoted, so the all-digit requirement doesn't apply
+	runes := []rune(name)
+	validationErr := fmt.Errorf("invalid character found: can only include printable, non-space characters between [0x0001-0xFFFF]")
+	for _, r := range runes {
+		// U+0000 Explicitly disallowed
+		if r == 0x0000 {
+			return fmt.Errorf("invalid character: cannot include 0x0000")
+		}
+		// Cannot be above 0xFFFF
+		if r > 0xFFFF {
+			return fmt.Errorf("invalid character: cannot include any characters above 0xFFFF")
+		}
+		if r == '`' {
+			return fmt.Errorf("invalid character: cannot include '`' character")
+		}
+		if r == '\'' || r == '"' {
+			return fmt.Errorf("invalid character: cannot include quotes")
+		}
+		// We are excluding non-printable characters (not mentioned in the docs)
+		if !unicode.IsPrint(r) {
+			return validationErr
+		}
+		// We are excluding space characters (not mentioned in the docs)
+		if unicode.IsSpace(r) {
+			return validationErr
+		}
+	}
+	return nil
+}
+
+func wrapErr(message string, err error) error {
+	if err == nil {
+		return nil
+	}
+	return fmt.Errorf(message, err)
+}
+
 func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) {
 	var err error
 
diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go
index 51222639a24d..75d220b9ae4d 100644
--- a/physical/mysql/mysql_test.go
+++ b/physical/mysql/mysql_test.go
@@ -43,11 +43,11 @@ func TestMySQLPlaintextCatch(t *testing.T) {
 	logger := logging.NewVaultLogger(log.Debug)
 
 	NewMySQLBackend(map[string]string{
-		"address":  address,
-		"database": database,
-		"table":    table,
-		"username": username,
-		"password": password,
+		"address":                      address,
+		"database":                     database,
+		"table":                        table,
+		"username":                     username,
+		"password":                     password,
 		"plaintext_connection_allowed": "false",
 	}, logger)
 
@@ -82,11 +82,11 @@ func TestMySQLBackend(t *testing.T) {
 	logger := logging.NewVaultLogger(log.Debug)
 
 	b, err := NewMySQLBackend(map[string]string{
-		"address":  address,
-		"database": database,
-		"table":    table,
-		"username": username,
-		"password": password,
+		"address":                      address,
+		"database":                     database,
+		"table":                        table,
+		"username":                     username,
+		"password":                     password,
 		"plaintext_connection_allowed": "true",
 	}, logger)
 
@@ -128,12 +128,12 @@ func TestMySQLHABackend(t *testing.T) {
 	// Run vault tests
 	logger := logging.NewVaultLogger(log.Debug)
 	config := map[string]string{
-		"address":    address,
-		"database":   database,
-		"table":      table,
-		"username":   username,
-		"password":   password,
-		"ha_enabled": "true",
+		"address":                      address,
+		"database":                     database,
+		"table":                        table,
+		"username":                     username,
+		"password":                     password,
+		"ha_enabled":                   "true",
 		"plaintext_connection_allowed": "true",
 	}
 
@@ -176,12 +176,12 @@ func TestMySQLHABackend_LockFailPanic(t *testing.T) {
 	table := "test"
 	logger := logging.NewVaultLogger(log.Debug)
 	config := map[string]string{
-		"address":    cfg.Addr,
-		"database":   cfg.DBName,
-		"table":      table,
-		"username":   cfg.User,
-		"password":   cfg.Passwd,
-		"ha_enabled": "true",
+		"address":                      cfg.Addr,
+		"database":                     cfg.DBName,
+		"table":                        table,
+		"username":                     cfg.User,
+		"password":                     cfg.Passwd,
+		"ha_enabled":                   "true",
 		"plaintext_connection_allowed": "true",
 	}
 
@@ -265,3 +265,80 @@ func TestMySQLHABackend_LockFailPanic(t *testing.T) {
 		t.Fatalf("expected error, got none")
 	}
 }
+
+func TestValidateDBTable(t *testing.T) {
+	type testCase struct {
+		database  string
+		table     string
+		expectErr bool
+	}
+
+	tests := map[string]testCase{
+		"empty database & table":        {"", "", true},
+		"empty database":                {"", "a", true},
+		"empty table":                   {"a", "", true},
+		"ascii database":                {"abcde", "a", false},
+		"ascii table":                   {"a", "abcde", false},
+		"ascii database & table":        {"abcde", "abcde", false},
+		"only whitespace db":            {"     ", "a", true},
+		"only whitespace table":         {"a", "     ", true},
+		"whitespace prefix db":          {" bcde", "a", true},
+		"whitespace middle db":          {"ab de", "a", true},
+		"whitespace suffix db":          {"abcd ", "a", true},
+		"whitespace prefix table":       {"a", " bcde", true},
+		"whitespace middle table":       {"a", "ab de", true},
+		"whitespace suffix table":       {"a", "abcd ", true},
+		"backtick prefix db":            {"`bcde", "a", true},
+		"backtick middle db":            {"ab`de", "a", true},
+		"backtick suffix db":            {"abcd`", "a", true},
+		"backtick prefix table":         {"a", "`bcde", true},
+		"backtick middle table":         {"a", "ab`de", true},
+		"backtick suffix table":         {"a", "abcd`", true},
+		"single quote prefix db":        {"'bcde", "a", true},
+		"single quote middle db":        {"ab'de", "a", true},
+		"single quote suffix db":        {"abcd'", "a", true},
+		"single quote prefix table":     {"a", "'bcde", true},
+		"single quote middle table":     {"a", "ab'de", true},
+		"single quote suffix table":     {"a", "abcd'", true},
+		"double quote prefix db":        {`"bcde`, "a", true},
+		"double quote middle db":        {`ab"de`, "a", true},
+		"double quote suffix db":        {`abcd"`, "a", true},
+		"double quote prefix table":     {"a", `"bcde`, true},
+		"double quote middle table":     {"a", `ab"de`, true},
+		"double quote suffix table":     {"a", `abcd"`, true},
+		"0x0000 prefix db":              {str(0x0000, 'b', 'c'), "a", true},
+		"0x0000 middle db":              {str('a', 0x0000, 'c'), "a", true},
+		"0x0000 suffix db":              {str('a', 'b', 0x0000), "a", true},
+		"0x0000 prefix table":           {"a", str(0x0000, 'b', 'c'), true},
+		"0x0000 middle table":           {"a", str('a', 0x0000, 'c'), true},
+		"0x0000 suffix table":           {"a", str('a', 'b', 0x0000), true},
+		"unicode > 0xFFFF prefix db":    {str(0x10000, 'b', 'c'), "a", true},
+		"unicode > 0xFFFF middle db":    {str('a', 0x10000, 'c'), "a", true},
+		"unicode > 0xFFFF suffix db":    {str('a', 'b', 0x10000), "a", true},
+		"unicode > 0xFFFF prefix table": {"a", str(0x10000, 'b', 'c'), true},
+		"unicode > 0xFFFF middle table": {"a", str('a', 0x10000, 'c'), true},
+		"unicode > 0xFFFF suffix table": {"a", str('a', 'b', 0x10000), true},
+		"non-printable prefix db":       {str(0x0001, 'b', 'c'), "a", true},
+		"non-printable middle db":       {str('a', 0x0001, 'c'), "a", true},
+		"non-printable suffix db":       {str('a', 'b', 0x0001), "a", true},
+		"non-printable prefix table":    {"a", str(0x0001, 'b', 'c'), true},
+		"non-printable middle table":    {"a", str('a', 0x0001, 'c'), true},
+		"non-printable suffix table":    {"a", str('a', 'b', 0x0001), true},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			err := validateDBTable(test.database, test.table)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+		})
+	}
+}
+
+func str(r ...rune) string {
+	return string(r)
+}

From d11f15704df252bb724d1252559f152ce4735f51 Mon Sep 17 00:00:00 2001
From: Michael Golowka <72365+pcman312@users.noreply.github.com>
Date: Fri, 12 Jun 2020 11:09:38 -0600
Subject: [PATCH 20/29] Validate physical CockroachDB table config value before
 using it (#9191)

* Validate table name (and database if specified) prior to using it in SQL
---
 physical/cockroachdb/cockroachdb.go      |  76 +++-
 physical/cockroachdb/cockroachdb_test.go |  69 +++-
 physical/cockroachdb/keywords.go         | 440 +++++++++++++++++++++++
 3 files changed, 575 insertions(+), 10 deletions(-)
 create mode 100644 physical/cockroachdb/keywords.go

diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go
index 3900708e2734..587146f2a59f 100644
--- a/physical/cockroachdb/cockroachdb.go
+++ b/physical/cockroachdb/cockroachdb.go
@@ -8,11 +8,13 @@ import (
 	"strconv"
 	"strings"
 	"time"
+	"unicode"
 
 	metrics "github.com/armon/go-metrics"
 	"github.com/cockroachdb/cockroach-go/crdb"
 	"github.com/hashicorp/errwrap"
 	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/vault/sdk/helper/strutil"
 	"github.com/hashicorp/vault/sdk/physical"
 
@@ -21,8 +23,14 @@ import (
 )
 
 // Verify CockroachDBBackend satisfies the correct interfaces
-var _ physical.Backend = (*CockroachDBBackend)(nil)
-var _ physical.Transactional = (*CockroachDBBackend)(nil)
+var (
+	_ physical.Backend       = (*CockroachDBBackend)(nil)
+	_ physical.Transactional = (*CockroachDBBackend)(nil)
+)
+
+const (
+	defaultTableName = "vault_kv_store"
+)
 
 // CockroachDBBackend Backend is a physical backend that stores data
 // within a CockroachDB database.
@@ -44,14 +52,18 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.
 		return nil, fmt.Errorf("missing connection_url")
 	}
 
-	dbTable, ok := conf["table"]
-	if !ok {
-		dbTable = "vault_kv_store"
+	dbTable := conf["table"]
+	if dbTable == "" {
+		dbTable = defaultTableName
+	}
+
+	err := validateDBTable(dbTable)
+	if err != nil {
+		return nil, errwrap.Wrapf("invalid table: {{err}}", err)
 	}
 
 	maxParStr, ok := conf["max_parallel"]
 	var maxParInt int
-	var err error
 	if ok {
 		maxParInt, err = strconv.Atoi(maxParStr)
 		if err != nil {
@@ -239,3 +251,55 @@ func (c *CockroachDBBackend) transaction(tx *sql.Tx, txns []*physical.TxnEntry)
 	}
 	return nil
 }
+
+// validateDBTable validates dbTable against the CockroachDB rules for table names:
+// https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers
+//
+//   - All values that accept an identifier must:
+//     - Begin with a Unicode letter or an underscore (_). Subsequent characters can be letters,
+//       underscores, digits (0-9), or dollar signs ($).
+//   - Not equal any SQL keyword unless the keyword is accepted by the element's syntax. For example,
+//     name accepts Unreserved or Column Name keywords.
+//
+// The docs do state that we can bypass these rules with double quotes; however, it
+// is safer to just require these rules across the board.
+func validateDBTable(dbTable string) (err error) {
+	// Check if this is 'database.table' formatted. If so, split the two apart and
+	// validate each part separately.
+	split := strings.SplitN(dbTable, ".", 2)
+	if len(split) == 2 {
+		merr := &multierror.Error{}
+		merr = multierror.Append(merr, wrapErr("invalid database: %w", validateDBTable(split[0])))
+		merr = multierror.Append(merr, wrapErr("invalid table name: %w", validateDBTable(split[1])))
+		return merr.ErrorOrNil()
+	}
+
+	// Disallow SQL keywords as the table name
+	if sqlKeywords[strings.ToUpper(dbTable)] {
+		return fmt.Errorf("name must not be a SQL keyword")
+	}
+
+	runes := []rune(dbTable)
+	for i, r := range runes {
+		if i == 0 && !unicode.IsLetter(r) && r != '_' {
+			return fmt.Errorf("must use a letter or an underscore as the first character")
+		}
+
+		if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) && r != '$' {
+			return fmt.Errorf("must only contain letters, underscores, digits, and dollar signs")
+		}
+
+		if r == '`' || r == '\'' || r == '"' {
+			return fmt.Errorf("cannot contain backticks, single quotes, or double quotes")
+		}
+	}
+
+	return nil
+}
+
+func wrapErr(message string, err error) error {
+	if err == nil {
+		return nil
+	}
+	return fmt.Errorf(message, err)
+}
diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go
index 5760f6aa109f..43853d560743 100644
--- a/physical/cockroachdb/cockroachdb_test.go
+++ b/physical/cockroachdb/cockroachdb_test.go
@@ -18,8 +18,9 @@ import (
 func prepareCockroachDBTestContainer(t *testing.T) (cleanup func(), retURL, tableName string) {
 	tableName = os.Getenv("CR_TABLE")
 	if tableName == "" {
-		tableName = "vault_kv_store"
+		tableName = defaultTableName
 	}
+	t.Logf("Table name: %s", tableName)
 	retURL = os.Getenv("CR_URL")
 	if retURL != "" {
 		return func() {}, retURL, tableName
@@ -45,8 +46,8 @@ func prepareCockroachDBTestContainer(t *testing.T) (cleanup func(), retURL, tabl
 	}
 
 	retURL = fmt.Sprintf("postgresql://root@localhost:%s/?sslmode=disable", resource.GetPort("26257/tcp"))
-	database := "database"
-	tableName = database + ".vault_kv"
+	database := "vault"
+	tableName = fmt.Sprintf("%s.%s", database, tableName)
 
 	// exponential backoff-retry
 	if err = pool.Retry(func() error {
@@ -56,7 +57,7 @@ func prepareCockroachDBTestContainer(t *testing.T) (cleanup func(), retURL, tabl
 			return err
 		}
 		defer db.Close()
-		_, err = db.Exec("CREATE DATABASE database")
+		_, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", database))
 		return err
 	}); err != nil {
 		cleanup()
@@ -99,3 +100,63 @@ func truncate(t *testing.T, b physical.Backend) {
 		t.Fatalf("Failed to drop table: %v", err)
 	}
 }
+
+func TestValidateDBTable(t *testing.T) {
+	type testCase struct {
+		table     string
+		expectErr bool
+	}
+
+	tests := map[string]testCase{
+		"first character is letter":     {"abcdef", false},
+		"first character is underscore": {"_bcdef", false},
+		"exclamation point":             {"ab!def", true},
+		"at symbol":                     {"ab@def", true},
+		"hash":                          {"ab#def", true},
+		"percent":                       {"ab%def", true},
+		"carrot":                        {"ab^def", true},
+		"ampersand":                     {"ab&def", true},
+		"star":                          {"ab*def", true},
+		"left paren":                    {"ab(def", true},
+		"right paren":                   {"ab)def", true},
+		"dash":                          {"ab-def", true},
+		"digit":                         {"a123ef", false},
+		"dollar end":                    {"abcde$", false},
+		"dollar middle":                 {"ab$def", false},
+		"dollar start":                  {"$bcdef", true},
+		"backtick prefix":               {"`bcdef", true},
+		"backtick middle":               {"ab`def", true},
+		"backtick suffix":               {"abcde`", true},
+		"single quote prefix":           {"'bcdef", true},
+		"single quote middle":           {"ab'def", true},
+		"single quote suffix":           {"abcde'", true},
+		"double quote prefix":           {`"bcdef`, true},
+		"double quote middle":           {`ab"def`, true},
+		"double quote suffix":           {`abcde"`, true},
+		"underscore with all runes":     {"_bcd123__a__$", false},
+		"all runes":                     {"abcd123__a__$", false},
+		"default table name":            {defaultTableName, false},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			err := validateDBTable(test.table)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+		})
+		t.Run(fmt.Sprintf("database: %s", name), func(t *testing.T) {
+			dbTable := fmt.Sprintf("%s.%s", test.table, test.table)
+			err := validateDBTable(dbTable)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+		})
+	}
+}
diff --git a/physical/cockroachdb/keywords.go b/physical/cockroachdb/keywords.go
new file mode 100644
index 000000000000..b7ecb05d7dd0
--- /dev/null
+++ b/physical/cockroachdb/keywords.go
@@ -0,0 +1,440 @@
+package cockroachdb
+
+var (
+	// sqlKeywords is a reference of all of the keywords that we do not allow for use as the table name
+	// Referenced from:
+	// https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers
+	// -> https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#keywords
+	// -> https://www.cockroachlabs.com/docs/stable/sql-grammar.html
+	sqlKeywords = map[string]bool{
+		// reserved_keyword
+		// https://www.cockroachlabs.com/docs/stable/sql-grammar.html#reserved_keyword
+		"ALL":               true,
+		"ANALYSE":           true,
+		"ANALYZE":           true,
+		"AND":               true,
+		"ANY":               true,
+		"ARRAY":             true,
+		"AS":                true,
+		"ASC":               true,
+		"ASYMMETRIC":        true,
+		"BOTH":              true,
+		"CASE":              true,
+		"CAST":              true,
+		"CHECK":             true,
+		"COLLATE":           true,
+		"COLUMN":            true,
+		"CONCURRENTLY":      true,
+		"CONSTRAINT":        true,
+		"CREATE":            true,
+		"CURRENT_CATALOG":   true,
+		"CURRENT_DATE":      true,
+		"CURRENT_ROLE":      true,
+		"CURRENT_SCHEMA":    true,
+		"CURRENT_TIME":      true,
+		"CURRENT_TIMESTAMP": true,
+		"CURRENT_USER":      true,
+		"DEFAULT":           true,
+		"DEFERRABLE":        true,
+		"DESC":              true,
+		"DISTINCT":          true,
+		"DO":                true,
+		"ELSE":              true,
+		"END":               true,
+		"EXCEPT":            true,
+		"FALSE":             true,
+		"FETCH":             true,
+		"FOR":               true,
+		"FOREIGN":           true,
+		"FROM":              true,
+		"GRANT":             true,
+		"GROUP":             true,
+		"HAVING":            true,
+		"IN":                true,
+		"INITIALLY":         true,
+		"INTERSECT":         true,
+		"INTO":              true,
+		"LATERAL":           true,
+		"LEADING":           true,
+		"LIMIT":             true,
+		"LOCALTIME":         true,
+		"LOCALTIMESTAMP":    true,
+		"NOT":               true,
+		"NULL":              true,
+		"OFFSET":            true,
+		"ON":                true,
+		"ONLY":              true,
+		"OR":                true,
+		"ORDER":             true,
+		"PLACING":           true,
+		"PRIMARY":           true,
+		"REFERENCES":        true,
+		"RETURNING":         true,
+		"SELECT":            true,
+		"SESSION_USER":      true,
+		"SOME":              true,
+		"SYMMETRIC":         true,
+		"TABLE":             true,
+		"THEN":              true,
+		"TO":                true,
+		"TRAILING":          true,
+		"TRUE":              true,
+		"UNION":             true,
+		"UNIQUE":            true,
+		"USER":              true,
+		"USING":             true,
+		"VARIADIC":          true,
+		"WHEN":              true,
+		"WHERE":             true,
+		"WINDOW":            true,
+		"WITH":              true,
+
+		// cockroachdb_extra_reserved_keyword
+		// https://www.cockroachlabs.com/docs/stable/sql-grammar.html#cockroachdb_extra_reserved_keyword
+		"INDEX":   true,
+		"NOTHING": true,
+
+		// type_func_name_keyword
+		// https://www.cockroachlabs.com/docs/stable/sql-grammar.html#type_func_name_keyword
+		"COLLATION": true,
+		"CROSS":     true,
+		"FULL":      true,
+		"INNER":     true,
+		"ILIKE":     true,
+		"IS":        true,
+		"ISNULL":    true,
+		"JOIN":      true,
+		"LEFT":      true,
+		"LIKE":      true,
+		"NATURAL":   true,
+		"NONE":      true,
+		"NOTNULL":   true,
+		"OUTER":     true,
+		"OVERLAPS":  true,
+		"RIGHT":     true,
+		"SIMILAR":   true,
+		"FAMILY":    true,
+
+		// col_name_keyword
+		// https://www.cockroachlabs.com/docs/stable/sql-grammar.html#col_name_keyword
+		"ANNOTATE_TYPE":    true,
+		"BETWEEN":          true,
+		"BIGINT":           true,
+		"BIT":              true,
+		"BOOLEAN":          true,
+		"CHAR":             true,
+		"CHARACTER":        true,
+		"CHARACTERISTICS":  true,
+		"COALESCE":         true,
+		"DEC":              true,
+		"DECIMAL":          true,
+		"EXISTS":           true,
+		"EXTRACT":          true,
+		"EXTRACT_DURATION": true,
+		"FLOAT":            true,
+		"GREATEST":         true,
+		"GROUPING":         true,
+		"IF":               true,
+		"IFERROR":          true,
+		"IFNULL":           true,
+		"INT":              true,
+		"INTEGER":          true,
+		"INTERVAL":         true,
+		"ISERROR":          true,
+		"LEAST":            true,
+		"NULLIF":           true,
+		"NUMERIC":          true,
+		"OUT":              true,
+		"OVERLAY":          true,
+		"POSITION":         true,
+		"PRECISION":        true,
+		"REAL":             true,
+		"ROW":              true,
+		"SMALLINT":         true,
+		"SUBSTRING":        true,
+		"TIME":             true,
+		"TIMETZ":           true,
+		"TIMESTAMP":        true,
+		"TIMESTAMPTZ":      true,
+		"TREAT":            true,
+		"TRIM":             true,
+		"VALUES":           true,
+		"VARBIT":           true,
+		"VARCHAR":          true,
+		"VIRTUAL":          true,
+		"WORK":             true,
+
+		// unreserved_keyword
+		// https://www.cockroachlabs.com/docs/stable/sql-grammar.html#unreserved_keyword
+		"ABORT":                     true,
+		"ACTION":                    true,
+		"ADD":                       true,
+		"ADMIN":                     true,
+		"AGGREGATE":                 true,
+		"ALTER":                     true,
+		"AT":                        true,
+		"AUTOMATIC":                 true,
+		"AUTHORIZATION":             true,
+		"BACKUP":                    true,
+		"BEGIN":                     true,
+		"BIGSERIAL":                 true,
+		"BLOB":                      true,
+		"BOOL":                      true,
+		"BUCKET_COUNT":              true,
+		"BUNDLE":                    true,
+		"BY":                        true,
+		"BYTEA":                     true,
+		"BYTES":                     true,
+		"CACHE":                     true,
+		"CANCEL":                    true,
+		"CASCADE":                   true,
+		"CHANGEFEED":                true,
+		"CLUSTER":                   true,
+		"COLUMNS":                   true,
+		"COMMENT":                   true,
+		"COMMIT":                    true,
+		"COMMITTED":                 true,
+		"COMPACT":                   true,
+		"COMPLETE":                  true,
+		"CONFLICT":                  true,
+		"CONFIGURATION":             true,
+		"CONFIGURATIONS":            true,
+		"CONFIGURE":                 true,
+		"CONSTRAINTS":               true,
+		"CONVERSION":                true,
+		"COPY":                      true,
+		"COVERING":                  true,
+		"CREATEROLE":                true,
+		"CUBE":                      true,
+		"CURRENT":                   true,
+		"CYCLE":                     true,
+		"DATA":                      true,
+		"DATABASE":                  true,
+		"DATABASES":                 true,
+		"DATE":                      true,
+		"DAY":                       true,
+		"DEALLOCATE":                true,
+		"DELETE":                    true,
+		"DEFERRED":                  true,
+		"DISCARD":                   true,
+		"DOMAIN":                    true,
+		"DOUBLE":                    true,
+		"DROP":                      true,
+		"ENCODING":                  true,
+		"ENUM":                      true,
+		"ESCAPE":                    true,
+		"EXCLUDE":                   true,
+		"EXECUTE":                   true,
+		"EXPERIMENTAL":              true,
+		"EXPERIMENTAL_AUDIT":        true,
+		"EXPERIMENTAL_FINGERPRINTS": true,
+		"EXPERIMENTAL_RELOCATE":     true,
+		"EXPERIMENTAL_REPLICA":      true,
+		"EXPIRATION":                true,
+		"EXPLAIN":                   true,
+		"EXPORT":                    true,
+		"EXTENSION":                 true,
+		"FILES":                     true,
+		"FILTER":                    true,
+		"FIRST":                     true,
+		"FLOAT4":                    true,
+		"FLOAT8":                    true,
+		"FOLLOWING":                 true,
+		"FORCE_INDEX":               true,
+		"FUNCTION":                  true,
+		"GLOBAL":                    true,
+		"GRANTS":                    true,
+		"GROUPS":                    true,
+		"HASH":                      true,
+		"HIGH":                      true,
+		"HISTOGRAM":                 true,
+		"HOUR":                      true,
+		"IMMEDIATE":                 true,
+		"IMPORT":                    true,
+		"INCLUDE":                   true,
+		"INCREMENT":                 true,
+		"INCREMENTAL":               true,
+		"INDEXES":                   true,
+		"INET":                      true,
+		"INJECT":                    true,
+		"INSERT":                    true,
+		"INT2":                      true,
+		"INT2VECTOR":                true,
+		"INT4":                      true,
+		"INT8":                      true,
+		"INT64":                     true,
+		"INTERLEAVE":                true,
+		"INVERTED":                  true,
+		"ISOLATION":                 true,
+		"JOB":                       true,
+		"JOBS":                      true,
+		"JSON":                      true,
+		"JSONB":                     true,
+		"KEY":                       true,
+		"KEYS":                      true,
+		"KV":                        true,
+		"LANGUAGE":                  true,
+		"LAST":                      true,
+		"LC_COLLATE":                true,
+		"LC_CTYPE":                  true,
+		"LEASE":                     true,
+		"LESS":                      true,
+		"LEVEL":                     true,
+		"LIST":                      true,
+		"LOCAL":                     true,
+		"LOCKED":                    true,
+		"LOGIN":                     true,
+		"LOOKUP":                    true,
+		"LOW":                       true,
+		"MATCH":                     true,
+		"MATERIALIZED":              true,
+		"MAXVALUE":                  true,
+		"MERGE":                     true,
+		"MINUTE":                    true,
+		"MINVALUE":                  true,
+		"MONTH":                     true,
+		"NAMES":                     true,
+		"NAN":                       true,
+		"NAME":                      true,
+		"NEXT":                      true,
+		"NO":                        true,
+		"NORMAL":                    true,
+		"NO_INDEX_JOIN":             true,
+		"NOCREATEROLE":              true,
+		"NOLOGIN":                   true,
+		"NOWAIT":                    true,
+		"NULLS":                     true,
+		"IGNORE_FOREIGN_KEYS":       true,
+		"OF":                        true,
+		"OFF":                       true,
+		"OID":                       true,
+		"OIDS":                      true,
+		"OIDVECTOR":                 true,
+		"OPERATOR":                  true,
+		"OPT":                       true,
+		"OPTION":                    true,
+		"OPTIONS":                   true,
+		"ORDINALITY":                true,
+		"OTHERS":                    true,
+		"OVER":                      true,
+		"OWNED":                     true,
+		"PARENT":                    true,
+		"PARTIAL":                   true,
+		"PARTITION":                 true,
+		"PARTITIONS":                true,
+		"PASSWORD":                  true,
+		"PAUSE":                     true,
+		"PHYSICAL":                  true,
+		"PLAN":                      true,
+		"PLANS":                     true,
+		"PRECEDING":                 true,
+		"PREPARE":                   true,
+		"PRESERVE":                  true,
+		"PRIORITY":                  true,
+		"PUBLIC":                    true,
+		"PUBLICATION":               true,
+		"QUERIES":                   true,
+		"QUERY":                     true,
+		"RANGE":                     true,
+		"RANGES":                    true,
+		"READ":                      true,
+		"RECURSIVE":                 true,
+		"REF":                       true,
+		"REGCLASS":                  true,
+		"REGPROC":                   true,
+		"REGPROCEDURE":              true,
+		"REGNAMESPACE":              true,
+		"REGTYPE":                   true,
+		"REINDEX":                   true,
+		"RELEASE":                   true,
+		"RENAME":                    true,
+		"REPEATABLE":                true,
+		"REPLACE":                   true,
+		"RESET":                     true,
+		"RESTORE":                   true,
+		"RESTRICT":                  true,
+		"RESUME":                    true,
+		"REVOKE":                    true,
+		"ROLE":                      true,
+		"ROLES":                     true,
+		"ROLLBACK":                  true,
+		"ROLLUP":                    true,
+		"ROWS":                      true,
+		"RULE":                      true,
+		"SETTING":                   true,
+		"SETTINGS":                  true,
+		"STATUS":                    true,
+		"SAVEPOINT":                 true,
+		"SCATTER":                   true,
+		"SCHEMA":                    true,
+		"SCHEMAS":                   true,
+		"SCRUB":                     true,
+		"SEARCH":                    true,
+		"SECOND":                    true,
+		"SERIAL":                    true,
+		"SERIALIZABLE":              true,
+		"SERIAL2":                   true,
+		"SERIAL4":                   true,
+		"SERIAL8":                   true,
+		"SEQUENCE":                  true,
+		"SEQUENCES":                 true,
+		"SERVER":                    true,
+		"SESSION":                   true,
+		"SESSIONS":                  true,
+		"SET":                       true,
+		"SHARE":                     true,
+		"SHOW":                      true,
+		"SIMPLE":                    true,
+		"SKIP":                      true,
+		"SMALLSERIAL":               true,
+		"SNAPSHOT":                  true,
+		"SPLIT":                     true,
+		"SQL":                       true,
+		"START":                     true,
+		"STATISTICS":                true,
+		"STDIN":                     true,
+		"STORE":                     true,
+		"STORED":                    true,
+		"STORING":                   true,
+		"STRICT":                    true,
+		"STRING":                    true,
+		"SUBSCRIPTION":              true,
+		"SYNTAX":                    true,
+		"SYSTEM":                    true,
+		"TABLES":                    true,
+		"TEMP":                      true,
+		"TEMPLATE":                  true,
+		"TEMPORARY":                 true,
+		"TESTING_RELOCATE":          true,
+		"TEXT":                      true,
+		"TIES":                      true,
+		"TRACE":                     true,
+		"TRANSACTION":               true,
+		"TRIGGER":                   true,
+		"TRUNCATE":                  true,
+		"TRUSTED":                   true,
+		"TYPE":                      true,
+		"THROTTLING":                true,
+		"UNBOUNDED":                 true,
+		"UNCOMMITTED":               true,
+		"UNKNOWN":                   true,
+		"UNLOGGED":                  true,
+		"UNSPLIT":                   true,
+		"UNTIL":                     true,
+		"UPDATE":                    true,
+		"UPSERT":                    true,
+		"UUID":                      true,
+		"USE":                       true,
+		"USERS":                     true,
+		"VALID":                     true,
+		"VALIDATE":                  true,
+		"VALUE":                     true,
+		"VARYING":                   true,
+		"VIEW":                      true,
+		"WITHIN":                    true,
+		"WITHOUT":                   true,
+		"WRITE":                     true,
+		"YEAR":                      true,
+		"ZONE":                      true,
+	}
+)

From 60aef2b51ce1dc61e8b54544c7b0585f27302e01 Mon Sep 17 00:00:00 2001
From: Jim Kalafut <jkalafut@hashicorp.com>
Date: Sun, 14 Jun 2020 20:53:36 -0700
Subject: [PATCH 21/29] Minor transform docs rewording (#9223)

---
 website/pages/docs/secrets/transform/index.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/pages/docs/secrets/transform/index.mdx b/website/pages/docs/secrets/transform/index.mdx
index b4c9d4452310..cbc9f43e4d1e 100644
--- a/website/pages/docs/secrets/transform/index.mdx
+++ b/website/pages/docs/secrets/transform/index.mdx
@@ -167,7 +167,7 @@ In summary, there are three ways in which the tweak value may be sourced:
   not need to be stored separately. This mode should only be used if the values
   being encoded are sufficiently unique.
 
-Your team and organization should weigh in on the trade-offs when it comes to
+Your team and organization should weigh the trade-offs when it comes to
 choosing the proper tweak source to use.
 
 #### Input Limits

From 6a934e26e5198614572b8606652a4102d425113d Mon Sep 17 00:00:00 2001
From: Michael Golowka <72365+pcman312@users.noreply.github.com>
Date: Mon, 15 Jun 2020 10:36:17 -0600
Subject: [PATCH 22/29] Add password policies to Active Directory secret engine
 (#9144)

* Also updates AD docs to reflect password policies
---
 go.mod                                        |  4 +-
 go.sum                                        |  6 +-
 .../hashicorp/raft/.golangci-lint.yml         | 49 ++++++++++++
 .../vault-plugin-secrets-ad/plugin/backend.go |  7 +-
 .../plugin/checkout_handler.go                |  7 +-
 .../vault-plugin-secrets-ad/plugin/config.go  | 76 ++++++++++++++++++
 .../plugin/engineconf.go                      | 11 ---
 .../plugin/passwordconf.go                    | 17 ----
 .../plugin/passwords.go                       | 58 ++++++++++++++
 .../plugin/path_config.go                     | 66 +++++++++++-----
 .../plugin/path_creds.go                      |  3 +-
 .../plugin/path_roles.go                      |  2 +-
 .../plugin/path_rotate_root_creds.go          | 16 +---
 .../plugin/util/passwords.go                  | 77 -------------------
 vendor/modules.txt                            |  4 +-
 website/pages/api-docs/secret/ad/index.mdx    | 57 +++++++++++++-
 website/pages/docs/secrets/ad/index.mdx       | 16 +++-
 17 files changed, 312 insertions(+), 164 deletions(-)
 create mode 100644 vendor/github.com/hashicorp/raft/.golangci-lint.yml
 create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/config.go
 delete mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go
 delete mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go
 create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwords.go
 delete mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go

diff --git a/go.mod b/go.mod
index a418c691f547..9b315839c318 100644
--- a/go.mod
+++ b/go.mod
@@ -83,7 +83,7 @@ require (
 	github.com/hashicorp/vault-plugin-auth-oci v0.5.4
 	github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4
 	github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1.0.20200521152755-9cf156a44f9c
-	github.com/hashicorp/vault-plugin-secrets-ad v0.6.4-beta1.0.20200518124111-3dceeb3ce90e
+	github.com/hashicorp/vault-plugin-secrets-ad v0.6.6
 	github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5
 	github.com/hashicorp/vault-plugin-secrets-azure v0.5.6
 	github.com/hashicorp/vault-plugin-secrets-gcp v0.6.2
@@ -92,7 +92,7 @@ require (
 	github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2
 	github.com/hashicorp/vault-plugin-secrets-openldap v0.1.3
 	github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f
-	github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267
+	github.com/hashicorp/vault/sdk v0.1.14-0.20200527182800-ad90e0b39d2f
 	github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
 	github.com/jcmturner/gokrb5/v8 v8.0.0
 	github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f
diff --git a/go.sum b/go.sum
index a77b4318b88f..497135683f4f 100644
--- a/go.sum
+++ b/go.sum
@@ -471,8 +471,6 @@ github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL
 github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
 github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 h1:p+2EISNdFCnD9R+B4xCiqSn429MCFtvM41aHJDJ6qW4=
 github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
-github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c=
-github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
 github.com/hashicorp/raft v1.1.3-0.20200501224250-c95aa91e604e h1:hMRRBhY9cayPJzEgNGNAl74TJ0rwY3Csbr43ogjKh1I=
 github.com/hashicorp/raft v1.1.3-0.20200501224250-c95aa91e604e/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
 github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
@@ -504,8 +502,8 @@ github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4 h1:YE4qndazWmYGp
 github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4/go.mod h1:QjGrrxcRXv/4XkEZAlM0VMZEa3uxKAICFqDj27FP/48=
 github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1.0.20200521152755-9cf156a44f9c h1:9pXwe7sEVhZ5C3U6egIrKaZBb5lD0FvLIjISEvpbQQA=
 github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1.0.20200521152755-9cf156a44f9c/go.mod h1:HTXNzFr/SAVtJOs7jz0XxZ69jlKtaceEwp37l86UAQ0=
-github.com/hashicorp/vault-plugin-secrets-ad v0.6.4-beta1.0.20200518124111-3dceeb3ce90e h1:0GK1BNBfglD2sydZ4XXMjJElhY8bC2TDdc0vk1Q9zbA=
-github.com/hashicorp/vault-plugin-secrets-ad v0.6.4-beta1.0.20200518124111-3dceeb3ce90e/go.mod h1:SCsKcChP8yrtOHXOeTD7oRk0oflj3IxA9y9zTOGtQ8s=
+github.com/hashicorp/vault-plugin-secrets-ad v0.6.6 h1:GskxrCCL2flrBtnAeOsBV+whCaqnnM/+t/h1IyqukNo=
+github.com/hashicorp/vault-plugin-secrets-ad v0.6.6/go.mod h1:L5L6NoJFxRvgxhuA2sWhloc3sbgmE7KxhNcoRxcaH9U=
 github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5 h1:BOOtSls+BQ1EtPmpE9LoqZztsEZ1fRWVSkHWtRIrCB4=
 github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5/go.mod h1:gAoReoUpBHaBwkxQqTK7FY8nQC0MuaZHLiW5WOSny5g=
 github.com/hashicorp/vault-plugin-secrets-azure v0.5.6 h1:4PgQ5rCT29wW5PMyebEhPkEYuR5s+SnInuZz3x2cP50=
diff --git a/vendor/github.com/hashicorp/raft/.golangci-lint.yml b/vendor/github.com/hashicorp/raft/.golangci-lint.yml
new file mode 100644
index 000000000000..a021e196ee34
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/.golangci-lint.yml
@@ -0,0 +1,49 @@
+run:
+  deadline: 5m
+
+linters-settings:
+  govet:
+    check-shadowing: true
+  golint:
+    min-confidence: 0
+
+linters:
+  disable-all: true
+  enable:
+    - gofmt
+    #- golint
+    - govet
+    #- varcheck
+    #- typecheck
+    #- gosimple
+
+issues:
+  exclude-use-default: false
+  exclude:
+      # ignore the false positive errors resulting from not including a comment above every `package` keyword
+    - should have a package comment, unless it's in another file for this package (golint)
+      # golint: Annoying issue about not having a comment. The rare codebase has such comments
+      # - (comment on exported (method|function|type|const)|should have( a package)? comment|comment should be of the form)
+      # errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
+    - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked
+
+      # golint: False positive when tests are defined in package 'test'
+    - func name will be used as test\.Test.* by other packages, and that stutters; consider calling this
+
+      # staticcheck: Developers tend to write in C-style with an 
+      # explicit 'break' in a 'switch', so it's ok to ignore
+    - ineffective break statement. Did you mean to break out of the outer loop
+      # gosec: Too many false-positives on 'unsafe' usage
+    - Use of unsafe calls should be audited
+
+    # gosec: Too many false-positives for parametrized shell calls
+    - Subprocess launch(ed with variable|ing should be audited)
+
+    # gosec: Duplicated errcheck checks
+    - G104
+
+    # gosec: Too many issues in popular repos
+    - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)
+
+    # gosec: False positive is triggered by 'src, err := ioutil.ReadFile(filename)'
+    - Potential file inclusion via variable
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go
index 6174e159f6f1..f1ddbb3ccadf 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go
@@ -14,21 +14,22 @@ import (
 )
 
 func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
-	backend := newBackend(util.NewSecretsClient(conf.Logger))
+	backend := newBackend(util.NewSecretsClient(conf.Logger), conf.System)
 	if err := backend.Setup(ctx, conf); err != nil {
 		return nil, err
 	}
 	return backend, nil
 }
 
-func newBackend(client secretsClient) *backend {
+func newBackend(client secretsClient, passwordGenerator passwordGenerator) *backend {
 	adBackend := &backend{
 		client:         client,
 		roleCache:      cache.New(roleCacheExpiration, roleCacheCleanup),
 		credCache:      cache.New(credCacheExpiration, credCacheCleanup),
 		rotateRootLock: new(int32),
 		checkOutHandler: &checkOutHandler{
-			client: client,
+			client:            client,
+			passwordGenerator: passwordGenerator,
 		},
 		checkOutLocks: locksutil.CreateLocks(),
 	}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/checkout_handler.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/checkout_handler.go
index 5b4c729aa108..61d0214466be 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/checkout_handler.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/checkout_handler.go
@@ -3,7 +3,7 @@ package plugin
 import (
 	"context"
 	"errors"
-	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
+
 	"github.com/hashicorp/vault/sdk/logical"
 )
 
@@ -32,7 +32,8 @@ type CheckOut struct {
 // checkOutHandler manages checkouts. It's not thread-safe and expects the caller to handle locking because
 // locking may span multiple calls.
 type checkOutHandler struct {
-	client secretsClient
+	client            secretsClient
+	passwordGenerator passwordGenerator
 }
 
 // CheckOut attempts to check out a service account. If the account is unavailable, it returns
@@ -98,7 +99,7 @@ func (h *checkOutHandler) CheckIn(ctx context.Context, storage logical.Storage,
 	if engineConf == nil {
 		return errors.New("the config is currently unset")
 	}
-	newPassword, err := util.GeneratePassword(engineConf.PasswordConf.Formatter, engineConf.PasswordConf.Length)
+	newPassword, err := GeneratePassword(ctx, engineConf.PasswordConf, h.passwordGenerator)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/config.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/config.go
new file mode 100644
index 000000000000..90de64e91279
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/config.go
@@ -0,0 +1,76 @@
+package plugin
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
+)
+
+type configuration struct {
+	PasswordConf          passwordConf
+	ADConf                *client.ADConf
+	LastRotationTolerance int
+}
+
+type passwordConf struct {
+	TTL    int `json:"ttl"`
+	MaxTTL int `json:"max_ttl"`
+
+	// Mutually exclusive with Length and Formatter
+	PolicyName string `json:"password_policy"`
+
+	// Length of the password to generate. Mutually exclusive with PolicyName.
+	// Deprecated
+	Length int `json:"length"`
+
+	// Formatter describes how to format a password. This allows for prefixes and suffixes on the password.
+	// Mutually exclusive with PolicyName.
+	// Deprecated
+	Formatter string `json:"formatter"`
+}
+
+func (c passwordConf) Map() map[string]interface{} {
+	return map[string]interface{}{
+		"ttl":         c.TTL,
+		"max_ttl":     c.MaxTTL,
+		"length":      c.Length,
+		"formatter":   c.Formatter,
+		"policy_name": c.PolicyName,
+	}
+}
+
+// validate returns an error if the configuration is invalid/unable to process for whatever reason.
+func (c passwordConf) validate() error {
+	if c.PolicyName != "" &&
+		(c.Length != 0 || c.Formatter != "") {
+		return fmt.Errorf("cannot set password_policy and either length or formatter")
+	}
+
+	// Don't validate the length and formatter fields if a policy is set
+	if c.PolicyName != "" {
+		return nil
+	}
+
+	// Check for if there's no formatter.
+	if c.Formatter == "" {
+		if c.Length < len(passwordComplexityPrefix)+minimumLengthOfComplexString {
+			return fmt.Errorf("it's not possible to generate a _secure_ password of length %d, please boost length to %d, though Vault recommends higher",
+				c.Length, minimumLengthOfComplexString+len(passwordComplexityPrefix))
+		}
+		return nil
+	}
+
+	// Check for if there is a formatter.
+	if lengthOfPassword(c.Formatter, c.Length) < minimumLengthOfComplexString {
+		return fmt.Errorf("since the desired length is %d, it isn't possible to generate a sufficiently complex password - please increase desired length or remove characters from the formatter", c.Length)
+	}
+	numPwdFields := strings.Count(c.Formatter, pwdFieldTmpl)
+	if numPwdFields == 0 {
+		return fmt.Errorf("%s must contain password replacement field of %s", c.Formatter, pwdFieldTmpl)
+	}
+	if numPwdFields > 1 {
+		return fmt.Errorf("%s must contain ONE password replacement field of %s", c.Formatter, pwdFieldTmpl)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go
deleted file mode 100644
index b2f4837ccc2e..000000000000
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package plugin
-
-import (
-	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
-)
-
-type configuration struct {
-	PasswordConf          *passwordConf
-	ADConf                *client.ADConf
-	LastRotationTolerance int
-}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go
deleted file mode 100644
index b43d4e12ff18..000000000000
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package plugin
-
-type passwordConf struct {
-	TTL       int    `json:"ttl"`
-	MaxTTL    int    `json:"max_ttl"`
-	Length    int    `json:"length"`
-	Formatter string `json:"formatter"`
-}
-
-func (c *passwordConf) Map() map[string]interface{} {
-	return map[string]interface{}{
-		"ttl":       c.TTL,
-		"max_ttl":   c.MaxTTL,
-		"length":    c.Length,
-		"formatter": c.Formatter,
-	}
-}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwords.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwords.go
new file mode 100644
index 000000000000..c349f7229e0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwords.go
@@ -0,0 +1,58 @@
+package plugin
+
+import (
+	"context"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/helper/base62"
+)
+
+var (
+	// Per https://en.wikipedia.org/wiki/Password_strength#Guidelines_for_strong_passwords
+	minimumLengthOfComplexString = 8
+
+	passwordComplexityPrefix = "?@09AZ"
+	pwdFieldTmpl             = "{{PASSWORD}}"
+)
+
+type passwordGenerator interface {
+	GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error)
+}
+
+// GeneratePassword from the password configuration. This will either generate based on a password policy
+// or from the provided formatter. The formatter/length options are deprecated.
+func GeneratePassword(ctx context.Context, passConf passwordConf, generator passwordGenerator) (password string, err error) {
+	err = passConf.validate()
+	if err != nil {
+		return "", err
+	}
+
+	if passConf.PolicyName != "" {
+		return generator.GeneratePasswordFromPolicy(ctx, passConf.PolicyName)
+	}
+	return generateDeprecatedPassword(passConf.Formatter, passConf.Length)
+}
+
+func generateDeprecatedPassword(formatter string, totalLength int) (string, error) {
+	// Has formatter
+	if formatter != "" {
+		passLen := lengthOfPassword(formatter, totalLength)
+		pwd, err := base62.Random(passLen)
+		if err != nil {
+			return "", err
+		}
+		return strings.Replace(formatter, pwdFieldTmpl, pwd, 1), nil
+	}
+
+	// Doesn't have formatter
+	pwd, err := base62.Random(totalLength - len(passwordComplexityPrefix))
+	if err != nil {
+		return "", err
+	}
+	return passwordComplexityPrefix + pwd, nil
+}
+
+func lengthOfPassword(formatter string, totalLength int) int {
+	lengthOfText := len(formatter) - len(pwdFieldTmpl)
+	return totalLength - lengthOfText
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go
index a5345a8900b3..ed8698ba7416 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go
@@ -3,10 +3,10 @@ package plugin
 import (
 	"context"
 	"errors"
+	"fmt"
 	"time"
 
 	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
-	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/helper/ldaputil"
 	"github.com/hashicorp/vault/sdk/logical"
@@ -32,13 +32,24 @@ func readConfig(ctx context.Context, storage logical.Storage) (*configuration, e
 	if entry == nil {
 		return nil, nil
 	}
-	config := &configuration{&passwordConf{}, &client.ADConf{}, 0}
+	config := &configuration{}
 	if err := entry.DecodeJSON(config); err != nil {
 		return nil, err
 	}
 	return config, nil
 }
 
+func writeConfig(ctx context.Context, storage logical.Storage, config *configuration) (err error) {
+	entry, err := logical.StorageEntryJSON(configStorageKey, config)
+	if err != nil {
+		return fmt.Errorf("unable to marshal config to JSON: %w", err)
+	}
+	if err := storage.Put(ctx, entry); err != nil {
+		return fmt.Errorf("unable to store config: %w", err)
+	}
+	return nil
+}
+
 func (b *backend) pathConfig() *framework.Path {
 	return &framework.Path{
 		Pattern: configPath,
@@ -63,19 +74,27 @@ func (b *backend) configFields() map[string]*framework.FieldSchema {
 		Type:        framework.TypeDurationSecond,
 		Description: "In seconds, the maximum password time-to-live.",
 	}
+	fields["last_rotation_tolerance"] = &framework.FieldSchema{
+		Type:        framework.TypeDurationSecond,
+		Description: "The number of seconds after a Vault rotation where, if Active Directory shows a later rotation, it should be considered out-of-band.",
+		Default:     5,
+	}
+	fields["password_policy"] = &framework.FieldSchema{
+		Type:        framework.TypeString,
+		Description: "Name of the password policy to use to generate passwords.",
+	}
+
+	// Deprecated fields
 	fields["length"] = &framework.FieldSchema{
 		Type:        framework.TypeInt,
 		Default:     defaultPasswordLength,
 		Description: "The desired length of passwords that Vault generates.",
+		Deprecated:  true,
 	}
 	fields["formatter"] = &framework.FieldSchema{
 		Type:        framework.TypeString,
 		Description: `Text to insert the password into, ex. "customPrefix{{PASSWORD}}customSuffix".`,
-	}
-	fields["last_rotation_tolerance"] = &framework.FieldSchema{
-		Type:        framework.TypeDurationSecond,
-		Description: "The number of seconds after a Vault rotation where, if Active Directory shows a later rotation, it should be considered out-of-band.",
-		Default:     5,
+		Deprecated:  true,
 	}
 	return fields
 }
@@ -93,9 +112,11 @@ func (b *backend) configUpdateOperation(ctx context.Context, req *logical.Reques
 	// Build the password conf.
 	ttl := fieldData.Get("ttl").(int)
 	maxTTL := fieldData.Get("max_ttl").(int)
+	lastRotationTolerance := fieldData.Get("last_rotation_tolerance").(int)
+
 	length := fieldData.Get("length").(int)
 	formatter := fieldData.Get("formatter").(string)
-	lastRotationTolerance := fieldData.Get("last_rotation_tolerance").(int)
+	passwordPolicy := fieldData.Get("password_policy").(string)
 
 	if pre111Val, ok := fieldData.GetOk("use_pre111_group_cn_behavior"); ok {
 		activeDirectoryConf.UsePre111GroupCNBehavior = new(bool)
@@ -120,23 +141,28 @@ func (b *backend) configUpdateOperation(ctx context.Context, req *logical.Reques
 	if maxTTL < 1 {
 		return nil, errors.New("max_ttl must be positive")
 	}
-	if err := util.ValidatePwdSettings(formatter, length); err != nil {
-		return nil, err
-	}
 
-	passwordConf := &passwordConf{
-		TTL:       ttl,
-		MaxTTL:    maxTTL,
-		Length:    length,
-		Formatter: formatter,
+	passwordConf := passwordConf{
+		TTL:        ttl,
+		MaxTTL:     maxTTL,
+		Length:     length,
+		Formatter:  formatter,
+		PolicyName: passwordPolicy,
 	}
-
-	config := &configuration{passwordConf, &client.ADConf{ConfigEntry: activeDirectoryConf}, lastRotationTolerance}
-	entry, err := logical.StorageEntryJSON(configStorageKey, config)
+	err = passwordConf.validate()
 	if err != nil {
 		return nil, err
 	}
-	if err := req.Storage.Put(ctx, entry); err != nil {
+
+	config := configuration{
+		PasswordConf: passwordConf,
+		ADConf: &client.ADConf{
+			ConfigEntry: activeDirectoryConf,
+		},
+		LastRotationTolerance: lastRotationTolerance,
+	}
+	err = writeConfig(ctx, req.Storage, &config)
+	if err != nil {
 		return nil, err
 	}
 
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go
index c2a192b9acf9..9e88ba34245e 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go
@@ -7,7 +7,6 @@ import (
 	"time"
 
 	"github.com/go-errors/errors"
-	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/logical"
 )
@@ -150,7 +149,7 @@ func (b *backend) credReadOperation(ctx context.Context, req *logical.Request, f
 }
 
 func (b *backend) generateAndReturnCreds(ctx context.Context, engineConf *configuration, storage logical.Storage, roleName string, role *backendRole, previousCred map[string]interface{}) (*logical.Response, error) {
-	newPassword, err := util.GeneratePassword(engineConf.PasswordConf.Formatter, engineConf.PasswordConf.Length)
+	newPassword, err := GeneratePassword(ctx, engineConf.PasswordConf, b.System())
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go
index 2f92f0bf18c9..1c4b864e0f6a 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go
@@ -221,7 +221,7 @@ func getServiceAccountName(fieldData *framework.FieldData) (string, error) {
 	return serviceAccountName, nil
 }
 
-func getValidatedTTL(passwordConf *passwordConf, fieldData *framework.FieldData) (int, error) {
+func getValidatedTTL(passwordConf passwordConf, fieldData *framework.FieldData) (int, error) {
 	ttl := fieldData.Get("ttl").(int)
 	if ttl == 0 {
 		ttl = passwordConf.TTL
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_rotate_root_creds.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_rotate_root_creds.go
index 402a0aa79392..d911eef9f78e 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_rotate_root_creds.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_rotate_root_creds.go
@@ -8,7 +8,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/logical"
 )
@@ -38,7 +37,7 @@ func (b *backend) pathRotateCredentialsUpdate(ctx context.Context, req *logical.
 		return nil, errors.New("the config is currently unset")
 	}
 
-	newPassword, err := util.GeneratePassword(engineConf.PasswordConf.Formatter, engineConf.PasswordConf.Length)
+	newPassword, err := GeneratePassword(ctx, engineConf.PasswordConf, b.System())
 	if err != nil {
 		return nil, err
 	}
@@ -58,7 +57,7 @@ func (b *backend) pathRotateCredentialsUpdate(ctx context.Context, req *logical.
 	engineConf.ADConf.BindPassword = newPassword
 
 	// Update the password locally.
-	if pwdStoringErr := storePassword(ctx, req, engineConf); pwdStoringErr != nil {
+	if pwdStoringErr := writeConfig(ctx, req.Storage, engineConf); pwdStoringErr != nil {
 		// We were unable to store the new password locally. We can't continue in this state because we won't be able
 		// to roll any passwords, including our own to get back into a state of working. So, we need to roll back to
 		// the last password we successfully got into storage.
@@ -93,17 +92,6 @@ func (b *backend) rollBackPassword(ctx context.Context, engineConf *configuratio
 	return err
 }
 
-func storePassword(ctx context.Context, req *logical.Request, engineConf *configuration) error {
-	entry, err := logical.StorageEntryJSON(configStorageKey, engineConf)
-	if err != nil {
-		return err
-	}
-	if err := req.Storage.Put(ctx, entry); err != nil {
-		return err
-	}
-	return nil
-}
-
 const pathRotateCredentialsUpdateHelpSyn = `
 Request to rotate the root credentials.
 `
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go
deleted file mode 100644
index a0c7cb013d16..000000000000
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package util
-
-import (
-	"encoding/base64"
-	"fmt"
-	"strings"
-
-	"github.com/hashicorp/go-uuid"
-)
-
-var (
-	// Per https://en.wikipedia.org/wiki/Password_strength#Guidelines_for_strong_passwords
-	minimumLengthOfComplexString = 8
-
-	PasswordComplexityPrefix = "?@09AZ"
-	PwdFieldTmpl             = "{{PASSWORD}}"
-)
-
-func GeneratePassword(formatter string, totalLength int) (string, error) {
-	if err := ValidatePwdSettings(formatter, totalLength); err != nil {
-		return "", err
-	}
-	pwd, err := generatePassword(totalLength)
-	if err != nil {
-		return "", err
-	}
-	if formatter == "" {
-		pwd = PasswordComplexityPrefix + pwd
-		return pwd[:totalLength], nil
-	}
-	return strings.Replace(formatter, PwdFieldTmpl, pwd[:lengthOfPassword(formatter, totalLength)], 1), nil
-}
-
-func ValidatePwdSettings(formatter string, totalLength int) error {
-	// Check for if there's no formatter.
-	if formatter == "" {
-		if totalLength < len(PasswordComplexityPrefix)+minimumLengthOfComplexString {
-			return fmt.Errorf("it's not possible to generate a _secure_ password of length %d, please boost length to %d, though Vault recommends higher", totalLength, minimumLengthOfComplexString+len(PasswordComplexityPrefix))
-		}
-		return nil
-	}
-
-	// Check for if there is a formatter.
-	if lengthOfPassword(formatter, totalLength) < minimumLengthOfComplexString {
-		return fmt.Errorf("since the desired length is %d, it isn't possible to generate a sufficiently complex password - please increase desired length or remove characters from the formatter", totalLength)
-	}
-	numPwdFields := strings.Count(formatter, PwdFieldTmpl)
-	if numPwdFields == 0 {
-		return fmt.Errorf("%s must contain password replacement field of %s", formatter, PwdFieldTmpl)
-	}
-	if numPwdFields > 1 {
-		return fmt.Errorf("%s must contain ONE password replacement field of %s", formatter, PwdFieldTmpl)
-	}
-	return nil
-}
-
-func lengthOfPassword(formatter string, totalLength int) int {
-	lengthOfText := len(formatter) - len(PwdFieldTmpl)
-	return totalLength - lengthOfText
-}
-
-// generatePassword returns a password of a length AT LEAST as long as the desired length,
-// it may be longer.
-func generatePassword(desiredLength int) (string, error) {
-	b, err := uuid.GenerateRandomBytes(desiredLength)
-	if err != nil {
-		return "", err
-	}
-	result := ""
-	// Though the result should immediately be longer than the desiredLength,
-	// do this in a loop to ensure there's absolutely no risk of a panic when slicing it down later.
-	for len(result) <= desiredLength {
-		// Encode to base64 because it's more complex.
-		result += base64.StdEncoding.EncodeToString(b)
-	}
-	return result, nil
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 170003e27e56..b6922fd84a03 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -433,7 +433,7 @@ github.com/hashicorp/vault-plugin-auth-oci
 github.com/hashicorp/vault-plugin-database-elasticsearch
 # github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1.0.20200521152755-9cf156a44f9c
 github.com/hashicorp/vault-plugin-database-mongodbatlas
-# github.com/hashicorp/vault-plugin-secrets-ad v0.6.4-beta1.0.20200518124111-3dceeb3ce90e
+# github.com/hashicorp/vault-plugin-secrets-ad v0.6.6
 github.com/hashicorp/vault-plugin-secrets-ad/plugin
 github.com/hashicorp/vault-plugin-secrets-ad/plugin/client
 github.com/hashicorp/vault-plugin-secrets-ad/plugin/util
@@ -457,7 +457,7 @@ github.com/hashicorp/vault-plugin-secrets-openldap
 github.com/hashicorp/vault-plugin-secrets-openldap/client
 # github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f => ./api
 github.com/hashicorp/vault/api
-# github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 => ./sdk
+# github.com/hashicorp/vault/sdk v0.1.14-0.20200527182800-ad90e0b39d2f => ./sdk
 github.com/hashicorp/vault/sdk/database/dbplugin
 github.com/hashicorp/vault/sdk/database/helper/connutil
 github.com/hashicorp/vault/sdk/database/helper/credsutil
diff --git a/website/pages/api-docs/secret/ad/index.mdx b/website/pages/api-docs/secret/ad/index.mdx
index 12e6879bd9f0..0e5157aad647 100644
--- a/website/pages/api-docs/secret/ad/index.mdx
+++ b/website/pages/api-docs/secret/ad/index.mdx
@@ -21,10 +21,20 @@ The `config` endpoint configures the LDAP connection and binding parameters, as
 
 ### Password parameters
 
-- `ttl` (string, optional) - The default password time-to-live in seconds. Once the ttl has passed, a password will be rotated the next time it's requested.
-- `max_ttl` (string, optional) - The maximum password time-to-live in seconds. No role will be allowed to set a custom ttl greater than the `max_ttl`.
-- `length` (string, optional) - The desired password length. Defaults to 64. Minimum is 14.
-- `formatter` (string, optional) - Text into which the base64 password should be inserted, formatted like so: `mycustom{{PASSWORD}}`.
+- `ttl` `(int: "")` - The default password time-to-live in seconds. Once the ttl has passed, a password will
+  be rotated the next time it's requested.
+- `max_ttl` `(int: "")` - The maximum password time-to-live in seconds. No role will be allowed to set a
+  custom ttl greater than the `max_ttl`.
+- `password_policy` `(string: "")` - Name of the [password policy](/docs/concepts/password-policies) to use to
+  generate passwords from. Mutually exclusive with `length` and `formatter`.
+
+**Deprecated parameters**:
+- `length` (string, optional) - The desired password length. Defaults to 64. Minimum is 14. Mutually exclusive
+  with `password_policy`.
+- `formatter` (string, optional) - Text into which the base64 password should be inserted, formatted like so:
+  `mycustom{{PASSWORD}}`. Mutually exclusive with `password_policy`.
+
+The following statement is applicable when using `length` and/or `formatter`, but not `password_policy`:
 
 To meet Microsoft's password complexity requirements, all passwords begin with "?@09AZ" unless a `formatter` is provided.
 The `formatter` is for organizations with different, custom password requirements. It allows an organization to supply
@@ -68,6 +78,9 @@ valid AD credentials with proper permissions.
 
 ### Sample Post Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
@@ -75,6 +88,18 @@ $ curl \
     --data @payload.json \
     http://127.0.0.1:8200/v1/ad/config
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault write ad/config \
+    binddn="domain-admin" \
+    bindpass='pa$$w0rd' \
+    url="ldaps://127.0.0.1" \
+    userdn="dc=example,dc=com"
+```
+</Tab>
+</Tabs>
 
 ### Sample Post Payload
 
@@ -126,6 +151,9 @@ When adding a role, Vault verifies its associated service account exists.
 
 ### Sample Post Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
@@ -133,6 +161,16 @@ $ curl \
     --data @payload.json \
     http://127.0.0.1:8200/v1/ad/roles/my-application
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault write ad/roles/my-application \
+    service_account_name="my-application@example.com" \
+    ttl=100
+```
+</Tab>
+</Tabs>
 
 ### Sample Post Payload
 
@@ -172,12 +210,23 @@ The `creds` endpoint offers the credential information for a given role.
 
 ### Sample Get Request
 
+<Tabs>
+<Tab heading="cURL">
+
 ```shell-session
 $ curl \
     --header "X-Vault-Token: ..." \
     --request GET \
     http://127.0.0.1:8200/v1/ad/creds/my-application
 ```
+</Tab>
+<Tab heading="CLI">
+
+```shell-session
+$ vault read ad/creds/my-application
+```
+</Tab>
+</Tabs>
 
 ### Sample Get Response
 
diff --git a/website/pages/docs/secrets/ad/index.mdx b/website/pages/docs/secrets/ad/index.mdx
index c5cc41c3d75f..13123c568ad5 100644
--- a/website/pages/docs/secrets/ad/index.mdx
+++ b/website/pages/docs/secrets/ad/index.mdx
@@ -3,10 +3,7 @@ layout: docs
 page_title: Active Directory - Secrets Engines
 sidebar_title: Active Directory
 description: >-
-  The Active Directory secrets engine for Vault generates passwords dynamically
-  based on
-
-  roles.
+  The Active Directory secrets engine allows Vault to generate dynamic credentials.
 ---
 
 # Active Directory Secrets Engine
@@ -29,6 +26,17 @@ will check them in when their lending period (or, "ttl", in Vault's language) en
 
 ## Password Rotation
 
+### Customizing Password Generation
+
+There are two ways of customizing how passwords are generated in the Active Directory secret engine:
+
+1. [Password Policies](/docs/concepts/password-policies)
+2. `length` and `formatter` fields within the [configuration](/api-docs/secret/ad#password-parameters)
+
+Utilizing password policies is the recommended path as the `length` and `formatter` fields have
+been deprecated in favor of password policies. The `password_policy` field within the configuration
+cannot be specified alongside either `length` or `formatter` to prevent a confusing configuration.
+
 ### A Note on Lazy Rotation
 
 To drive home the point that passwords are rotated "lazily", consider this scenario:

From f0b848ed96a73938ad2cf531449f005544dc984d Mon Sep 17 00:00:00 2001
From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com>
Date: Mon, 15 Jun 2020 10:11:20 -0700
Subject: [PATCH 23/29] Update oracle cloud infrastructure auth plugin to
 v0.5.5 (#9210)

---
 api/go.sum                                    | 40 ++++++++++++++++++
 go.mod                                        |  2 +-
 go.sum                                        |  4 +-
 .../hashicorp/vault-plugin-auth-oci/cli.go    |  8 +++-
 .../hashicorp/vault-plugin-auth-oci/go.mod    |  4 +-
 .../hashicorp/vault-plugin-auth-oci/go.sum    |  8 ++--
 .../vault-plugin-auth-oci/path_login.go       | 42 ++++++++++++++-----
 vendor/github.com/hashicorp/vault/api/go.sum  | 40 ++++++++++++++++++
 vendor/modules.txt                            |  2 +-
 9 files changed, 128 insertions(+), 22 deletions(-)

diff --git a/api/go.sum b/api/go.sum
index d57d24817be8..c827e5a13d7e 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -8,12 +8,15 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -25,9 +28,19 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -89,6 +102,7 @@ github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -104,17 +118,24 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk=
 golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -125,12 +146,31 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
 gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
 gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/go.mod b/go.mod
index 9b315839c318..3f19dd483b37 100644
--- a/go.mod
+++ b/go.mod
@@ -80,7 +80,7 @@ require (
 	github.com/hashicorp/vault-plugin-auth-jwt v0.6.2
 	github.com/hashicorp/vault-plugin-auth-kerberos v0.1.5
 	github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.1
-	github.com/hashicorp/vault-plugin-auth-oci v0.5.4
+	github.com/hashicorp/vault-plugin-auth-oci v0.5.5
 	github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4
 	github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1.0.20200521152755-9cf156a44f9c
 	github.com/hashicorp/vault-plugin-secrets-ad v0.6.6
diff --git a/go.sum b/go.sum
index 497135683f4f..b7207df52c55 100644
--- a/go.sum
+++ b/go.sum
@@ -496,8 +496,8 @@ github.com/hashicorp/vault-plugin-auth-kerberos v0.1.5 h1:knWedzZ51g8Aj6Hyi1ATlQ
 github.com/hashicorp/vault-plugin-auth-kerberos v0.1.5/go.mod h1:r4UqWITHYKmBeAMKPWqLo4V8bl/wNqoSIaQcMpeK9ss=
 github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.1 h1:TpdQhHdZZN1Wo9RpJG33gUfuiVtajVcSF/hNpHWaatI=
 github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.1/go.mod h1:/Y9W5aZULfPeNVRQK0/nrFGpHWyNm0J3UWhOdsAu0vM=
-github.com/hashicorp/vault-plugin-auth-oci v0.5.4 h1:Hoauxh1V8Lusf7BRs+yXfoDTFQzgykbb3OC77aReXDY=
-github.com/hashicorp/vault-plugin-auth-oci v0.5.4/go.mod h1:j05O2b9fw2Q82NxDPhHMYVfHKvitUYGWfmqmpBdqmmc=
+github.com/hashicorp/vault-plugin-auth-oci v0.5.5 h1:nIP8g+VZd2V+LY/D5omWhLSnhHuogIJx7Bz6JyLt628=
+github.com/hashicorp/vault-plugin-auth-oci v0.5.5/go.mod h1:Cn5cjR279Y+snw8LTaiLTko3KGrbigRbsQPOd2D5xDw=
 github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4 h1:YE4qndazWmYGpVOoZI7nDGG+gwTZKzL1Ou4WZQ+Tdxk=
 github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4/go.mod h1:QjGrrxcRXv/4XkEZAlM0VMZEa3uxKAICFqDj27FP/48=
 github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1.0.20200521152755-9cf156a44f9c h1:9pXwe7sEVhZ5C3U6egIrKaZBb5lD0FvLIjISEvpbQQA=
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-oci/cli.go b/vendor/github.com/hashicorp/vault-plugin-auth-oci/cli.go
index 8f3b4ffccd37..0966d668228c 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-oci/cli.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-oci/cli.go
@@ -50,13 +50,19 @@ Configuration:
 }
 
 func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+	mount, ok := m["mount"]
+	if !ok {
+		mount = "oci"
+	}
+	mount = strings.TrimSuffix(mount, "/")
+
 	role, ok := m["role"]
 	if !ok {
 		return nil, fmt.Errorf("Enter the role")
 	}
 	role = strings.ToLower(role)
 
-	path := fmt.Sprintf(PathBaseFormat, role)
+	path := fmt.Sprintf(PathBaseFormat, mount, role)
 	signingPath := PathVersionBase + path
 
 	loginData, err := CreateLoginData(c.Address(), m, signingPath)
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.mod b/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.mod
index 1cb11b0b65b2..bd2e8ba5f326 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.mod
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.mod
@@ -12,8 +12,8 @@ require (
 	github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
 	github.com/hashicorp/go-version v1.2.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.3 // indirect
-	github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02
-	github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02
+	github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820
+	github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820
 	github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
 	github.com/oracle/oci-go-sdk v7.0.0+incompatible
 	github.com/pierrec/lz4 v2.2.6+incompatible // indirect
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.sum b/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.sum
index 66d2d64082ed..bcd0f2507924 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.sum
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-oci/go.sum
@@ -80,12 +80,12 @@ github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8
 github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02 h1:OGEV0U0+lb8SP5aZA1m456Sr3MYxFel2awVr55QRri0=
-github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
+github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820 h1:biZidYDDEWnuOI9mXnJre8lwHKhb5ym85aSXk3oz/dc=
+github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
 github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500 h1:tiMX2ewq4ble+e2zENzBvaH2dMoFHe80NbnrF5Ir9Kk=
 github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
-github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02 h1:vVrOAVfunVvkTkE9iF3Fe1+PGPLwGIp3nP4qgHGrHFs=
-github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820 h1:TmDZ1sS6gU0hFeFlFuyJVUwRPEzifZIHCBeS2WF2uSc=
+github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-oci/path_login.go b/vendor/github.com/hashicorp/vault-plugin-auth-oci/path_login.go
index 6484f137f711..85e1f078aae0 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-oci/path_login.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-oci/path_login.go
@@ -4,21 +4,25 @@ package ociauth
 import (
 	"context"
 	"fmt"
+	"net/http"
+	"strings"
+	"unicode"
+
 	log "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/oracle/oci-go-sdk/common"
 	"github.com/pkg/errors"
-	"net/http"
-	"strings"
-	"unicode"
 )
 
 // These constants store the required http path & method information for validating the signed request
 const (
-	PathVersionBase = "/v1"
-	PathBaseFormat  = "/auth/oci/login/%s"
-	PathLoginMethod = "get"
+	PathVersionBase    = "/v1"
+	PathBaseFormat     = "/auth/%s/login/%s"
+	PathLoginMethod    = "get"
+	PathSegmentAuth    = "auth"
+	PathSegmentLogin   = "login"
+	PathSegmentVersion = "v1"
 )
 
 // Signing Header constants
@@ -78,8 +82,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat
 	authenticateRequestHeaders := requestHeaders.(http.Header)
 
 	// Find the targetUrl and Method
-	finalLoginPath := PathVersionBase + fmt.Sprintf(PathBaseFormat, roleName)
-	method, targetUrl, err := requestTargetToMethodURL(authenticateRequestHeaders[HdrRequestTarget], PathLoginMethod, finalLoginPath)
+	method, targetUrl, err := requestTargetToMethodURL(authenticateRequestHeaders[HdrRequestTarget], roleName)
 	if err != nil {
 		return unauthorizedLogicalResponse(req, b.Logger(), err)
 	}
@@ -213,14 +216,31 @@ func unauthorizedLogicalResponse(req *logical.Request, logger log.Logger, err er
 	return logical.RespondWithStatusCode(nil, req, http.StatusUnauthorized)
 }
 
-func requestTargetToMethodURL(requestTarget []string, expectedMethod string, expectedUrl string) (method string, url string, err error) {
+func requestTargetToMethodURL(requestTarget []string, roleName string) (method string, url string, err error) {
 	if len(requestTarget) == 0 {
 		return "", "", errors.New("no (request-target) specified in header")
 	}
+	errHeader := errors.New("incorrect (request-target) specified in header")
+
+	// Ensure both the request method and URL path are present in the (request-target) header
 	parts := strings.FieldsFunc(requestTarget[0], unicode.IsSpace)
-	if len(parts) != 2 || strings.ToLower(parts[0]) != expectedMethod || strings.ToLower(parts[1]) != expectedUrl {
-		return "", "", errors.New("incorrect (request-target) specified in header")
+	if len(parts) != 2 {
+		return "", "", errHeader
+	}
+
+	// Validate the request method
+	if strings.ToLower(parts[0]) != PathLoginMethod {
+		return "", "", errHeader
 	}
+
+	// Validate the URL path by inspecting its segments.
+	// The path mount segment of the URL is not validated.
+	segments := strings.Split(strings.TrimPrefix(parts[1], "/"), "/")
+	if len(segments) < 5 || segments[0] != PathSegmentVersion || segments[1] != PathSegmentAuth ||
+		segments[len(segments)-2] != PathSegmentLogin || segments[len(segments)-1] != roleName {
+		return "", "", errHeader
+	}
+
 	return parts[0], parts[1], nil
 }
 
diff --git a/vendor/github.com/hashicorp/vault/api/go.sum b/vendor/github.com/hashicorp/vault/api/go.sum
index d57d24817be8..c827e5a13d7e 100644
--- a/vendor/github.com/hashicorp/vault/api/go.sum
+++ b/vendor/github.com/hashicorp/vault/api/go.sum
@@ -8,12 +8,15 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -25,9 +28,19 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -89,6 +102,7 @@ github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -104,17 +118,24 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk=
 golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -125,12 +146,31 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
 gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
 gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b6922fd84a03..78a47a581b84 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -427,7 +427,7 @@ github.com/hashicorp/vault-plugin-auth-jwt
 github.com/hashicorp/vault-plugin-auth-kerberos
 # github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.1
 github.com/hashicorp/vault-plugin-auth-kubernetes
-# github.com/hashicorp/vault-plugin-auth-oci v0.5.4
+# github.com/hashicorp/vault-plugin-auth-oci v0.5.5
 github.com/hashicorp/vault-plugin-auth-oci
 # github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4
 github.com/hashicorp/vault-plugin-database-elasticsearch

From d9c863b170602c4733511fc99a6047ed7e5d54e8 Mon Sep 17 00:00:00 2001
From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com>
Date: Mon, 15 Jun 2020 11:10:10 -0700
Subject: [PATCH 24/29] changelog++

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1439f9d58963..f064e4c0ce2c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,6 +40,7 @@ BUG FIXES:
 * ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines.[[GH-8913]](https://github.com/hashicorp/vault/pull/8913)
 * secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
 * secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-7](https://github.com/hashicorp/vault-plugin-auth-oci/pull/7)]
 
 ## 1.4.2 (May 21st, 2020)
 

From f360b2be4e2413ae77bbfd9e1bc06517f0b9d605 Mon Sep 17 00:00:00 2001
From: Matt Whiteley <mattwhiteley@gmail.com>
Date: Mon, 15 Jun 2020 11:36:15 -0700
Subject: [PATCH 25/29] Fix typo (#9217)

correct parameter is `leader_ca_cert_file`
---
 website/pages/docs/configuration/storage/raft.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/pages/docs/configuration/storage/raft.mdx b/website/pages/docs/configuration/storage/raft.mdx
index f0e0450cd2fb..26149e1f4d57 100644
--- a/website/pages/docs/configuration/storage/raft.mdx
+++ b/website/pages/docs/configuration/storage/raft.mdx
@@ -140,7 +140,7 @@ storage "raft" {
   node_id = "node1"
   retry_join {
     leader_api_addr = "http://127.0.0.2:8200"
-    leader_ca_cer_file = "/path/to/ca1"
+    leader_ca_cert_file = "/path/to/ca1"
     leader_client_cert_file = "/path/to/client/cert1"
     leader_client_key_file = "/path/to/client/key1"
   }

From d8884e44b3266249323e2138520a941adbb45c64 Mon Sep 17 00:00:00 2001
From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com>
Date: Mon, 15 Jun 2020 11:57:51 -0700
Subject: [PATCH 26/29] docs: fix typo in gcp and gcpkms secrets (#9228)

---
 website/pages/docs/secrets/gcp/index.mdx    | 2 +-
 website/pages/docs/secrets/gcpkms/index.mdx | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/pages/docs/secrets/gcp/index.mdx b/website/pages/docs/secrets/gcp/index.mdx
index 5bb12cf85796..d608c84d2451 100644
--- a/website/pages/docs/secrets/gcp/index.mdx
+++ b/website/pages/docs/secrets/gcp/index.mdx
@@ -60,7 +60,7 @@ management tool.
 
     If you are running Vault from inside [Google Compute Engine][gce] or [Google
     Kubernetes Engine][gke], the instance or pod service account can be used in
-    place or specifying the credentials JSON file.
+    place of specifying the credentials JSON file.
     For more information on authentication, see the [authentication section](#authentication) below.
 
 1.  Configure a roleset. Rolesets determine the permissions that Service Account
diff --git a/website/pages/docs/secrets/gcpkms/index.mdx b/website/pages/docs/secrets/gcpkms/index.mdx
index 74f75c1af42b..d3be768071d4 100644
--- a/website/pages/docs/secrets/gcpkms/index.mdx
+++ b/website/pages/docs/secrets/gcpkms/index.mdx
@@ -41,7 +41,7 @@ management tool.
 
    If you are running Vault from inside [Google Compute Engine][gce] or [Google
    Kubernetes Engine][gke], the instance or pod service account can be used in
-   place or specifying the credentials JSON file. For more information on
+   place of specifying the credentials JSON file. For more information on
    authentication, see the [authentication section](#authentication) below.
 
 1. Create a Google Cloud KMS key:

From bf90d67fefe81e20360544a16d09b29d4f809d50 Mon Sep 17 00:00:00 2001
From: ncabatoff <ncabatoff@hashicorp.com>
Date: Mon, 15 Jun 2020 15:25:45 -0400
Subject: [PATCH 27/29] Restart template server if it shuts down (#9200)

---
 command/agent/template/template.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/command/agent/template/template.go b/command/agent/template/template.go
index 63e1a2b5051a..3ad820504528 100644
--- a/command/agent/template/template.go
+++ b/command/agent/template/template.go
@@ -153,7 +153,13 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct
 			}
 		case err := <-ts.runner.ErrCh:
 			ts.logger.Error("template server error", "error", err.Error())
-			return
+			ts.runner.StopImmediately()
+			ts.runner, err = manager.NewRunner(runnerConfig, false)
+			if err != nil {
+				ts.logger.Error("template server failed to create", "error", err)
+				return
+			}
+			go ts.runner.Start()
 		case <-ts.runner.TemplateRenderedCh():
 			// A template has been rendered, figure out what to do
 			events := ts.runner.RenderEvents()

From cbd1369f0f0b65a4607c8bdd9d3ffc068c13a7f1 Mon Sep 17 00:00:00 2001
From: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com>
Date: Mon, 15 Jun 2020 15:49:35 -0400
Subject: [PATCH 28/29] docs/agent: add overview for consul template fetches
 (#9227)

* docs/vault-k8s: add overview for consul template fetches

* Add dynamic role link

* move to agent documentation, add link

* fix typo in certificate doc

* fix note about leased secrets

* update secret vs token, add note to pki

* add more secret vs token notes

* add note about caching
---
 website/pages/docs/agent/template/index.mdx   | 37 +++++++++++++++++++
 .../docs/platform/k8s/injector/index.mdx      |  7 +++-
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/website/pages/docs/agent/template/index.mdx b/website/pages/docs/agent/template/index.mdx
index 66490aeecc82..c4a1d86e0be1 100644
--- a/website/pages/docs/agent/template/index.mdx
+++ b/website/pages/docs/agent/template/index.mdx
@@ -124,3 +124,40 @@ template {
 If you only want to use the Vault agent to render one or more templates and do
 not need to sink the acquired credentials, you can omit the `sink` stanza from
 the `auto_auth` stanza in the agent configuration.
+
+## Renewals and Updating Secrets
+
+The Vault Agent templating automatically renews and fetches secrets/tokens. 
+Unlike [Vault Agent caching](/docs/agent/caching), the behavior of how Vault Agent 
+templating does this depends on the type of secret or token. The following is a 
+high level overview of different behaviors.
+
+### Renewable Secrets
+
+If a secret or token is renewable, Vault Agent will renew the secret at 1/3 
+of the secret's lease.
+
+### Non-Renewable Secrets
+
+If a secret or token isn't renewable or leased, Vault Agent will fetch the secret every 5 minutes. This is not
+configurable.  Non-renewable secrets include (but are not limited to) [KV Version 2](/docs/secrets/kv/kv-v2).
+
+### Non-Renewable Leased Secrets
+
+If a secret or token is non-renewable but leased, Vault Agent will fetch the secret when 85% of the
+secret's time-to-live (TTL) is reached. Leased, non-renewable secrets include (but are not limited
+to) dynamic secrets such as [database credentials](/docs/secrets/databases) and [KV Version 1](/docs/secrets/kv/kv-v1).
+
+### Static Roles
+
+If a secret has a `rotation_period`, such as a [database static role](/docs/secrets/databases#static-roles),
+Vault Agent template will fetch the new secret as it changes in Vault.  It does
+this by inspecting the secret's time-to-live (TTL).
+
+### Certificates
+
+If a secret is a [certificate](/docs/secrets/pki), Vault Agent template will fetch the new certificate
+using the certificate's `validTo` field.
+
+This does not apply to certificates generated with `generate_lease: true`.  If set,
+Vault Agent template will apply the non-renewable, leased secret rules.
diff --git a/website/pages/docs/platform/k8s/injector/index.mdx b/website/pages/docs/platform/k8s/injector/index.mdx
index d8a8901d5744..9793ee2744d2 100644
--- a/website/pages/docs/platform/k8s/injector/index.mdx
+++ b/website/pages/docs/platform/k8s/injector/index.mdx
@@ -170,7 +170,12 @@ username: v-kubernet-pg-app-q0Z7WPfVNqqTJuoDqCTY-1576529094
 
 ~> Some secrets such as KV are stored in maps. Their data can be accessed using `.Data.data.<NAME>`
 
-#### Vault Agent Configuration Map
+### Renewals and Updating Secrets
+
+For more information on when Vault Agent fetches and renews secrets, see the 
+[Agent documentation](/docs/agent/template#renewals-and-updating-secrets).
+
+### Vault Agent Configuration Map
 
 For advanced use cases, it may be required to define Vault Agent configuration
 files to mount instead of using secret and template annotations. The Vault Agent

From 3559f33e69521548423bbf531e184da8f93841a5 Mon Sep 17 00:00:00 2001
From: Noelle Daley <noelledaley@users.noreply.github.com>
Date: Mon, 15 Jun 2020 14:53:48 -0700
Subject: [PATCH 29/29] Ui/add changelog link (#9216)

* link to changelog in version

* link to specific changelog version number

* update CHANGELOG headings to match expected format

* clean up and add tests

* handle errors

* use https

* update test url
---
 ui/app/templates/vault.hbs                    |  5 ++-
 .../core/addon/helpers/changelog-url-for.js   | 37 +++++++++++++++++++
 ui/lib/core/app/helpers/changelog-url-for.js  |  1 +
 .../helpers/changelog-url-for-test.js         | 29 +++++++++++++++
 4 files changed, 71 insertions(+), 1 deletion(-)
 create mode 100644 ui/lib/core/addon/helpers/changelog-url-for.js
 create mode 100644 ui/lib/core/app/helpers/changelog-url-for.js
 create mode 100644 ui/tests/integration/helpers/changelog-url-for-test.js

diff --git a/ui/app/templates/vault.hbs b/ui/app/templates/vault.hbs
index 2aa97f49b18f..0e1bb414bc53 100644
--- a/ui/app/templates/vault.hbs
+++ b/ui/app/templates/vault.hbs
@@ -10,7 +10,10 @@
       &copy; {{date-format (now) "YYYY"}} HashiCorp
     </span>
     <span>
-      Vault {{activeCluster.leaderNode.version}}
+      <a href={{changelog-url-for activeCluster.leaderNode.version}}
+      class="link has-text-grey">
+        Vault {{activeCluster.leaderNode.version}}
+      </a>
     </span>
     {{#if (is-version "OSS")}}
       <span>
diff --git a/ui/lib/core/addon/helpers/changelog-url-for.js b/ui/lib/core/addon/helpers/changelog-url-for.js
new file mode 100644
index 000000000000..008755ba1442
--- /dev/null
+++ b/ui/lib/core/addon/helpers/changelog-url-for.js
@@ -0,0 +1,37 @@
+import { helper } from '@ember/component/helper';
+
+/*
+This helper returns a url to the changelog for the specified version.
+It assumes that Changelog headers for Vault versions >= 1.4.3 are structured as:
+
+## v1.5.0
+### Month, DD, YYYY
+
+## v1.4.5
+### Month, DD, YYYY
+
+etc.
+*/
+
+export function changelogUrlFor([version]) {
+  let url = 'https://www.github.com/hashicorp/vault/blob/master/CHANGELOG.md#';
+
+  try {
+    // strip the '+prem' from enterprise versions and remove periods
+    let versionNumber = version
+      .split('+')[0]
+      .split('.')
+      .join('');
+
+    // only recent versions have a predictable url
+    if (versionNumber >= '143') {
+      return url.concat('v', versionNumber);
+    }
+  } catch (e) {
+    console.log(e);
+    console.log('Cannot generate URL for version: ', version);
+  }
+  return url;
+}
+
+export default helper(changelogUrlFor);
diff --git a/ui/lib/core/app/helpers/changelog-url-for.js b/ui/lib/core/app/helpers/changelog-url-for.js
new file mode 100644
index 000000000000..75934aff36f3
--- /dev/null
+++ b/ui/lib/core/app/helpers/changelog-url-for.js
@@ -0,0 +1 @@
+export { default, changelogUrlFor } from 'core/helpers/changelog-url-for';
diff --git a/ui/tests/integration/helpers/changelog-url-for-test.js b/ui/tests/integration/helpers/changelog-url-for-test.js
new file mode 100644
index 000000000000..64e66b5629fa
--- /dev/null
+++ b/ui/tests/integration/helpers/changelog-url-for-test.js
@@ -0,0 +1,29 @@
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { changelogUrlFor } from '../../../helpers/changelog-url-for'; // helper under test
+
+const CHANGELOG_URL = 'https://www.github.com/hashicorp/vault/blob/master/CHANGELOG.md#'; // base url returned when no anchor can be built
+
+module('Integration | Helper | changelog-url-for', function(hooks) {
+  setupRenderingTest(hooks);
+
+  test('it builds an enterprise URL', function(assert) {
+    const result = changelogUrlFor(['1.5.0+prem']); // the '+prem' suffix is stripped before building the anchor
+    assert.equal(result, CHANGELOG_URL.concat('v150'));
+  });
+
+  test('it builds an OSS URL', function(assert) {
+    const result = changelogUrlFor(['1.4.3']); // 1.4.3 is the oldest version with a predictable anchor
+    assert.equal(result, CHANGELOG_URL.concat('v143'));
+  });
+
+  test('it returns the base changelog URL if the version is less than 1.4.3', function(assert) {
+    const result = changelogUrlFor(['1.4.0']);
+    assert.equal(result, CHANGELOG_URL);
+  });
+
+  test('it returns the base changelog URL if version cannot be found', function(assert) {
+    const result = changelogUrlFor(['']); // empty string exercises the error/fallback path
+    assert.equal(result, CHANGELOG_URL);
+  });
+});