diff --git a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json
deleted file mode 100644
index 80b3d55212b2..000000000000
--- a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "include": [
-    {
-      "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 3
-    },
-    {
-      "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 4
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 1
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 5
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 3
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 5
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 4
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    }
-  ]
-}
diff --git a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json
deleted file mode 100644
index a497fb0ebe00..000000000000
--- a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "include": [
-    {
-      "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 3
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 4
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 5
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 1
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 3
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 4
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 5
-    }
-  ]
-}
diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json
deleted file mode 100644
index 857677b72f07..000000000000
--- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "include": [
-    {
-      "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    }
-  ]
-}
diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json
deleted file mode 100644
index 1c67cd3bcfdb..000000000000
--- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "include": [
-    {
-      "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 1
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 1
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 2
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-east-1",
-      "test_group": 1
-    },
-    {
-      "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle",
-      "aws_region": "us-west-2",
-      "test_group": 2
-    }
-  ]
-}
diff --git a/.github/workflows/build-vault-oss.yml b/.github/workflows/build-vault-oss.yml
index ea7b2cacb57a..056abca3692f 100644
--- a/.github/workflows/build-vault-oss.yml
+++ b/.github/workflows/build-vault-oss.yml
@@ -9,9 +9,6 @@ name: build_vault
 on:
   workflow_call:
     inputs:
-      bundle-path:
-        required: false
-        type: string
       cgo-enabled:
         type: string
        default: 0
@@ -35,12 +32,7 @@ on:
       web-ui-cache-key:
         type: string
         required: true
-      vault-base-version:
-        type: string
-        required: true
-      vault-prerelease-version:
-        type: string
-        required: true
+
 jobs:
   build:
     runs-on: custom-linux-xl-vault-latest
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 1ad1a7e06df2..076acdccf9b0 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -34,13 +34,10 @@ jobs:
     outputs:
       build-date: ${{ steps.get-metadata.outputs.build-date }}
      filepath: ${{ steps.generate-metadata-file.outputs.filepath }}
-      matrix-test-group: ${{ steps.get-metadata.outputs.matrix-test-group }}
       package-name: ${{ steps.get-metadata.outputs.package-name }}
       vault-revision: ${{ steps.get-metadata.outputs.vault-revision }}
       vault-version: ${{ steps.set-product-version.outputs.product-version }}
-      vault-base-version: ${{ steps.set-product-version.outputs.base-product-version }}
-      vault-prerelease-version: ${{ steps.set-product-version.outputs.prerelease-product-version }}
-      vault-minor-version: ${{ steps.set-product-version.outputs.minor-product-version }}
+      vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }}
     steps:
       - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
       - name: Ensure Go modules are cached
@@ -55,17 +52,13 @@
       - name: Get metadata
         id: get-metadata
         env:
-          # MATRIX_MAX_TEST_GROUPS is required to determine the randomly selected
-          # test group. It should be set to the highest test_group used in the
-          # enos-run-matrices.
-          MATRIX_MAX_TEST_GROUPS: 5
           VAULT_VERSION: ${{ steps.set-product-version.outputs.product-version }}
         run: |
           # shellcheck disable=SC2129
           echo "build-date=$(make ci-get-date)" >> "$GITHUB_OUTPUT"
-          echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> "$GITHUB_OUTPUT"
           echo "package-name=vault" >> "$GITHUB_OUTPUT"
           echo "vault-revision=$(make ci-get-revision)" >> "$GITHUB_OUTPUT"
+          echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT"
       - uses: hashicorp/actions-generate-metadata@v1
         id: generate-metadata-file
         with:
@@ -134,8 +127,6 @@
       package-name: ${{ needs.product-metadata.outputs.package-name }}
       web-ui-cache-key: ${{ needs.build-ui.outputs.cache-key }}
       vault-version: ${{ needs.product-metadata.outputs.vault-version }}
-      vault-base-version: ${{ needs.product-metadata.outputs.vault-base-version }}
-      vault-prerelease-version: ${{ needs.product-metadata.outputs.vault-prerelease-version }}
     secrets: inherit

   build-linux:
@@ -156,8 +147,6 @@
       package-name: ${{ needs.product-metadata.outputs.package-name }}
       web-ui-cache-key: ${{ needs.build-ui.outputs.cache-key }}
       vault-version: ${{ needs.product-metadata.outputs.vault-version }}
-      vault-base-version: ${{ needs.product-metadata.outputs.vault-base-version }}
-      vault-prerelease-version: ${{ needs.product-metadata.outputs.vault-prerelease-version }}
     secrets: inherit

   build-darwin:
@@ -179,8 +168,6 @@
       package-name: ${{ needs.product-metadata.outputs.package-name }}
       web-ui-cache-key: ${{ needs.build-ui.outputs.cache-key }}
       vault-version: ${{ needs.product-metadata.outputs.vault-version }}
-      vault-base-version: ${{ needs.product-metadata.outputs.vault-base-version }}
-      vault-prerelease-version: ${{ needs.product-metadata.outputs.vault-prerelease-version }}
     secrets: inherit

   build-docker:
@@ -199,7 +186,7 @@
       - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
       - uses: hashicorp/actions-docker-build@v1
         with:
-          version: "${{ env.version }}"
+          version: ${{ env.version }}
           target: default
           arch: ${{ matrix.arch }}
           zip_artifact_name: vault_${{ env.version }}_linux_${{ matrix.arch }}.zip
@@ -227,6 +214,7 @@
           target: ubi
           arch: ${{ matrix.arch }}
           zip_artifact_name: vault_${{ env.version }}_linux_${{ matrix.arch }}.zip
+          # The redhat_tag differs on CE and ENT editions. Be mindful when resolving merge conflicts.
           redhat_tag: quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a:${{ env.version }}-ubi

   test:
@@ -248,19 +236,25 @@
       fail-fast: false
       matrix:
         include:
-          - matrix-file-name: build-github-oss-linux-amd64-zip
+          - sample-name: build_oss_linux_amd64_deb
+            build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
+          - sample-name: build_oss_linux_arm64_deb
+            build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
+          - sample-name: build_oss_linux_amd64_rpm
+            build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
+          - sample-name: build_oss_linux_arm64_rpm
+            build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
+          - sample-name: build_oss_linux_amd64_zip
             build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
-          - matrix-file-name: build-github-oss-linux-arm64-zip
+          - sample-name: build_oss_linux_arm64_zip
             build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
     with:
       build-artifact-name: ${{ matrix.build-artifact-name }}
-      matrix-file-name: ${{ matrix.matrix-file-name }}
-      matrix-test-group: ${{ needs.product-metadata.outputs.matrix-test-group }}
-      vault-edition: oss
-      vault-revision: ${{ needs.product-metadata.outputs.vault-revision }}
+      sample-max: 1
+      sample-name: ${{ matrix.sample-name }}
       ssh-key-name: ${{ github.event.repository.name }}-ci-ssh-key
+      vault-revision: ${{ needs.product-metadata.outputs.vault-revision }}
       vault-version: ${{ needs.product-metadata.outputs.vault-version }}
-      vault-minor-version: ${{ needs.product-metadata.outputs.vault-minor-version }}
     secrets: inherit

   test-docker-k8s:
diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml
index 0fbf9f2d44ec..7fb66ae2923f 100644
--- a/.github/workflows/enos-release-testing-oss.yml
+++ b/.github/workflows/enos-release-testing-oss.yml
@@ -12,17 +12,13 @@ concurrency:
   cancel-in-progress: true

 jobs:
-
   product-metadata:
     if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }}
     runs-on: ubuntu-latest
     outputs:
-      matrix-test-group: ${{ steps.get-metadata.outputs.matrix-test-group }}
-      vault-revision: ${{ steps.get-metadata.outputs.vault-revision }}
-      vault-version: ${{ steps.set-product-version.outputs.product-version }}
-      vault-base-version: ${{ steps.set-product-version.outputs.base-product-version }}
-      vault-prerelease-version: ${{ steps.set-product-version.outputs.prerelease-product-version }}
-      vault-minor-version: ${{ steps.set-product-version.outputs.minor-product-version }}
+      vault-revision: ${{ github.event.client_payload.payload.sha }}
+      vault-version: ${{ github.event.client_payload.payload.version }}
+      vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }}
     steps:
       - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
         with:
@@ -31,15 +27,9 @@
           ref: ${{ github.event.client_payload.payload.sha }}
       - id: get-metadata
         env:
-          # MATRIX_MAX_TEST_GROUPS is required to determine the randomly selected
-          # test group. It should be set to the highest test_group used in the
-          # enos-run-matrices.
-          MATRIX_MAX_TEST_GROUPS: 2
+          VAULT_VERSION: ${{ github.event.client_payload.payload.version }}
         run: |
-          # shellcheck disable=SC2129
-          echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> "$GITHUB_OUTPUT"
-          echo "vault-revision=$(make ci-get-revision)" >> "$GITHUB_OUTPUT"
-          # Get the workflow summary similar to CRT workflows
+          echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT"
       - name: Release Artifact Info
         run: |
           # shellcheck disable=SC2129
@@ -48,10 +38,6 @@
           echo "__Commit:__ ${{ github.event.client_payload.payload.sha }}" >> "$GITHUB_STEP_SUMMARY"
           echo "" >> "$GITHUB_STEP_SUMMARY"
           echo "[Build Workflow](https://github.com/${{github.event.client_payload.payload.org}}/${{github.event.client_payload.payload.repo}}/actions/runs/${{github.event.client_payload.payload.buildworkflowid}})" >> "$GITHUB_STEP_SUMMARY"
-      - name: Set Product version
-        id: set-product-version
-        uses: hashicorp/actions-set-product-version@v1
-
   test:
     name: Test ${{ matrix.build-artifact-name }}
@@ -62,18 +48,24 @@
       fail-fast: false
       matrix:
         include:
-          - matrix-file-name: enos_release_testing_oss-artifactory-oss-linux-amd64-zip
+          - sample-name: release_oss_linux_amd64_deb
+            build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
+          - sample-name: release_oss_linux_arm64_deb
+            build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
+          - sample-name: release_oss_linux_amd64_rpm
+            build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
+          - sample-name: release_oss_linux_arm64_rpm
+            build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
+          - sample-name: release_oss_linux_amd64_zip
             build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
-          - matrix-file-name: enos_release_testing_oss-artifactory-oss-linux-arm64-zip
+          - sample-name: release_oss_linux_arm64_zip
             build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
     with:
       build-artifact-name: ${{ matrix.build-artifact-name }}
-      matrix-file-name: ${{ matrix.matrix-file-name }}
-      matrix-test-group: ${{ needs.product-metadata.outputs.matrix-test-group }}
-      vault-edition: oss
+      sample-max: 2
+      sample-name: ${{ matrix.sample-name }}
       vault-revision: ${{ needs.product-metadata.outputs.vault-revision }}
       vault-version: ${{ needs.product-metadata.outputs.vault-version }}
-      vault-minor-version: ${{ needs.product-metadata.outputs.vault-minor-version }}
     secrets: inherit

   save-metadata:
diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml
index d8b57f2e7308..c89e16fbe7a7 100644
--- a/.github/workflows/test-go.yml
+++ b/.github/workflows/test-go.yml
@@ -256,6 +256,8 @@
        run: time make ci-bootstrap dev
       - uses: ./.github/actions/set-up-gotestsum
       - name: Install gVisor
+        # Enterprise repo runners do not allow sudo, so can't install gVisor there yet.
+        if: ${{ !inputs.enterprise }}
         run: |
           (
             set -e
@@ -282,7 +284,6 @@
             }
           }
           EOF
-          sudo systemctl reload docker

       - id: run-go-tests
         name: Run Go tests
@@ -333,7 +334,11 @@
          # shellcheck disable=SC2193 # can get false positive for this comparison
          if [[ "${{ github.base_ref }}" == release/* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" == release/* ]]
          then
-            RERUN_FAILS="--rerun-fails"
+            # TODO remove this extra condition once 1.15 is about to be released GA
+            if [[ "${{ github.base_ref }}" != release/1.15* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" != release/1.15* ]]
+            then
+              RERUN_FAILS="--rerun-fails"
+            fi
          fi

          # shellcheck disable=SC2086 # can't quote RERUN_FAILS
diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml
index c216ae7db1c1..cdd72e72e08b 100644
--- a/.github/workflows/test-run-enos-scenario-matrix.yml
+++ b/.github/workflows/test-run-enos-scenario-matrix.yml
@@ -11,33 +11,15 @@ on:
       build-artifact-name:
         required: true
         type: string
-      # The base name of the file in ./github/enos-run-matrices that we use to
-      # determine which scenarios to run for the build artifact.
-      #
-      # They are named in the format of:
-      #   $caller_workflow_name-$artifact_source-$vault_edition-$platform-$arch-$packing_type
-      #
-      # Where each are:
-      #   caller_workflow_name: the Github Actions workflow that is calling
-      #     this one
-      #   artifact_source: where we're getting the artifact from. Either
-      #     "github" or "artifactory"
-      #   vault_edition: which edition of vault that we're testing. e.g. "oss"
-      #     or "ent"
-      #   platform: the vault binary target platform, e.g. "linux" or "macos"
-      #   arch: the vault binary target architecture, e.g. "arm64" or "amd64"
-      #   packing_type: how vault binary is packaged, e.g. "zip", "deb", "rpm"
-      #
-      # Examples:
-      #   build-github-oss-linux-amd64-zip
-      matrix-file-name:
+      # The maximum number of scenarios to include in the test sample.
+      sample-max:
+        default: 1
+        type: number
+      # The name of the enos scenario sample that defines compatible scenarios we
+      # can test with.
+      sample-name:
         required: true
         type: string
-      # The test group we want to run. This corresponds to the test_group attribute
-      # defined in the enos-run-matrices files.
-      matrix-test-group:
-        default: 0
-        type: string
       runs-on:
         # NOTE: The value should be JSON encoded as that's the only way we can
         # pass arrays with workflow_call.
@@ -47,16 +29,9 @@
       ssh-key-name:
         type: string
         default: ${{ github.event.repository.name }}-ci-ssh-key
-      # Which edition of Vault we're using. e.g. "oss", "ent", "ent.hsm.fips1402"
-      vault-edition:
-        required: true
-        type: string
       vault-version:
         required: true
         type: string
-      vault-minor-version:
-        required: true
-        type: string
       # The Git commit SHA used as the revision when building vault
       vault-revision:
         required: true
         type: string
@@ -67,37 +42,34 @@ jobs:
   metadata:
     runs-on: ${{ fromJSON(inputs.runs-on) }}
     outputs:
       build-date: ${{ steps.metadata.outputs.build-date }}
-      matrix: ${{ steps.metadata.outputs.matrix }}
-    env:
-      # Pass the vault edition as VAULT_METADATA so the CI make targets can create
-      # values that consider the edition.
-      VAULT_METADATA: ${{ inputs.vault-edition }}
-      VAULT_VERSION: ${{ inputs.vault-version }}
-      VAULT_MINOR_VERSION: ${{ inputs.vault-minor-version }}
-      # Pass in the matrix and matrix group for filtering
-      MATRIX_FILE: ./.github/enos-run-matrices/${{ inputs.matrix-file-name }}.json
-      MATRIX_TEST_GROUP: ${{ inputs.matrix-test-group }}
+      sample: ${{ steps.metadata.outputs.sample }}
     steps:
       - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
         with:
           ref: ${{ inputs.vault-revision }}
+      - uses: hashicorp/action-setup-enos@v1
+        with:
+          github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
       - id: metadata
         run: |
           echo "build-date=$(make ci-get-date)" >> "$GITHUB_OUTPUT"
-          filtered="$(make ci-filter-matrix)"
-          echo "matrix=$filtered" >> "$GITHUB_OUTPUT"
+          sample="$(enos scenario sample observe ${{ inputs.sample-name }} --chdir ./enos --min 1 --max ${{ inputs.sample-max }} --seed "$(date +%s%N)" --format json | jq -c ".observation.elements")"
+          echo "sample=$sample"
+          echo "sample=$sample" >> "$GITHUB_OUTPUT"

-  # Run the Enos test scenarios
+  # Run the Enos test scenario(s)
   run:
     needs: metadata
+    name: run ${{ matrix.scenario.id.filter }}
     strategy:
       fail-fast: false # don't fail as that can skip required cleanup steps for jobs
-      matrix: ${{ fromJson(needs.metadata.outputs.matrix) }}
-    runs-on: ubuntu-latest
+      matrix:
+        include: ${{ fromJSON(needs.metadata.outputs.sample) }}
+    runs-on: ${{ fromJSON(inputs.runs-on) }}
     env:
       GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
       # Pass in enos variables
-      ENOS_VAR_aws_region: ${{ matrix.aws_region }}
+      ENOS_VAR_aws_region: ${{ matrix.attributes.aws_region }}
       ENOS_VAR_aws_ssh_keypair_name: ${{ inputs.ssh-key-name }}
       ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem
       ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
@@ -121,7 +93,7 @@
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }}
-          aws-region: ${{ matrix.aws_region }}
+          aws-region: ${{ matrix.attributes.aws_region }}
           role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }}
           role-skip-session-tagging: true
           role-duration-seconds: 3600
@@ -135,12 +107,12 @@
           echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > "./enos/support/private_key.pem"
           chmod 600 "./enos/support/private_key.pem"
           echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT"
-      - if: contains(inputs.matrix-file-name, 'github')
+      - if: contains(inputs.sample-name, 'build')
         uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
         with:
           name: ${{ inputs.build-artifact-name }}
           path: ./enos/support/downloads
-      - if: contains(inputs.matrix-file-name, 'ent')
+      - if: contains(inputs.sample-name, 'ent')
         name: Configure Vault license
         run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true
       - name: Run Enos scenario
@@ -148,12 +120,11 @@
         id: run
         # Continue once and retry to handle occasional blips when creating
         # infrastructure.
         continue-on-error: true
-        run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }}
+        run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
       - name: Retry Enos scenario if necessary
         id: run_retry
         if: steps.run.outcome == 'failure'
-        continue-on-error: true
-        run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }}
+        run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
       - name: Upload Debug Data
         if: failure()
         uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
@@ -169,7 +140,7 @@
         # With Enos version 0.0.11 the destroy step returns an error if the infrastructure
         # is already destroyed by enos run. So temporarily setting it to continue on error in GHA
         continue-on-error: true
-        run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario }}
+        run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
       - name: Clean up Enos runtime directories
         id: cleanup
         if: ${{ always() }}
@@ -182,7 +153,7 @@
       # There is an incoming webhook set up on the "Enos Vault Failure Bot" Slackbot https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks
       - name: Send Slack notification on Enos run failure
         uses: hashicorp/actions-slack-status@v1
-        if: ${{ always() }}
+        if: ${{ always() && ! cancelled() }}
         with:
           failure-message: "An Enos scenario `run` failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
           status: ${{ steps.run.outcome }}
@@ -190,7 +161,7 @@
       # Send a Slack notification to #feed-vault-enos-failures if the 'run_retry' step fails.
       - name: Send Slack notification on Enos run_retry failure
         uses: hashicorp/actions-slack-status@v1
-        if: ${{ always() }}
+        if: ${{ always() && ! cancelled() }}
         with:
           failure-message: "An Enos scenario `run_retry` failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
           status: ${{ steps.run_retry.outcome }}
@@ -198,7 +169,7 @@
       # Send a Slack notification to #feed-vault-enos-failures if the 'destroy' step fails.
       - name: Send Slack notification on Enos destroy failure
         uses: hashicorp/actions-slack-status@v1
-        if: ${{ always() }}
+        if: ${{ always() && ! cancelled() }}
         with:
           failure-message: "An Enos scenario `destroy` failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
           status: ${{ steps.destroy.outcome }}
diff --git a/.go-version b/.go-version
index 3500250a4b05..2844977405c2 100644
--- a/.go-version
+++ b/.go-version
@@ -1 +1 @@
-1.21.0
+1.21.1
diff --git a/Makefile b/Makefile
index 670b413f847a..0cebdf74b840 100644
--- a/Makefile
+++ b/Makefile
@@ -113,7 +113,7 @@ vet:
	echo "and fix them if necessary before submitting the code for reviewal."; \
	fi

-# deprecations runs staticcheck tool to look for deprecations. Checks entire code to see if it 
+# deprecations runs staticcheck tool to look for deprecations. Checks entire code to see if it
 # has deprecated function, variable, constant or field deprecations
 deprecations: bootstrap prep
	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh ""
@@ -128,13 +128,13 @@ tools/codechecker/.bin/codechecker:

 # vet-codechecker runs our custom linters on the test functions. All output gets
 # piped to revgrep which will only return an error if new piece of code violates
-# the check 
+# the check
 vet-codechecker: bootstrap tools/codechecker/.bin/codechecker prep
	@$(GO_CMD) vet -vettool=./tools/codechecker/.bin/codechecker -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep

 # vet-codechecker runs our custom linters on the test functions. All output gets
-# piped to revgrep which will only return an error if new piece of code that is 
-# not on main violates the check 
+# piped to revgrep which will only return an error if new piece of code that is
+# not on main violates the check
 ci-vet-codechecker: ci-bootstrap tools/codechecker/.bin/codechecker prep
	@$(GO_CMD) vet -vettool=./tools/codechecker/.bin/codechecker -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep origin/main
@@ -279,7 +279,7 @@ hana-database-plugin:
 mongodb-database-plugin:
	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin

-.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-codechecker ci-vet-codechecker 
+.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-codechecker ci-vet-codechecker

 .NOTPARALLEL: ember-dist ember-dist-dev
@@ -293,34 +293,26 @@ ci-build:
 ci-build-ui:
	@$(CURDIR)/scripts/ci-helper.sh build-ui

-.PHONY: ci-filter-matrix
-ci-filter-matrix:
-	@$(CURDIR)/scripts/ci-helper.sh matrix-filter-file
+.PHONY: ci-bundle
+ci-bundle:
+	@$(CURDIR)/scripts/ci-helper.sh bundle
+
+.PHONY: ci-get-artifact-basename
+ci-get-artifact-basename:
+	@$(CURDIR)/scripts/ci-helper.sh artifact-basename

 .PHONY: ci-get-date
 ci-get-date:
	@$(CURDIR)/scripts/ci-helper.sh date

-.PHONY: ci-get-matrix-group-id
-ci-get-matrix-group-id:
-	@$(CURDIR)/scripts/ci-helper.sh matrix-group-id

 .PHONY: ci-get-revision
 ci-get-revision:
	@$(CURDIR)/scripts/ci-helper.sh revision

-.PHONY: ci-prepare-legal
-ci-prepare-legal:
-	@$(CURDIR)/scripts/ci-helper.sh prepare-legal
-
 .PHONY: ci-get-version-package
 ci-get-version-package:
	@$(CURDIR)/scripts/ci-helper.sh version-package

-.PHONY: ci-get-artifact-basename
-ci-get-artifact-basename:
-	@$(CURDIR)/scripts/ci-helper.sh artifact-basename
-
-.PHONY: ci-bundle
-ci-bundle:
-	@$(CURDIR)/scripts/ci-helper.sh bundle
-
+.PHONY: ci-prepare-legal
+ci-prepare-legal:
+	@$(CURDIR)/scripts/ci-helper.sh prepare-legal
diff --git a/api/sys_plugins_runtimes.go b/api/sys_plugins_runtimes.go
index c3380a85d1bf..d88bca9b7269 100644
--- a/api/sys_plugins_runtimes.go
+++ b/api/sys_plugins_runtimes.go
@@ -64,8 +64,8 @@ type RegisterPluginRuntimeInput struct {

	OCIRuntime   string `json:"oci_runtime,omitempty"`
	CgroupParent string `json:"cgroup_parent,omitempty"`
-	CPU          int64  `json:"cpu,omitempty"`
-	Memory       int64  `json:"memory,omitempty"`
+	CPU          int64  `json:"cpu_nanos,omitempty"`
+	Memory       int64  `json:"memory_bytes,omitempty"`
 }

 // RegisterPluginRuntime registers the plugin with the given information.
diff --git a/audit/options.go b/audit/options.go
index 812ac51d5479..71ae8cf843fb 100644
--- a/audit/options.go
+++ b/audit/options.go
@@ -13,8 +13,9 @@ import (
 // getDefaultOptions returns options with their default values.
 func getDefaultOptions() options {
	return options{
-		withNow:    time.Now(),
-		withFormat: JSONFormat,
+		withNow:          time.Now(),
+		withFormat:       JSONFormat,
+		withHMACAccessor: true,
	}
 }
@@ -108,11 +109,7 @@
 // WithPrefix provides an Option to represent a prefix for a file sink.
 func WithPrefix(prefix string) Option {
	return func(o *options) error {
-		prefix = strings.TrimSpace(prefix)
-
-		if prefix != "" {
-			o.withPrefix = prefix
-		}
+		o.withPrefix = prefix

		return nil
	}
diff --git a/audit/options_test.go b/audit/options_test.go
index 5d089f229b3f..edb8e6142ff2 100644
--- a/audit/options_test.go
+++ b/audit/options_test.go
@@ -211,9 +211,9 @@ func TestOptions_WithPrefix(t *testing.T) {
			ExpectedValue:   "",
		},
		"whitespace": {
-			Value:                "    ",
-			IsErrorExpected:      false,
-			ExpectedErrorMessage: "",
+			Value:           "    ",
+			IsErrorExpected: false,
+			ExpectedValue:   "    ",
		},
		"valid": {
			Value: "test",
diff --git a/audit/writer_json_test.go b/audit/writer_json_test.go
index 47320cc8a175..822f26851be9 100644
--- a/audit/writer_json_test.go
+++ b/audit/writer_json_test.go
@@ -98,7 +98,7 @@ func TestFormatJSON_formatRequest(t *testing.T) {
	for name, tc := range cases {
		var buf bytes.Buffer

-		cfg, err := NewFormatterConfig()
+		cfg, err := NewFormatterConfig(WithHMACAccessor(false))
		require.NoError(t, err)
		f, err := NewEntryFormatter(cfg, ss)
		require.NoError(t, err)
diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go
index 402c064472d6..f715c20e3218 100644
--- a/builtin/audit/file/backend.go
+++ b/builtin/audit/file/backend.go
@@ -22,6 +22,11 @@ import (
	"github.com/hashicorp/vault/sdk/logical"
 )

+const (
+	stdout  = "stdout"
+	discard = "discard"
+)
+
 func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
	if conf.SaltConfig == nil {
		return nil, fmt.Errorf("nil salt config")
@@ -39,53 +44,45 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
	}

	// normalize path if configured for stdout
-	if strings.EqualFold(path, "stdout") {
-		path = "stdout"
+	if strings.EqualFold(path, stdout) {
+		path = stdout
	}
-	if strings.EqualFold(path, "discard") {
-		path = "discard"
+	if strings.EqualFold(path, discard) {
+		path = discard
	}

-	format, ok := conf.Config["format"]
-	if !ok {
-		format = audit.JSONFormat.String()
-	}
-	switch format {
-	case audit.JSONFormat.String(), audit.JSONxFormat.String():
-	default:
-		return nil, fmt.Errorf("unknown format type %q", format)
+	var cfgOpts []audit.Option
+
+	if format, ok := conf.Config["format"]; ok {
+		cfgOpts = append(cfgOpts, audit.WithFormat(format))
	}

	// Check if hashing of accessor is disabled
-	hmacAccessor := true
	if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
-		value, err := strconv.ParseBool(hmacAccessorRaw)
+		v, err := strconv.ParseBool(hmacAccessorRaw)
		if err != nil {
			return nil, err
		}
-		hmacAccessor = value
+		cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
	}

	// Check if raw logging is enabled
-	logRaw := false
	if raw, ok := conf.Config["log_raw"]; ok {
-		b, err := strconv.ParseBool(raw)
+		v, err := strconv.ParseBool(raw)
		if err != nil {
			return nil, err
		}
-		logRaw = b
+		cfgOpts = append(cfgOpts, audit.WithRaw(v))
	}

-	elideListResponses := false
	if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
-		value, err := strconv.ParseBool(elideListResponsesRaw)
+		v, err := strconv.ParseBool(elideListResponsesRaw)
		if err != nil {
			return nil, err
		}
-		elideListResponses = value
+		cfgOpts = append(cfgOpts, audit.WithElision(v))
	}

-	// Check if mode is provided
	mode := os.FileMode(0o600)
	if modeRaw, ok := conf.Config["mode"]; ok {
		m, err := strconv.ParseUint(modeRaw, 8, 32)
@@ -95,7 +92,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
		switch m {
		case 0:
			// if mode is 0000, then do not modify file mode
-			if path != "stdout" && path != "discard" {
+			if path != stdout && path != discard {
				fileInfo, err := os.Stat(path)
				if err != nil {
					return nil, err
@@ -107,12 +104,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
		}
	}

-	cfg, err := audit.NewFormatterConfig(
-		audit.WithElision(elideListResponses),
-		audit.WithFormat(format),
-		audit.WithHMACAccessor(hmacAccessor),
-		audit.WithRaw(logRaw),
-	)
+	cfg, err := audit.NewFormatterConfig(cfgOpts...)
	if err != nil {
		return nil, err
	}
@@ -136,11 +128,13 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
		return nil, fmt.Errorf("error creating formatter: %w", err)
	}
	var w audit.Writer
-	switch format {
-	case "json":
+	switch b.formatConfig.RequiredFormat {
+	case audit.JSONFormat:
		w = &audit.JSONWriter{Prefix: conf.Config["prefix"]}
-	case "jsonx":
+	case audit.JSONxFormat:
		w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
+	default:
+		return nil, fmt.Errorf("unknown format type %q", b.formatConfig.RequiredFormat)
	}

	fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
@@ -164,16 +158,24 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
		var sinkNode eventlogger.Node

		switch path {
-		case "stdout":
-			sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewStdoutSinkNode(format)}
-		case "discard":
+		case stdout:
+			sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewStdoutSinkNode(b.formatConfig.RequiredFormat.String())}
+		case discard:
			sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewNoopSink()}
		default:
			var err error

+			var opts []event.Option
+			// Check if mode is provided
+			if modeRaw, ok := conf.Config["mode"]; ok {
+				opts = append(opts, event.WithFileMode(modeRaw))
+			}
+
			// The NewFileSink function attempts to open the file and will
			// return an error if it can't.
-			n, err := event.NewFileSink(b.path, format, event.WithFileMode(strconv.FormatUint(uint64(mode), 8)))
+			n, err := event.NewFileSink(
+				b.path,
+				b.formatConfig.RequiredFormat.String(), opts...)
			if err != nil {
				return nil, fmt.Errorf("file sink creation failed for path %q: %w", path, err)
			}
@@ -189,8 +191,8 @@
		b.nodeMap[sinkNodeID] = sinkNode
	} else {
		switch path {
-		case "stdout":
-		case "discard":
+		case stdout:
+		case discard:
		default:
			// Ensure that the file can be successfully opened for writing;
			// otherwise it will be too late to catch later without problems
@@ -257,9 +259,9 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
 func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
	var writer io.Writer
	switch b.path {
-	case "stdout":
+	case stdout:
		writer = os.Stdout
-	case "discard":
+	case discard:
		return nil
	}
@@ -288,7 +290,7 @@
	if _, err := reader.WriteTo(writer); err == nil {
		b.fileLock.Unlock()
		return nil
-	} else if b.path == "stdout" {
+	} else if b.path == stdout {
		b.fileLock.Unlock()
		return err
	}
@@ -313,9 +315,9 @@
 func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
	var writer io.Writer
	switch b.path {
-	case "stdout":
+	case stdout:
		writer = os.Stdout
-	case "discard":
+	case discard:
		return nil
	}
@@ -337,9 +339,9 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
	// Old behavior
	var writer io.Writer
	switch b.path {
-	case "stdout":
+	case stdout:
		writer = os.Stdout
-	case "discard":
+	case discard:
		return nil
	}
@@ -390,7 +392,7 @@ func (b *Backend) open() error {
 func (b *Backend) Reload(_ context.Context) error {
	switch b.path {
-	case "stdout", "discard":
+	case stdout, discard:
		return nil
	}
diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go
index cd87689b46a1..85370a3506f4 100644
--- a/builtin/audit/socket/backend.go
+++ b/builtin/audit/socket/backend.go
@@ -12,9 +12,10 @@ import (
	"sync"
	"time"

+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+
	"github.com/hashicorp/eventlogger"
	"github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/go-secure-stdlib/parseutil"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/internal/observability/event"
	"github.com/hashicorp/vault/sdk/helper/salt"
@@ -38,7 +39,6 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
	if !ok {
		socketType = "tcp"
	}
-
	writeDeadline, ok := conf.Config["write_timeout"]
	if !ok {
		writeDeadline = "2s"
@@ -48,51 +48,39 @@
		return nil, err
	}

-	format, ok := conf.Config["format"]
-	if !ok {
-		format = audit.JSONFormat.String()
-	}
-	switch format {
-	case audit.JSONFormat.String(), audit.JSONxFormat.String():
-	default:
-		return nil, fmt.Errorf("unknown format type %q", format)
+	var cfgOpts []audit.Option
+
+	if format, ok := conf.Config["format"]; ok {
+		cfgOpts = append(cfgOpts, audit.WithFormat(format))
	}

	// Check if hashing of accessor is disabled
-	hmacAccessor := true
	if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
-		value, err := strconv.ParseBool(hmacAccessorRaw)
+		v, err := strconv.ParseBool(hmacAccessorRaw)
		if err != nil {
			return nil, err
		}
-		hmacAccessor = value
+		cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
	}

	// Check if raw logging is enabled
-	logRaw := false
	if raw, ok := conf.Config["log_raw"]; ok {
-		b, err := strconv.ParseBool(raw)
+		v, err := strconv.ParseBool(raw)
		if err != nil {
			return nil, err
		}
-		logRaw = b
+		cfgOpts = append(cfgOpts, audit.WithRaw(v))
	}

-	elideListResponses := false
	if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
-		value, err := strconv.ParseBool(elideListResponsesRaw)
+		v, err := strconv.ParseBool(elideListResponsesRaw)
		if err != nil {
			return nil, err
		}
-		elideListResponses = value
+		cfgOpts = append(cfgOpts, audit.WithElision(v))
	}

-	cfg, err := audit.NewFormatterConfig(
-		audit.WithElision(elideListResponses),
-		audit.WithFormat(format),
-		audit.WithHMACAccessor(hmacAccessor),
-		audit.WithRaw(logRaw),
-	)
+	cfg, err := audit.NewFormatterConfig(cfgOpts...)
	if err != nil {
		return nil, err
	}
@@ -113,10 +101,10 @@
		return nil, fmt.Errorf("error creating formatter: %w", err)
	}
	var w audit.Writer
-	switch format {
-	case audit.JSONFormat.String():
+	switch b.formatConfig.RequiredFormat {
+	case audit.JSONFormat:
		w = &audit.JSONWriter{Prefix: conf.Config["prefix"]}
-	case audit.JSONxFormat.String():
+	case audit.JSONxFormat:
		w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
	}
@@ -128,6 +116,16 @@
	b.formatter = fw

	if useEventLogger {
+		var opts []event.Option
+
+		if socketType, ok := conf.Config["socket_type"]; ok {
+			opts = append(opts, event.WithSocketType(socketType))
+		}
+
+		if writeDeadline, ok := conf.Config["write_timeout"]; ok {
+			opts = append(opts, event.WithMaxDuration(writeDeadline))
+		}
+
		b.nodeIDList = make([]eventlogger.NodeID, 2)
		b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
@@ -138,7 +136,7 @@
		b.nodeIDList[0] = formatterNodeID
		b.nodeMap[formatterNodeID] = f

-		n, err := event.NewSocketSink(format, address, event.WithSocketType(socketType), event.WithMaxDuration(writeDuration.String()))
+		n, err := event.NewSocketSink(b.formatConfig.RequiredFormat.String(), address, opts...)
		if err != nil {
			return nil, fmt.Errorf("error creating socket sink node: %w", err)
		}
diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go
index 07fe94b3fe35..f1b7f8179045 100644
--- a/builtin/audit/syslog/backend.go
+++ b/builtin/audit/syslog/backend.go
@@ -39,57 +39,45 @@
		tag = "vault"
	}

-	format, ok := conf.Config["format"]
-	if !ok {
-		format = audit.JSONFormat.String()
-	}
-	switch format {
-	case audit.JSONFormat.String(), audit.JSONxFormat.String():
-	default:
-		return nil, fmt.Errorf("unknown format type %q", format)
+	var cfgOpts []audit.Option
+
+	if format, ok := conf.Config["format"]; ok {
+		cfgOpts = append(cfgOpts, audit.WithFormat(format))
	}

	// Check if hashing of accessor is disabled
-	hmacAccessor := true
	if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
-		value, err := strconv.ParseBool(hmacAccessorRaw)
+		v, err := strconv.ParseBool(hmacAccessorRaw)
		if err != nil {
			return nil, err
		}
-		hmacAccessor = value
+		cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
	}

	// Check if raw logging is enabled
-	logRaw := false
	if raw, ok := conf.Config["log_raw"]; ok {
-		b, err := strconv.ParseBool(raw)
+		v, err := strconv.ParseBool(raw)
		if err != nil {
			return nil, err
		}
-		logRaw = b
+		cfgOpts = append(cfgOpts, audit.WithRaw(v))
	}

-	elideListResponses := false
	if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
-		value, err := strconv.ParseBool(elideListResponsesRaw)
+		v, err := strconv.ParseBool(elideListResponsesRaw)
		if err != nil {
			return nil, err
		}
-		elideListResponses = value
+		cfgOpts = append(cfgOpts, audit.WithElision(v))
	}

-	// Get the logger
-	logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
+	cfg, err := audit.NewFormatterConfig(cfgOpts...)
	if err != nil {
		return nil, err
	}

-	cfg, err := audit.NewFormatterConfig(
-		audit.WithElision(elideListResponses),
-		audit.WithFormat(format),
-		audit.WithHMACAccessor(hmacAccessor),
-		audit.WithRaw(logRaw),
-	)
+	// Get the logger
+	logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
	if err != nil {
		return nil, err
	}
@@ -108,10 +96,10 @@
	}

	var w audit.Writer
-	switch format {
-	case audit.JSONFormat.String():
+	switch b.formatConfig.RequiredFormat {
+	case audit.JSONFormat:
		w = &audit.JSONWriter{Prefix: conf.Config["prefix"]}
-	case audit.JSONxFormat.String():
+	case audit.JSONxFormat:
		w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
	}
@@ -123,6 +111,17 @@
	b.formatter = fw

	if useEventLogger {
+		var opts []event.Option
+
+		// Get facility or default to AUTH
+		if facility, ok := conf.Config["facility"]; ok {
+			opts = append(opts, event.WithFacility(facility))
+		}
+
+		if tag, ok := conf.Config["tag"]; ok {
+			opts = append(opts, event.WithTag(tag))
+		}
+
		b.nodeIDList = make([]eventlogger.NodeID, 2)
		b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
@@ -133,7 +132,7 @@
		b.nodeIDList[0] = formatterNodeID
		b.nodeMap[formatterNodeID] = f

-		n, err := event.NewSyslogSink(format, event.WithFacility(facility), event.WithTag(tag))
+		n, err := event.NewSyslogSink(b.formatConfig.RequiredFormat.String(), opts...)
		if err != nil {
			return nil, fmt.Errorf("error creating syslog sink node: %w", err)
		}
diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go
index 3124db2c2a6a..b7d5cc8b4bf7 100644
--- a/builtin/logical/transit/backend_test.go
+++ b/builtin/logical/transit/backend_test.go
@@ -1153,10 +1153,12 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT
	// Now test encrypting the same value twice
	req.Data = map[string]interface{}{
-		"plaintext": "emlwIHphcA==",     // "zip zap"
-		"nonce":     "b25ldHdvdGhyZWVl", // "onetwothreee"
+		"plaintext": "emlwIHphcA==", // "zip zap"
		"context":   "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
	}
+	if ver == 0 {
+		req.Data["nonce"] = "b25ldHdvdGhyZWVl" // "onetwothreee"
+	}
	resp, err = b.HandleRequest(context.Background(), req)
	if err != nil {
		t.Fatal(err)
@@ -1187,11 +1189,10 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT
	// For sanity, also check a different nonce value...
	req.Data = map[string]interface{}{
-		"plaintext": "emlwIHphcA==",     // "zip zap"
-		"nonce":     "dHdvdGhyZWVmb3Vy", // "twothreefour"
+		"plaintext": "emlwIHphcA==", // "zip zap"
		"context":   "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
	}
-	if ver < 2 {
+	if ver == 0 {
		req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour"
	} else {
		req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S"
@@ -1230,10 +1231,12 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT
	// ...and a different context value
	req.Data = map[string]interface{}{
-		"plaintext": "emlwIHphcA==",     // "zip zap"
-		"nonce":     "dHdvdGhyZWVmb3Vy", // "twothreefour"
+		"plaintext": "emlwIHphcA==", // "zip zap"
		"context":   "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT",
	}
+	if ver == 0 {
+		req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour"
+	}
	resp, err = b.HandleRequest(context.Background(), req)
	if err != nil {
		t.Fatal(err)
@@ -1345,9 +1348,11 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT
	// Finally, check operations on empty values
	// First, check without setting a plaintext at all
	req.Data = map[string]interface{}{
-		"nonce":   "b25ldHdvdGhyZWVl", // "onetwothreee"
		"context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
	}
+	if ver == 0 {
+		req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour"
+	}
	resp, err = b.HandleRequest(context.Background(), req)
	if err == nil {
		t.Fatal("expected error, got nil")
@@ -1362,9 +1367,11 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT
	// Now set plaintext to empty
	req.Data = map[string]interface{}{
		"plaintext": "",
-		"nonce":     "b25ldHdvdGhyZWVl", // "onetwothreee"
		"context":   "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
	}
+	if ver == 0 {
+		req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour"
+	}
	resp, err = b.HandleRequest(context.Background(), req)
	if err != nil {
		t.Fatal(err)
diff --git a/builtin/logical/transit/path_datakey.go b/builtin/logical/transit/path_datakey.go
index 9a82eceb2a36..ad3887e0e3d7 100644
--- a/builtin/logical/transit/path_datakey.go
+++ b/builtin/logical/transit/path_datakey.go
@@ -170,6 +170,10 @@ func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d
		},
	}

+	if len(nonce) > 0 && !nonceAllowed(p) {
+		return nil, ErrNonceNotAllowed
+	}
+
	if constants.IsFIPS() && shouldWarnAboutNonceUsage(p, nonce) {
resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.") } diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go index 390b0eaef00c..0b6c98e3aa63 100644 --- a/builtin/logical/transit/path_encrypt.go +++ b/builtin/logical/transit/path_encrypt.go @@ -468,6 +468,13 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d successesInBatch := false for i, item := range batchInputItems { if batchResponseItems[i].Error != "" { + userErrorInBatch = true + continue + } + + if item.Nonce != "" && !nonceAllowed(p) { + userErrorInBatch = true + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() continue } @@ -568,6 +575,25 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) } +func nonceAllowed(p *keysutil.Policy) bool { + var supportedKeyType bool + switch p.Type { + case keysutil.KeyType_MANAGED_KEY: + return true + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: + supportedKeyType = true + default: + supportedKeyType = false + } + + if supportedKeyType && p.ConvergentEncryption && p.ConvergentVersion == 1 { + // We only use the user supplied nonce for v1 convergent encryption keys + return true + } + + return false +} + // Depending on the errors in the batch, different status codes should be returned. User errors // will return a 400 and precede internal errors which return a 500. The reasoning behind this is // that user errors are non-retryable without making changes to the request, and should be surfaced diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go index 1b64982428ed..4f5088e8e669 100644 --- a/builtin/logical/transit/path_encrypt_test.go +++ b/builtin/logical/transit/path_encrypt_test.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "net/http" "reflect" "strings" "testing" @@ -654,13 +655,26 @@ func TestTransit_BatchEncryptionCase12(t *testing.T) { } // Case13: Incorrect input for nonce when we aren't in convergent encryption should fail the operation -func TestTransit_BatchEncryptionCase13(t *testing.T) { +func TestTransit_EncryptionCase13(t *testing.T) { var err error b, s := createBackendWithStorage(t) + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + batchInput := []interface{}{ - map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "YmFkbm9uY2U="}, + map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"}, } batchData := map[string]interface{}{ @@ -672,10 +686,71 @@ func TestTransit_BatchEncryptionCase13(t *testing.T) { Storage: s, Data: batchData, } - _, err = b.HandleRequest(context.Background(), batchReq) + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil { + t.Fatal(err) + } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } +} + +// Case14: Incorrect input for nonce when we are in convergent version 3 should fail +func TestTransit_EncryptionCase14(t 
*testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + cReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/my-key", + Storage: s, + Data: map[string]interface{}{ + "convergent_encryption": "true", + "derived": "true", + }, + } + resp, err := b.HandleRequest(context.Background(), cReq) + if err != nil { + t.Fatal(err) + } + + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "context": "SGVsbG8sIFdvcmxkCg==", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + + batchInput := []interface{}{ + data, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) if err != nil { t.Fatal(err) } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } } // Test that the fast path function decodeBatchRequestItems behave like mapstructure.Decode() to decode []BatchRequestItem. diff --git a/builtin/logical/transit/path_rewrap.go b/builtin/logical/transit/path_rewrap.go index e8d17358e83b..15a213748c4b 100644 --- a/builtin/logical/transit/path_rewrap.go +++ b/builtin/logical/transit/path_rewrap.go @@ -6,6 +6,7 @@ package transit import ( "context" "encoding/base64" + "errors" "fmt" "github.com/hashicorp/vault/helper/constants" @@ -16,6 +17,8 @@ import ( "github.com/mitchellh/mapstructure" ) +var ErrNonceNotAllowed = errors.New("provided nonce not allowed for this key") + func (b *backend) pathRewrap() *framework.Path { return &framework.Path{ Pattern: "rewrap/" + framework.GenericNameRegex("name"), @@ -152,6 +155,11 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * continue } + if item.Nonce != "" && !nonceAllowed(p) { + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() + continue + } + plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext) if err != nil { switch err.(type) { diff --git a/changelog/22852.txt b/changelog/22852.txt new file mode 100644 index 000000000000..3a667eb23bb0 --- /dev/null +++ b/changelog/22852.txt @@ -0,0 +1,3 @@ +```release-note:security +secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. +``` diff --git a/changelog/22879.txt b/changelog/22879.txt new file mode 100644 index 000000000000..335b099ce46a --- /dev/null +++ b/changelog/22879.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.17.1 +``` diff --git a/changelog/22907.txt b/changelog/22907.txt new file mode 100644 index 000000000000..dfaa4e1b0431 --- /dev/null +++ b/changelog/22907.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.7.3 +``` diff --git a/changelog/22914.txt b/changelog/22914.txt new file mode 100644 index 000000000000..2764d4856938 --- /dev/null +++ b/changelog/22914.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. +``` +```release-note:bug +plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. 
+``` diff --git a/changelog/22926.txt b/changelog/22926.txt new file mode 100644 index 000000000000..69da688a10d5 --- /dev/null +++ b/changelog/22926.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds mount configuration details to Kubernetes secrets engine configuration view +``` \ No newline at end of file diff --git a/changelog/_22640.txt b/changelog/_22640.txt new file mode 100644 index 000000000000..cbc85c75119f --- /dev/null +++ b/changelog/_22640.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Add support for SAML login flow +``` diff --git a/changelog/_go-ver-1150.txt b/changelog/_go-ver-1150.txt index 72a01d6dce9e..6df482655f34 100644 --- a/changelog/_go-ver-1150.txt +++ b/changelog/_go-ver-1150.txt @@ -1,3 +1,3 @@ ```release-note:change -core: Bump Go version to 1.21.0. +core: Bump Go version to 1.21.1. ``` diff --git a/command/agent_test.go b/command/agent_test.go index 2edde97564b0..3cc5d6446da7 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -390,7 +390,7 @@ listener "tcp" { select { case <-cmd.startedCh: case <-time.After(5 * time.Second): - t.Errorf("timeout") + t.Fatalf("timeout") } // defer agent shutdown diff --git a/command/commands.go b/command/commands.go index 45c09c786895..d26f9a782394 100644 --- a/command/commands.go +++ b/command/commands.go @@ -604,6 +604,31 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "plugin runtime": func() (cli.Command, error) { + return &PluginRuntimeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime register": func() (cli.Command, error) { + return &PluginRuntimeRegisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime deregister": func() (cli.Command, error) { + return &PluginRuntimeDeregisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime info": func() (cli.Command, error) { + return &PluginRuntimeInfoCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime list": func() (cli.Command, error) { + return &PluginRuntimeListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "proxy": func() (cli.Command, error) { return &ProxyCommand{ BaseCommand: &BaseCommand{ diff --git a/command/plugin_register_test.go b/command/plugin_register_test.go index c8c5fcf5d8ad..e2c3ce3e7020 100644 --- a/command/plugin_register_test.go +++ b/command/plugin_register_test.go @@ -6,14 +6,11 @@ package command import ( "encoding/json" "fmt" - "io" - "net/http" "reflect" "sort" "strings" "testing" - "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" @@ -338,40 +335,3 @@ func TestFlagParsing(t *testing.T) { }) } } - -func mockClient(t *testing.T) (*api.Client, *recordingRoundTripper) { - t.Helper() - - config := api.DefaultConfig() - httpClient := cleanhttp.DefaultClient() - roundTripper := &recordingRoundTripper{} - httpClient.Transport = roundTripper - config.HttpClient = httpClient - client, err := api.NewClient(config) - if err != nil { - t.Fatal(err) - } - - return client, roundTripper -} - -var _ http.RoundTripper = (*recordingRoundTripper)(nil) - -type recordingRoundTripper struct { - path string - body []byte -} - -func (r *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - r.path = req.URL.Path - defer req.Body.Close() - body, err := io.ReadAll(req.Body) - if err != nil { - return nil, err - } - - r.body = 
body - return &http.Response{ - StatusCode: 200, - }, nil -} diff --git a/command/plugin_runtime.go b/command/plugin_runtime.go new file mode 100644 index 000000000000..38781a724879 --- /dev/null +++ b/command/plugin_runtime.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*PluginRuntimeCommand)(nil) + +type PluginRuntimeCommand struct { + *BaseCommand +} + +func (c *PluginRuntimeCommand) Synopsis() string { + return "Interact with Vault plugin runtimes catalog." +} + +func (c *PluginRuntimeCommand) Help() string { + helpText := ` +Usage: vault plugin runtime [options] [args] + + This command groups subcommands for interacting with Vault's plugin runtimes and the + plugin runtime catalog. The plugin runtime catalog is divided into types. Currently, + Vault only supports "container" plugin runtimes. A plugin runtime allows users to + fine-tune the parameters with which a plugin is executed. For example, you can select + a different OCI-compatible runtime, or set resource limits. A plugin runtime can + optionally be referenced during plugin registration. A type must be specified on each call. + Here are a few examples of the plugin runtime commands. + + List all available plugin runtimes in the catalog of a particular type: + + $ vault plugin runtime list -type=container + + Register a new plugin runtime to the catalog as a particular type: + + $ vault plugin runtime register -type=container -oci_runtime=my-oci-runtime my-custom-plugin-runtime + + Get information about a plugin runtime in the catalog listed under a particular type: + + $ vault plugin runtime info -type=container my-custom-plugin-runtime + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/plugin_runtime_deregister.go b/command/plugin_runtime_deregister.go new file mode 100644 index 000000000000..dfadc20a0972 --- /dev/null +++ b/command/plugin_runtime_deregister.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeDeregisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeDeregisterCommand)(nil) +) + +type PluginRuntimeDeregisterCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeDeregisterCommand) Synopsis() string { + return "Deregister an existing plugin runtime in the catalog" +} + +func (c *PluginRuntimeDeregisterCommand) Help() string { + helpText := ` +Usage: vault plugin runtime deregister [options] NAME + + Deregister an existing plugin runtime in the catalog with the given name. If + any registered plugin references the plugin runtime, an error is returned. If + the plugin runtime does not exist, an error is returned. The -type flag + currently only accepts "container". 
+ + Deregister a plugin runtime: + + $ vault plugin runtime deregister -type=container my-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeDeregisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports the \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeDeregisterCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeDeregisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeDeregisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime deregistration") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + if err = client.Sys().DeregisterPluginRuntime(context.Background(), &api.DeregisterPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error deregistering plugin runtime named %s: %s", runtimeName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Deregistered plugin runtime: %s", runtimeName)) + return 0 +} diff --git a/command/plugin_runtime_deregister_test.go b/command/plugin_runtime_deregister_test.go new file mode 100644 index 000000000000..5cd411d5f700 --- /dev/null +++ b/command/plugin_runtime_deregister_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPluginRuntimeDeregisterCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeDeregisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeDeregisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeDeregisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{"-type=container"}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type=container", "foo", "baz"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo", "bar"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "deregister_container_on_empty_plugin_runtime_catalog", + []string{"-type=container", "my-plugin-runtime"}, + "Error deregistering plugin runtime named my-plugin-runtime", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeDeregisterCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeDeregisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container", "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error deregistering plugin runtime named my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeDeregisterCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_info.go b/command/plugin_runtime_info.go new file mode 100644 index 000000000000..b22af6c50119 --- /dev/null +++ b/command/plugin_runtime_info.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeInfoCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeInfoCommand)(nil) +) + +type PluginRuntimeInfoCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeInfoCommand) Synopsis() string { + return "Read information about a plugin runtime in the catalog" +} + +func (c *PluginRuntimeInfoCommand) Help() string { + helpText := ` +Usage: vault plugin runtime info [options] NAME + + Displays information about a plugin runtime in the catalog with the given name. If + the plugin runtime does not exist, an error is returned. The -type flag + currently only accepts "container".
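+  The output includes the runtime's OCI runtime, parent cgroup, and CPU and memory limits.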
+ + Get info about a plugin runtime: + + $ vault plugin runtime info -type=container my-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeInfoCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports the \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeInfoCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeInfoCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeInfoCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime info retrieval") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + resp, err := client.Sys().GetPluginRuntime(context.Background(), &api.GetPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading plugin runtime named %s: %s", runtimeName, err)) + return 2 + } + + if resp == nil { + c.UI.Error(fmt.Sprintf("No value found for plugin runtime %q", runtimeName)) + return 2 + } + + data := map[string]interface{}{ + "name": resp.Name, + "type": resp.Type, + "oci_runtime": resp.OCIRuntime, + "cgroup_parent": resp.CgroupParent, + "cpu_nanos": resp.CPU, + "memory_bytes": resp.Memory, + } + + if c.flagField != "" { + return PrintRawField(c.UI, data, c.flagField) + } + return OutputData(c.UI, data) +} diff --git a/command/plugin_runtime_info_test.go b/command/plugin_runtime_info_test.go new file mode 100644 index 000000000000..cf1a5aee7c14 --- /dev/null +++ b/command/plugin_runtime_info_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPluginRuntimeInfoCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeInfoCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeInfoCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeInfoCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{"-type=container"}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type=container", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo", "bar"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "info_container_on_empty_plugin_runtime_catalog", + []string{"-type=container", "my-plugin-runtime"}, + "Error reading plugin runtime named my-plugin-runtime", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeInfoCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container", "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading plugin runtime named my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeInfoCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_list.go b/command/plugin_runtime_list.go new file mode 100644 index 000000000000..9fca07da6de6 --- /dev/null +++ b/command/plugin_runtime_list.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeListCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeListCommand)(nil) +) + +type PluginRuntimeListCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeListCommand) Synopsis() string { + return "Lists available plugin runtimes" +} + +func (c *PluginRuntimeListCommand) Help() string { + helpText := ` +Usage: vault plugin runtime list [options] + + Lists available plugin runtimes registered in the catalog. This does not list whether + plugin runtimes are in use, but rather just their availability. 
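+  The table output shows each runtime's name, type, OCI runtime, parent cgroup, and CPU and memory limits.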
+ + List all available plugin runtimes in the catalog: + + $ vault plugin runtime list + + List all available container plugin runtimes in the catalog: + + $ vault plugin runtime list -type=container + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports the \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeListCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if len(f.Args()) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(f.Args()))) + return 1 + } + + var input *api.ListPluginRuntimesInput + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) > 0 { + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + input = &api.ListPluginRuntimesInput{Type: runtimeType} + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Sys().ListPluginRuntimes(context.Background(), input) + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing available plugin runtimes: %s", err)) + return 2 + } + if resp == nil { + c.UI.Error("No response from server when listing plugin runtimes") + return 2 + } + + switch Format(c.UI) { + case "table": + c.UI.Output(tableOutput(c.tableResponse(resp), nil)) + return 0 + default: + return OutputData(c.UI, resp.Runtimes) + } +} + +func (c *PluginRuntimeListCommand) tableResponse(response *api.ListPluginRuntimesResponse) []string { + out := []string{"Name | Type | OCI Runtime | Parent Cgroup | CPU Nanos | Memory Bytes"} + for _, runtime := range response.Runtimes { + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %d | %d", + runtime.Name, runtime.Type, runtime.OCIRuntime, runtime.CgroupParent, runtime.CPU, runtime.Memory)) + } + + return out +} diff --git a/command/plugin_runtime_list_test.go b/command/plugin_runtime_list_test.go new file mode 100644 index 000000000000..50ba6a6112ad --- /dev/null +++ b/command/plugin_runtime_list_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPluginRuntimeListCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "list container on empty plugin runtime catalog", + []string{"-type=container"}, + "Error listing available plugin runtimes:", + 2, + }, + { + "list on empty plugin runtime catalog", + nil, + "Error listing available plugin runtimes:", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeListCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing available plugin runtimes: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeListCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_register.go b/command/plugin_runtime_register.go new file mode 100644 index 000000000000..11b831512e4f --- /dev/null +++ b/command/plugin_runtime_register.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeRegisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeRegisterCommand)(nil) +) + +type PluginRuntimeRegisterCommand struct { + *BaseCommand + + flagType string + flagOCIRuntime string + flagCgroupParent string + flagCPUNanos int64 + flagMemoryBytes int64 +} + +func (c *PluginRuntimeRegisterCommand) Synopsis() string { + return "Registers a new plugin runtime in the catalog" +} + +func (c *PluginRuntimeRegisterCommand) Help() string { + helpText := ` +Usage: vault plugin runtime register [options] NAME + + Registers a new plugin runtime in the catalog. Currently, Vault only supports registering runtimes of type "container". +  The OCI runtime must be available on Vault's host. If no OCI runtime is specified, Vault will use "runsc", gVisor's OCI runtime.
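+  Use the -cgroup_parent, -cpu_nanos, and -memory_bytes flags to constrain the resources available to plugins executed with this runtime.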
+ + Register the plugin runtime named my-custom-plugin-runtime: + + $ vault plugin runtime register -type=container -oci_runtime=my-oci-runtime my-custom-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeRegisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports the \"container\" runtime type.", + }) + + f.StringVar(&StringVar{ + Name: "oci_runtime", + Target: &c.flagOCIRuntime, + Completion: complete.PredictAnything, + Usage: "OCI runtime. Default is \"runsc\", gVisor's OCI runtime.", + }) + + f.StringVar(&StringVar{ + Name: "cgroup_parent", + Target: &c.flagCgroupParent, + Completion: complete.PredictAnything, + Usage: "Parent cgroup to set for each container. This can be used to control the total resource usage for a group of plugins.", + }) + + f.Int64Var(&Int64Var{ + Name: "cpu_nanos", + Target: &c.flagCPUNanos, + Completion: complete.PredictAnything, + Usage: "CPU limit to set per container in nanos. Defaults to no limit.", + }) + + f.Int64Var(&Int64Var{ + Name: "memory_bytes", + Target: &c.flagMemoryBytes, + Completion: complete.PredictAnything, + Usage: "Memory limit to set per container in bytes. Defaults to no limit.", + }) + + return set +} + +func (c *PluginRuntimeRegisterCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeRegisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeRegisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime registration") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + ociRuntime := strings.TrimSpace(c.flagOCIRuntime) + cgroupParent := strings.TrimSpace(c.flagCgroupParent) + + if err := client.Sys().RegisterPluginRuntime(context.Background(), &api.RegisterPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + OCIRuntime: ociRuntime, + CgroupParent: cgroupParent, + CPU: c.flagCPUNanos, + Memory: c.flagMemoryBytes, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error registering plugin runtime %s: %s", runtimeName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Registered plugin runtime: %s", runtimeName)) + return 0 +} diff --git a/command/plugin_runtime_register_test.go b/command/plugin_runtime_register_test.go new file mode 100644 index 000000000000..c43c96bac125 --- /dev/null +++ b/command/plugin_runtime_register_test.go @@ -0,0 +1,202 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/cli" +) + +func testPluginRuntimeRegisterCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeRegisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeRegisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeRegisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + flags []string + args []string + out string + code int + }{ + { + "no type specified", + []string{}, + []string{"foo"}, + "-type is required for plugin runtime registration", + 1, + }, + { + "invalid type", + []string{"-type", "foo"}, + []string{"not"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "not_enough_args", + []string{"-type", consts.PluginRuntimeTypeContainer.String()}, + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type", consts.PluginRuntimeTypeContainer.String()}, + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeRegisterCommand(t) + cmd.client = client + + args := append(tc.flags, tc.args...) + code := cmd.Run(args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeRegisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type", consts.PluginRuntimeTypeContainer.String(), "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error registering plugin runtime my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeRegisterCommand(t) + assertNoTabs(t, cmd) + }) +} + +// TestPluginRuntimeFlagParsing ensures that flags passed to vault plugin runtime register correctly +// translate into the expected JSON body and request path. 
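+// As an illustrative sketch derived from the "full" case below, registering with +// -type=container -oci_runtime=runtime -cgroup_parent=/cpulimit/ -cpu_nanos=5678 -memory_bytes=1234 +// and the name "foo" is expected to produce a request to +// /v1/sys/plugins/runtimes/catalog/container/foo with a JSON body like +// {"type":1,"cgroup_parent":"/cpulimit/","memory_bytes":1234,"cpu_nanos":5678,"oci_runtime":"runtime"}.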
+func TestPluginRuntimeFlagParsing(t *testing.T) { + for name, tc := range map[string]struct { + runtimeType api.PluginRuntimeType + name string + ociRuntime string + cgroupParent string + cpu int64 + memory int64 + args []string + expectedPayload string + }{ + "minimal": { + runtimeType: api.PluginRuntimeTypeContainer, + name: "foo", + expectedPayload: `{"type":1,"name":"foo"}`, + }, + "full": { + runtimeType: api.PluginRuntimeTypeContainer, + name: "foo", + cgroupParent: "/cpulimit/", + ociRuntime: "runtime", + cpu: 5678, + memory: 1234, + expectedPayload: `{"type":1,"cgroup_parent":"/cpulimit/","memory_bytes":1234,"cpu_nanos":5678,"oci_runtime":"runtime"}`, + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + ui, cmd := testPluginRuntimeRegisterCommand(t) + var requestLogger *recordingRoundTripper + cmd.client, requestLogger = mockClient(t) + + var args []string + if tc.cgroupParent != "" { + args = append(args, "-cgroup_parent="+tc.cgroupParent) + } + if tc.ociRuntime != "" { + args = append(args, "-oci_runtime="+tc.ociRuntime) + } + if tc.memory != 0 { + args = append(args, fmt.Sprintf("-memory_bytes=%d", tc.memory)) + } + if tc.cpu != 0 { + args = append(args, fmt.Sprintf("-cpu_nanos=%d", tc.cpu)) + } + + if tc.runtimeType != api.PluginRuntimeTypeUnsupported { + args = append(args, "-type="+tc.runtimeType.String()) + } + args = append(args, tc.name) + t.Log(args) + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d\nstdout: %s\nstderr: %s", code, exp, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + actual := &api.RegisterPluginRuntimeInput{} + expected := &api.RegisterPluginRuntimeInput{} + err := json.Unmarshal(requestLogger.body, actual) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(tc.expectedPayload), expected) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected: %s\ngot: %s", tc.expectedPayload, requestLogger.body) + } + expectedPath := fmt.Sprintf("/v1/sys/plugins/runtimes/catalog/%s/%s", tc.runtimeType.String(), tc.name) + + if requestLogger.path != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, requestLogger.path) + } + }) + } +} diff --git a/command/server.go b/command/server.go index 54d7aed3b37a..1db98a5dff5f 100644 --- a/command/server.go +++ b/command/server.go @@ -215,7 +215,7 @@ func (c *ServerCommand) Flags() *FlagSets { f.BoolVar(&BoolVar{ Name: "recovery", Target: &c.flagRecovery, - Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions." + + Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions. 
" + "Using a recovery token, \"sys/raw\" API can be used to manipulate the storage.", }) diff --git a/command/util.go b/command/util.go index 9393d624df94..0ace16191477 100644 --- a/command/util.go +++ b/command/util.go @@ -6,10 +6,13 @@ package command import ( "fmt" "io" + "net/http" "os" + "testing" "time" "github.com/fatih/color" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/config" "github.com/hashicorp/vault/command/token" @@ -161,3 +164,40 @@ func getWriterFromUI(ui cli.Ui) io.Writer { return os.Stdout } } + +func mockClient(t *testing.T) (*api.Client, *recordingRoundTripper) { + t.Helper() + + config := api.DefaultConfig() + httpClient := cleanhttp.DefaultClient() + roundTripper := &recordingRoundTripper{} + httpClient.Transport = roundTripper + config.HttpClient = httpClient + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + + return client, roundTripper +} + +var _ http.RoundTripper = (*recordingRoundTripper)(nil) + +type recordingRoundTripper struct { + path string + body []byte +} + +func (r *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + r.path = req.URL.Path + defer req.Body.Close() + body, err := io.ReadAll(req.Body) + if err != nil { + return nil, err + } + + r.body = body + return &http.Response{ + StatusCode: 200, + }, nil +} diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl new file mode 100644 index 000000000000..a9543280bdd5 --- /dev/null +++ b/enos/enos-globals.hcl @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +globals { + backend_tag_key = "VaultStorage" + build_tags = { + "oss" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] + } + distro_version = { + "rhel" = var.rhel_distro_version + "ubuntu" = var.ubuntu_distro_version + } + packages = ["jq"] + sample_attributes = { + aws_region = ["us-east-1", "us-west-2"] + } + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_install_dir_packages = { + rhel = "/bin" + ubuntu = "/usr/bin" + } + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "Type" // enos_vault_start expects Type as the tag key +} diff --git a/enos/enos-samples-oss-build.hcl b/enos/enos-samples-oss-build.hcl new file mode 100644 index 000000000000..3c39901a6255 --- /dev/null +++ b/enos/enos-samples-oss-build.hcl @@ -0,0 +1,142 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +sample "build_oss_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } +} + +sample "build_oss_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } +} + +sample "build_oss_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } +} + +sample "build_oss_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } +} + +sample "build_oss_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["oss"] + } + } +} + +sample "build_oss_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["oss"] + } + } +} diff --git a/enos/enos-samples-oss-release.hcl b/enos/enos-samples-oss-release.hcl new file mode 100644 index 000000000000..80eaaa042a35 --- /dev/null +++ b/enos/enos-samples-oss-release.hcl @@ -0,0 +1,142 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +sample "release_oss_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } +} + +sample "release_oss_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["oss"] + } + } +} + +sample "release_oss_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } +} + +sample "release_oss_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["oss"] + } + } +} + +sample "release_oss_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["oss"] + } + } +} + +sample "release_oss_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["oss"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["oss"] + } + } +} diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index f88dd2c032bb..0988e37bdf8e 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -7,6 +7,18 @@ scenario "agent" { artifact_source = ["local", "crt", "artifactory"] distro = ["ubuntu", "rhel"] edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } } terraform_cli = terraform_cli.default @@ -18,38 +30,19 @@ scenario "agent" { ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } bundle_path = 
matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null - distro_version = { - "rhel" = var.rhel_distro_version - "ubuntu" = var.ubuntu_distro_version - } enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } install_artifactory_artifact = local.bundle_path == null - packages = ["jq"] - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_tag_key = "Type" // enos_vault_start expects Type as the tag key } step "build_vault" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] bundle_path = local.bundle_path goarch = matrix.arch goos = "linux" @@ -74,7 +67,7 @@ scenario "agent" { module = module.create_vpc variables { - common_tags = local.tags + common_tags = global.tags } } @@ -83,7 +76,7 @@ scenario "agent" { module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } @@ -96,10 +89,10 @@ scenario "agent" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -123,7 +116,7 @@ scenario "agent" { install_dir = var.vault_install_dir license = matrix.edition != "oss" ? step.read_license.license : null local_artifact_path = local.bundle_path - packages = local.packages + packages = global.packages storage_backend = "raft" target_hosts = step.create_vault_cluster_targets.hosts unseal_method = "shamir" diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index 0f63ffaccf26..d8b82f2d0946 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -10,17 +10,17 @@ scenario "autopilot" { edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions - exclude { - edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] - } - # Our local builder always creates bundles exclude { artifact_source = ["local"] artifact_type = ["package"] } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } } terraform_cli = terraform_cli.default @@ -32,42 +32,21 @@ scenario "autopilot" { ] locals { - build_tags = { - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null - distro_version = { - "rhel" = var.rhel_distro_version - "ubuntu" = var.ubuntu_distro_version - } + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - packages = ["jq"] - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] - vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "build_vault" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -91,7 +70,7 @@ scenario "autopilot" { module = module.create_vpc variables { - common_tags = local.tags + common_tags = global.tags } } @@ -99,7 +78,7 @@ scenario "autopilot" { module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } @@ -112,10 +91,10 @@ scenario "autopilot" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -136,7 +115,7 @@ scenario "autopilot" { cluster_name = step.create_vault_cluster_targets.cluster_name install_dir = local.vault_install_dir license = matrix.edition != "oss" ? step.read_license.license : null - packages = local.packages + packages = global.packages release = var.vault_autopilot_initial_release storage_backend = "raft" storage_backend_addl_config = { @@ -205,9 +184,9 @@ scenario "autopilot" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - common_tags = local.tags + common_tags = global.tags cluster_name = step.create_vault_cluster_targets.cluster_name vpc_id = step.create_vpc.vpc_id } @@ -235,8 +214,9 @@ scenario "autopilot" { initialize_cluster = false install_dir = local.vault_install_dir license = matrix.edition != "oss" ? 
step.read_license.license : null - local_artifact_path = local.bundle_path - packages = local.packages + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = global.packages root_token = step.create_vault_cluster.root_token shamir_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null storage_backend = "raft" diff --git a/enos/enos-scenario-proxy.hcl b/enos/enos-scenario-proxy.hcl index 520c368943fa..6595ed40be9a 100644 --- a/enos/enos-scenario-proxy.hcl +++ b/enos/enos-scenario-proxy.hcl @@ -18,32 +18,11 @@ scenario "proxy" { ] locals { - backend_tag_key = "VaultStorage" - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null - distro_version = { - "rhel" = var.rhel_distro_version - "ubuntu" = var.ubuntu_distro_version - } enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - install_artifactory_artifact = local.bundle_path == null - packages = ["jq"] - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_tag_key = "Type" // enos_vault_start expects Type as the tag key } step "get_local_metadata" { @@ -55,7 +34,7 @@ scenario "proxy" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] bundle_path = local.bundle_path goarch = matrix.arch goos = "linux" @@ -80,7 +59,7 @@ scenario "proxy" { module = module.create_vpc variables { - common_tags = local.tags + common_tags = global.tags } } @@ -89,7 +68,7 @@ scenario "proxy" { module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } @@ -102,10 +81,10 @@ scenario "proxy" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -129,7 +108,7 @@ scenario "proxy" { install_dir = var.vault_install_dir license = matrix.edition != "oss" ? 
step.read_license.license : null local_artifact_path = local.bundle_path - packages = local.packages + packages = global.packages storage_backend = "raft" target_hosts = step.create_vault_cluster_targets.hosts unseal_method = "shamir" diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl index bba7c9e9dc37..fb645a393b92 100644 --- a/enos/enos-scenario-replication.hcl +++ b/enos/enos-scenario-replication.hcl @@ -17,17 +17,17 @@ scenario "replication" { secondary_backend = ["raft", "consul"] secondary_seal = ["awskms", "shamir"] - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions - exclude { - edition = ["ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] - } - # Our local builder always creates bundles exclude { artifact_source = ["local"] artifact_type = ["package"] } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } } terraform_cli = terraform_cli.default @@ -39,45 +39,21 @@ scenario "replication" { ] locals { - # The path to the backend license file (Consul Enterprise) - backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) - backend_tag_key = "VaultStorage" - build_tags = { - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - distro_version = { - "rhel" = var.rhel_distro_version - "ubuntu" = var.ubuntu_distro_version - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - packages = ["jq"] - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] - vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "build_vault" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? 
var.artifactory_host : null @@ -101,7 +77,7 @@ scenario "replication" { module = module.create_vpc variables { - common_tags = local.tags + common_tags = global.tags } } @@ -112,7 +88,7 @@ scenario "replication" { module = module.read_license variables { - file_name = local.backend_license_path + file_name = global.backend_license_path } } @@ -136,10 +112,10 @@ scenario "replication" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -157,8 +133,8 @@ scenario "replication" { variables { ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.backend_tag_key - common_tags = local.tags + cluster_tag_key = global.backend_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -175,11 +151,11 @@ scenario "replication" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn cluster_name = step.create_primary_cluster_targets.cluster_name - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -193,10 +169,10 @@ scenario "replication" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -212,8 +188,8 @@ scenario "replication" { variables { ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.backend_tag_key - common_tags = local.tags + cluster_tag_key = global.backend_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -230,7 +206,7 @@ scenario "replication" { variables { cluster_name = step.create_primary_cluster_backend_targets.cluster_name - cluster_tag_key = local.backend_tag_key + cluster_tag_key = global.backend_tag_key license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null release = { edition = var.backend_edition @@ -256,7 +232,7 @@ scenario "replication" { artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null awskms_unseal_key_arn = step.create_vpc.kms_key_arn backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name - backend_cluster_tag_key = local.backend_tag_key + backend_cluster_tag_key = global.backend_tag_key consul_license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? 
step.read_backend_license.license : null cluster_name = step.create_primary_cluster_targets.cluster_name consul_release = matrix.primary_backend == "consul" ? { @@ -266,8 +242,9 @@ scenario "replication" { enable_file_audit_device = var.vault_enable_file_audit_device install_dir = local.vault_install_dir license = matrix.edition != "oss" ? step.read_vault_license.license : null - local_artifact_path = local.bundle_path - packages = local.packages + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = global.packages storage_backend = matrix.primary_backend target_hosts = step.create_primary_cluster_targets.hosts unseal_method = matrix.primary_seal @@ -286,7 +263,7 @@ scenario "replication" { variables { cluster_name = step.create_secondary_cluster_backend_targets.cluster_name - cluster_tag_key = local.backend_tag_key + cluster_tag_key = global.backend_tag_key license = (matrix.secondary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null release = { edition = var.backend_edition @@ -312,7 +289,7 @@ scenario "replication" { artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null awskms_unseal_key_arn = step.create_vpc.kms_key_arn backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name - backend_cluster_tag_key = local.backend_tag_key + backend_cluster_tag_key = global.backend_tag_key consul_license = (matrix.secondary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null cluster_name = step.create_secondary_cluster_targets.cluster_name consul_release = matrix.secondary_backend == "consul" ? { @@ -322,8 +299,9 @@ scenario "replication" { enable_file_audit_device = var.vault_enable_file_audit_device install_dir = local.vault_install_dir license = matrix.edition != "oss" ? step.read_vault_license.license : null - local_artifact_path = local.bundle_path - packages = local.packages + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = global.packages storage_backend = matrix.secondary_backend target_hosts = step.create_secondary_cluster_targets.hosts unseal_method = matrix.secondary_seal @@ -553,25 +531,27 @@ scenario "replication" { artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null awskms_unseal_key_arn = step.create_vpc.kms_key_arn backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name - backend_cluster_tag_key = local.backend_tag_key + backend_cluster_tag_key = global.backend_tag_key cluster_name = step.create_primary_cluster_targets.cluster_name consul_license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null consul_release = matrix.primary_backend == "consul" ? { edition = var.backend_edition version = matrix.consul_version } : null - force_unseal = matrix.primary_seal == "shamir" - initialize_cluster = false - install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null - local_artifact_path = local.bundle_path - packages = local.packages - root_token = step.create_primary_cluster.root_token - shamir_unseal_keys = matrix.primary_seal == "shamir" ? 
step.create_primary_cluster.unseal_keys_hex : null - storage_backend = matrix.primary_backend - storage_node_prefix = "newprimary_node" - target_hosts = step.create_primary_cluster_additional_targets.hosts - unseal_method = matrix.primary_seal + enable_file_audit_device = var.vault_enable_file_audit_device + force_unseal = matrix.primary_seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = global.packages + root_token = step.create_primary_cluster.root_token + shamir_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : null + storage_backend = matrix.primary_backend + storage_node_prefix = "newprimary_node" + target_hosts = step.create_primary_cluster_additional_targets.hosts + unseal_method = matrix.primary_seal } } diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 97a7d2713ebf..27bc342e4ab8 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -12,17 +12,17 @@ scenario "smoke" { edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions - exclude { - edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] - } - # Our local builder always creates bundles exclude { artifact_source = ["local"] artifact_type = ["package"] } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } } terraform_cli = terraform_cli.default @@ -34,37 +34,13 @@ scenario "smoke" { ] locals { - backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) - backend_tag_key = "VaultStorage" - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null - distro_version = { - "rhel" = var.rhel_distro_version - "ubuntu" = var.ubuntu_distro_version - } + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - packages = ["jq"] - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] - vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? 
var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "get_local_metadata" { @@ -76,8 +52,8 @@ scenario "smoke" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -101,7 +77,7 @@ scenario "smoke" { module = module.create_vpc variables { - common_tags = local.tags + common_tags = global.tags } } @@ -112,7 +88,7 @@ scenario "smoke" { module = module.read_license variables { - file_name = local.backend_license_path + file_name = global.backend_license_path } } @@ -121,7 +97,7 @@ scenario "smoke" { module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } @@ -134,10 +110,10 @@ scenario "smoke" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -153,8 +129,8 @@ scenario "smoke" { variables { ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.backend_tag_key - common_tags = local.tags + cluster_tag_key = global.backend_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -171,7 +147,7 @@ scenario "smoke" { variables { cluster_name = step.create_vault_cluster_backend_targets.cluster_name - cluster_tag_key = local.backend_tag_key + cluster_tag_key = global.backend_tag_key license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null release = { edition = var.backend_edition @@ -197,7 +173,7 @@ scenario "smoke" { artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null awskms_unseal_key_arn = step.create_vpc.kms_key_arn backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name - backend_cluster_tag_key = local.backend_tag_key + backend_cluster_tag_key = global.backend_tag_key cluster_name = step.create_vault_cluster_targets.cluster_name consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null consul_release = matrix.backend == "consul" ? { @@ -207,8 +183,9 @@ scenario "smoke" { enable_file_audit_device = var.vault_enable_file_audit_device install_dir = local.vault_install_dir license = matrix.edition != "oss" ? 
step.read_vault_license.license : null - local_artifact_path = local.bundle_path - packages = local.packages + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = global.packages storage_backend = matrix.backend target_hosts = step.create_vault_cluster_targets.hosts unseal_method = matrix.seal diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index 81b18bdb8f0d..54b1cc273b8b 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -12,10 +12,16 @@ scenario "upgrade" { edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions + # Our local builder always creates bundles exclude { - edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] } } @@ -28,37 +34,13 @@ scenario "upgrade" { ] locals { - backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) - backend_tag_key = "VaultStorage" - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null - distro_version = { - "rhel" = var.rhel_distro_version - "ubuntu" = var.ubuntu_distro_version - } + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - packages = ["jq"] - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] - vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "get_local_metadata" { @@ -71,8 +53,8 @@ scenario "upgrade" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? 
var.artifactory_host : null @@ -96,7 +78,7 @@ scenario "upgrade" { module = module.create_vpc variables { - common_tags = local.tags + common_tags = global.tags } } @@ -107,7 +89,7 @@ scenario "upgrade" { module = module.read_license variables { - file_name = local.backend_license_path + file_name = global.backend_license_path } } @@ -116,7 +98,7 @@ scenario "upgrade" { module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } @@ -129,10 +111,10 @@ scenario "upgrade" { } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.vault_tag_key - common_tags = local.tags + cluster_tag_key = global.vault_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -148,8 +130,8 @@ scenario "upgrade" { variables { ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_tag_key = local.backend_tag_key - common_tags = local.tags + cluster_tag_key = global.backend_tag_key + common_tags = global.tags vpc_id = step.create_vpc.vpc_id } } @@ -166,7 +148,7 @@ scenario "upgrade" { variables { cluster_name = step.create_vault_cluster_backend_targets.cluster_name - cluster_tag_key = local.backend_tag_key + cluster_tag_key = global.backend_tag_key license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null release = { edition = var.backend_edition @@ -191,7 +173,7 @@ scenario "upgrade" { variables { awskms_unseal_key_arn = step.create_vpc.kms_key_arn backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name - backend_cluster_tag_key = local.backend_tag_key + backend_cluster_tag_key = global.backend_tag_key consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null cluster_name = step.create_vault_cluster_targets.cluster_name consul_release = matrix.backend == "consul" ? { @@ -201,7 +183,7 @@ scenario "upgrade" { enable_file_audit_device = var.vault_enable_file_audit_device install_dir = local.vault_install_dir license = matrix.edition != "oss" ? step.read_vault_license.license : null - packages = local.packages + packages = global.packages release = var.vault_upgrade_initial_release storage_backend = matrix.backend target_hosts = step.create_vault_cluster_targets.hosts @@ -259,7 +241,7 @@ scenario "upgrade" { variables { vault_api_addr = "http://localhost:8200" vault_instances = step.create_vault_cluster_targets.hosts - vault_local_artifact_path = local.bundle_path + vault_local_artifact_path = local.artifact_path vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null vault_install_dir = local.vault_install_dir vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null diff --git a/enos/modules/vault_artifactory_artifact/variables.tf b/enos/modules/vault_artifactory_artifact/variables.tf index 5294cc5336e2..a2d9042af535 100644 --- a/enos/modules/vault_artifactory_artifact/variables.tf +++ b/enos/modules/vault_artifactory_artifact/variables.tf @@ -1,7 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 - - variable "artifactory_username" { type = string description = "The username to use when connecting to artifactory" @@ -28,6 +26,7 @@ variable "artifactory_repo" { } variable "arch" {} variable "artifact_type" {} +variable "artifact_path" {} variable "distro" {} variable "edition" {} variable "revision" {} diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf index b9203e2c4603..8fc34f2b5a22 100644 --- a/enos/modules/vault_cluster/main.tf +++ b/enos/modules/vault_cluster/main.tf @@ -69,23 +69,6 @@ locals { vault_service_user = "vault" } -resource "enos_remote_exec" "install_packages" { - for_each = { - for idx, host in var.target_hosts : idx => var.target_hosts[idx] - if length(var.packages) > 0 - } - - content = templatefile("${path.module}/templates/install-packages.sh", { - packages = join(" ", var.packages) - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - resource "enos_bundle_install" "consul" { for_each = { for idx, host in var.target_hosts : idx => var.target_hosts[idx] @@ -117,6 +100,26 @@ resource "enos_bundle_install" "vault" { } } +resource "enos_remote_exec" "install_packages" { + depends_on = [ + enos_bundle_install.vault, // Don't race for the package manager locks with vault install + ] + for_each = { + for idx, host in var.target_hosts : idx => var.target_hosts[idx] + if length(var.packages) > 0 + } + + content = templatefile("${path.module}/templates/install-packages.sh", { + packages = join(" ", var.packages) + }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + resource "enos_consul_start" "consul" { for_each = enos_bundle_install.consul @@ -272,6 +275,7 @@ resource "enos_vault_unseal" "leader" { # user on all nodes, since logging will only happen on the leader. resource "enos_remote_exec" "create_audit_log_dir" { depends_on = [ + enos_bundle_install.vault, enos_vault_unseal.leader, ] for_each = toset([ @@ -395,3 +399,11 @@ resource "enos_remote_exec" "vault_write_license" { } } } + +resource "enos_local_exec" "wait_for_install_packages" { + depends_on = [ + enos_remote_exec.install_packages, + ] + + inline = ["true"] +} diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh index 76b44f53557b..7e1655ff84da 100644 --- a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh @@ -18,7 +18,7 @@ retries=5 while :; do # Find the leader private IP address leader_private_ip=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - match_ip=$(echo $instance_ips |jq -r --argjson ip $leader_private_ip 'map(select(. == $ip))') + match_ip=$(echo "$instance_ips" |jq -r --argjson ip "$leader_private_ip" 'map(select(. == $ip))') if [[ "$leader_private_ip" != 'null' ]] && [[ "$match_ip" != '[]' ]]; then echo "$leader_private_ip" | sed 's/\"//g' diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh index 426963cc7717..4ae3bd2a9e8b 100644 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh @@ -2,24 +2,36 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 - set -e +# shellcheck disable=SC2154 binpath=${vault_install_dir}/vault fail() { echo "$1" 1>&2 - return 1 + exit 1 } test -x "$binpath" || fail "unable to locate vault binary at $binpath" export VAULT_ADDR='http://127.0.0.1:8200' -health_status=$(curl http://127.0.0.1:8200/v1/sys/health |jq '.') -unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') -if [[ "$unseal_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" -fi - -echo $health_status +count=0 +retries=4 +while :; do + health_status=$(curl http://127.0.0.1:8200/v1/sys/health |jq '.') + unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') + if [[ "$unseal_status" == 'true' ]]; then + echo "$health_status" + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + # shellcheck disable=SC2154 + fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" + fi +done diff --git a/go.mod b/go.mod index a9b650a53c2b..d7907cdc3534 100644 --- a/go.mod +++ b/go.mod @@ -133,7 +133,7 @@ require ( github.com/hashicorp/vault-plugin-auth-gcp v0.16.1 github.com/hashicorp/vault-plugin-auth-jwt v0.17.0 github.com/hashicorp/vault-plugin-auth-kerberos v0.10.1 - github.com/hashicorp/vault-plugin-auth-kubernetes v0.17.0 + github.com/hashicorp/vault-plugin-auth-kubernetes v0.17.1 github.com/hashicorp/vault-plugin-auth-oci v0.14.2 github.com/hashicorp/vault-plugin-database-couchbase v0.9.4 github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.3 @@ -151,7 +151,7 @@ require ( github.com/hashicorp/vault-plugin-secrets-kv v0.16.2 github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.1 github.com/hashicorp/vault-plugin-secrets-openldap v0.11.2 - github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1 + github.com/hashicorp/vault-plugin-secrets-terraform v0.7.3 github.com/hashicorp/vault-testing-stepwise v0.1.3 github.com/hashicorp/vault/api v1.10.0 github.com/hashicorp/vault/api/auth/approle v0.1.0 @@ -386,8 +386,8 @@ require ( github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect github.com/hashicorp/go-secure-stdlib/plugincontainer v0.2.0 // indirect - github.com/hashicorp/go-slug v0.11.1 // indirect - github.com/hashicorp/go-tfe v1.25.1 // indirect + github.com/hashicorp/go-slug v0.12.1 // indirect + github.com/hashicorp/go-tfe v1.33.0 // indirect github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect diff --git a/go.sum b/go.sum index 4bede1d0904f..5bd7cf227550 100644 --- a/go.sum +++ b/go.sum @@ -2012,7 +2012,6 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.5.0/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-plugin v1.5.1 h1:oGm7cWBaYIp3lJpx1RUEfLWophprE2EV/KUeqBYo+6k= github.com/hashicorp/go-plugin v1.5.1/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= 
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= @@ -2048,7 +2047,6 @@ github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnU github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= -github.com/hashicorp/go-secure-stdlib/plugincontainer v0.1.1/go.mod h1:kRpzC4wHYXc2+sjXA9vuKawXYs0x0d0HuqqbaW1fj1w= github.com/hashicorp/go-secure-stdlib/plugincontainer v0.2.0 h1:1jd8y6HKfDED6vdsXFRM9SpFQNfhBEIHOC41GyILGyY= github.com/hashicorp/go-secure-stdlib/plugincontainer v0.2.0/go.mod h1:Cv387jRKKbetAp5AWK4zL7UxdeBeDTgUJOnmS4T/4I8= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI= @@ -2058,16 +2056,16 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= -github.com/hashicorp/go-slug v0.11.1 h1:c6lLdQnlhUWbS5I7hw8SvfymoFuy6EmiFDedy6ir994= -github.com/hashicorp/go-slug v0.11.1/go.mod h1:Ib+IWBYfEfJGI1ZyXMGNbu2BU+aa3Dzu41RKLH301v4= +github.com/hashicorp/go-slug v0.12.1 h1:lYhmKXXonP4KGSz3JBmks6YpDRjP1cMA/yvcoPxoNw8= +github.com/hashicorp/go-slug v0.12.1/go.mod h1:JZVtycnZZbiJ4oxpJ/zfhyfBD8XxT4f0uOSyjNLCqFY= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-sockaddr v1.0.4 h1:NJY/hSAoWy0EhQQdDxxoBlwyJex/xC2qNWXD0up6D48= github.com/hashicorp/go-sockaddr v1.0.4/go.mod h1:LPGW7TbF+cTE2o/bBlBWD4XG8rgRJeIurURxH5kEHr8= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-tfe v1.25.1 h1:OxjDhY8Rj36n/uTSmhdFRLcnhXFfRTsopiovYSkJjak= -github.com/hashicorp/go-tfe v1.25.1/go.mod h1:1Y6nsdMuJ14lYdc1VMLl/erlthvMzUsJn+WYWaAdSc4= +github.com/hashicorp/go-tfe v1.33.0 h1:UQ04F/Dd+IgjQDciBZz/BKHTFwc7zXbJTI1+zNN355U= +github.com/hashicorp/go-tfe v1.33.0/go.mod h1:Js4M/3kv14oTBlvLxqh0bbrWfosmNsEb9ad9YazsyfM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -2137,8 +2135,8 @@ github.com/hashicorp/vault-plugin-auth-jwt v0.17.0 h1:ZfgyFjZfquIn9qk1bytkaqUfG8 github.com/hashicorp/vault-plugin-auth-jwt v0.17.0/go.mod h1:R5ZtloCRWHnElOm+MXJadj2jkGMwF9Ybk3sn2kV3L48= github.com/hashicorp/vault-plugin-auth-kerberos v0.10.1 h1:nXni7zfOyhOWJBC42iWqIEZA+aYCo3diyVrr1mHs5yo= github.com/hashicorp/vault-plugin-auth-kerberos v0.10.1/go.mod h1:S0XEzmbUO+iuC44a8wqnL869l6WH0DUMVqxTIEkITys= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.17.0 h1:+Cpp1RYfa765+vheMw++WfwucakC6YAVL2r9J6GxjWk= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.17.0/go.mod 
h1:KE7jUeiD2KE88CeC3YINWZ6A9B2VXPpzkX4bQgsl2lI= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.17.1 h1:MVGosnlKQcgr6z9xrehCi5taYJyRw67JIJMNHaMXSAc= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.17.1/go.mod h1:KE7jUeiD2KE88CeC3YINWZ6A9B2VXPpzkX4bQgsl2lI= github.com/hashicorp/vault-plugin-auth-oci v0.14.2 h1:NcTn5LPRL6lVusPjqGkav+C8LRsy46QKdEk9HElQ5B0= github.com/hashicorp/vault-plugin-auth-oci v0.14.2/go.mod h1:FaLJvP+AUbeo4yop49aVit4JW/I9GfajFqI8wpX+b0w= github.com/hashicorp/vault-plugin-database-couchbase v0.9.4 h1:MaKlz3Guy9eVRJvTM4zUqlBzhEVE8LdlvsQSAURaVDo= @@ -2173,8 +2171,8 @@ github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.1 h1:6WyystBBEx1bDp github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.1/go.mod h1:OdXvez+GH0XBSRS7gxbS8B1rLUPb8bGk+bDVyEaAzI8= github.com/hashicorp/vault-plugin-secrets-openldap v0.11.2 h1:LNzIP4zfWivAy/hgCIwETJFr7BBS91bsJ6AlsGhqAc8= github.com/hashicorp/vault-plugin-secrets-openldap v0.11.2/go.mod h1:osvWc6/BOZPR8Mdqp5dF40dAaHddEiylO1pDRI7DOqo= -github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1 h1:Icb3EDpNvb4ltnGff2Zrm3JVNDDdbbL2wdA2LouD2KQ= -github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1/go.mod h1:JHHo1nWOgYPsbTqE/PVwkTKRkLSlPSqo9RBqZ7NLKB8= +github.com/hashicorp/vault-plugin-secrets-terraform v0.7.3 h1:k5jCx6laFvQHvrQod+TSHSoDqF3ZSIlQB4Yzj6koz0I= +github.com/hashicorp/vault-plugin-secrets-terraform v0.7.3/go.mod h1:yqCovAKNUNYnNrs5Wh95aExpsWEU45GB9FV7EquaSbA= github.com/hashicorp/vault-testing-stepwise v0.1.3 h1:GYvm98EB4nUKUntkBcLicnKsebeV89KPHmAGJUCPU/c= github.com/hashicorp/vault-testing-stepwise v0.1.3/go.mod h1:Ym1T/kMM2sT6qgCIIJ3an7uaSWCJ8O7ohsWB9UiB5tI= github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 h1:Y/+BtwxmRak3Us9jrByARvYW6uNeqZlEpMylIdXVIjY= diff --git a/helper/experiments/experiments.go b/helper/experiments/experiments.go index 713d7ef6c98c..23635b1eae08 100644 --- a/helper/experiments/experiments.go +++ b/helper/experiments/experiments.go @@ -7,7 +7,6 @@ import "slices" const ( VaultExperimentCoreAuditEventsAlpha1 = "core.audit.events.alpha1" - VaultExperimentSecretsSyncAlpha1 = "secrets.sync.alpha1" // Unused experiments. We keep them so that we don't break users who include them in their // flags or configs, but they no longer have any effect. @@ -17,7 +16,6 @@ const ( var validExperiments = []string{ VaultExperimentEventsAlpha1, VaultExperimentCoreAuditEventsAlpha1, - VaultExperimentSecretsSyncAlpha1, } var unusedExperiments = []string{ diff --git a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go index 1251da35e870..40035fc59f38 100644 --- a/helper/testhelpers/pluginhelpers/pluginhelpers.go +++ b/helper/testhelpers/pluginhelpers/pluginhelpers.go @@ -154,6 +154,8 @@ func BuildPluginContainerImage(t testing.T, plugin TestPlugin, pluginDir string) ref := plugin.Name if plugin.Version != "" { ref += ":" + strings.TrimPrefix(plugin.Version, "v") + } else { + ref += ":latest" } args := []string{"build", "--tag=" + ref, "--build-arg=plugin=" + plugin.FileName, "--file=vault/testdata/Dockerfile", pluginDir} cmd := exec.Command("docker", args...) 
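
For reference, the image-tag handling that BuildPluginContainerImage gains in the hunk above can be summarized by the following standalone Go sketch; refFor is a hypothetical helper name used purely for illustration, assuming the same TrimPrefix / ":latest" fallback behavior shown in the diff:

	package main

	import (
		"fmt"
		"strings"
	)

	// refFor mirrors the tag logic added above: a non-empty semantic version
	// has its leading "v" stripped and becomes the image tag, while an empty
	// version now falls back to ":latest" instead of yielding a bare,
	// tagless image reference.
	func refFor(name, version string) string {
		if version != "" {
			return name + ":" + strings.TrimPrefix(version, "v")
		}
		return name + ":latest"
	}

	func main() {
		fmt.Println(refFor("my-plugin", "v0.1.0")) // my-plugin:0.1.0
		fmt.Println(refFor("my-plugin", ""))       // my-plugin:latest
	}
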
diff --git a/http/events.go b/http/events.go index 8060b65288a8..384a6719cd31 100644 --- a/http/events.go +++ b/http/events.go @@ -244,8 +244,9 @@ func handleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler // continually validate subscribe access while the websocket is running ctx, cancelCtx := context.WithCancel(ctx) defer cancelCtx() - go validateSubscribeAccessLoop(core, ctx, cancelCtx, req) + isRoot := entry.IsRoot() + go validateSubscribeAccessLoop(core, ctx, cancelCtx, req) sub := &eventSubscriber{ ctx: ctx, capabilitiesFunc: core.CapabilitiesAndSubscribeEventTypes, @@ -258,7 +259,7 @@ func handleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler json: json, checkCache: cache.New(webSocketRevalidationTime, webSocketRevalidationTime), clientToken: auth.ClientToken, - isRootToken: entry.IsRoot(), + isRootToken: isRoot, } closeStatus, closeReason, err := sub.handleEventsSubscribeWebsocket() if err != nil { @@ -301,7 +302,7 @@ func validateSubscribeAccessLoop(core *vault.Core, ctx context.Context, cancel c // if something breaks, default to canceling the websocket defer cancel() for { - _, _, err := core.CheckToken(ctx, req, false) + _, _, err := core.CheckTokenWithLock(ctx, req, false) if err != nil { core.Logger().Debug("Token does not have access to subscription path in its own namespace, terminating WebSocket subscription", "path", req.Path, "error", err) return diff --git a/http/events_test.go b/http/events_test.go index b5f81822b19a..4eea2134eb69 100644 --- a/http/events_test.go +++ b/http/events_test.go @@ -49,6 +49,8 @@ func TestEventsSubscribe(t *testing.T) { stop := atomic.Bool{} const eventType = "abc" + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() // send some events go func() { @@ -60,7 +62,7 @@ func TestEventsSubscribe(t *testing.T) { pluginInfo := &logical.EventPluginInfo{ MountPath: "secret", } - err = core.Events().SendEventInternal(namespace.RootContext(context.Background()), namespace.RootNamespace, pluginInfo, logical.EventType(eventType), &logical.EventData{ + err = core.Events().SendEventInternal(namespace.RootContext(ctx), namespace.RootNamespace, pluginInfo, logical.EventType(eventType), &logical.EventData{ Id: id, Metadata: nil, EntityIds: nil, @@ -77,7 +79,6 @@ func TestEventsSubscribe(t *testing.T) { stop.Store(true) }) - ctx := context.Background() wsAddr := strings.Replace(addr, "http", "ws", 1) testCases := []struct { @@ -147,29 +148,31 @@ func TestBexprFilters(t *testing.T) { } } - sendEvent := func(eventType string) error { - pluginInfo := &logical.EventPluginInfo{ - MountPath: "secret", - } - ns := namespace.RootNamespace - id, err := uuid.GenerateUUID() - if err != nil { - core.Logger().Info("Error generating UUID, exiting sender", "error", err) - return err - } - err = core.Events().SendEventInternal(namespace.RootContext(context.Background()), ns, pluginInfo, logical.EventType(eventType), &logical.EventData{ - Id: id, - Metadata: nil, - EntityIds: nil, - Note: "testing", - }) - if err != nil { - core.Logger().Info("Error sending event, exiting sender", "error", err) - return err + // send duplicates to help avoid flaky tests in CI + sendEvents := func(ctx context.Context, eventTypes ...string) { + for i := 0; i < 10; i++ { + time.Sleep(10 * time.Millisecond) + for _, eventType := range eventTypes { + pluginInfo := &logical.EventPluginInfo{ + MountPath: "secret", + } + ns := namespace.RootNamespace + id := eventType + err := 
core.Events().SendEventInternal(namespace.RootContext(ctx), ns, pluginInfo, logical.EventType(eventType), &logical.EventData{ + Id: id, + Metadata: nil, + EntityIds: nil, + Note: "testing", + }) + if err != nil { + return + } + } } - return nil } - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + wsAddr := strings.Replace(addr, "http", "ws", 1) bexprFilter := url.QueryEscape("event_type == abc") @@ -181,36 +184,32 @@ func TestBexprFilters(t *testing.T) { t.Fatal(err) } defer conn.Close(websocket.StatusNormalClosure, "") - err = sendEvent("def") - if err != nil { - t.Fatal(err) - } - err = sendEvent("xyz") - if err != nil { - t.Fatal(err) - } - err = sendEvent("abc") - if err != nil { - t.Fatal(err) - } - // we should get the abc message - _, msg, err := conn.Read(context.Background()) - if err != nil { - t.Fatal(err) - } - event := map[string]interface{}{} - err = json.Unmarshal(msg, &event) - if err != nil { - t.Fatal(err) + go sendEvents(ctx, "abc", "def", "xyz") + // read until we time out + seen := map[string]bool{} + done := false + for !done { + done = func() bool { + readCtx, readCancel := context.WithTimeout(context.Background(), 1*time.Second) + defer readCancel() + _, msg, err := conn.Read(readCtx) + if err != nil { + return true + } + event := map[string]interface{}{} + err = json.Unmarshal(msg, &event) + if err != nil { + t.Error(err) + return true + } + seen[event["id"].(string)] = true + return false + }() } - assert.Equal(t, "abc", event["data"].(map[string]interface{})["event_type"].(string)) - - // and no other messages - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - _, _, err = conn.Read(ctx) - assert.ErrorContains(t, err, "context deadline exceeded") + // we should only get the "abc" messages + assert.Len(t, seen, 1) + assert.Contains(t, seen, "abc") } func TestNamespacePrepend(t *testing.T) { @@ -298,7 +297,8 @@ func TestEventsSubscribeAuth(t *testing.T) { nonPrivilegedToken = secret.Auth.ClientToken } - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() wsAddr := strings.Replace(addr, "http", "ws", 1) // Get a 403 with no token. diff --git a/internal/observability/event/options.go b/internal/observability/event/options.go index 2cd7cf5c4bc4..80ad1bf9996e 100644 --- a/internal/observability/event/options.go +++ b/internal/observability/event/options.go @@ -32,12 +32,15 @@ type options struct { // getDefaultOptions returns Options with their default values. func getDefaultOptions() options { + fileMode := os.FileMode(0o600) + return options{ withNow: time.Now(), withFacility: "AUTH", withTag: "vault", withSocketType: "tcp", withMaxDuration: 2 * time.Second, + withFileMode: &fileMode, } } @@ -110,11 +113,7 @@ func WithNow(now time.Time) Option { // WithFacility provides an Option to represent a 'facility' for a syslog sink. func WithFacility(facility string) Option { return func(o *options) error { - facility = strings.TrimSpace(facility) - - if facility != "" { - o.withFacility = facility - } + o.withFacility = facility return nil } @@ -123,11 +122,7 @@ func WithFacility(facility string) Option { // WithTag provides an Option to represent a 'tag' for a syslog sink. 
func WithTag(tag string) Option { return func(o *options) error { - tag = strings.TrimSpace(tag) - - if tag != "" { - o.withTag = tag - } + o.withTag = tag return nil } diff --git a/internal/observability/event/options_test.go b/internal/observability/event/options_test.go index 0f36014740cf..676c79833078 100644 --- a/internal/observability/event/options_test.go +++ b/internal/observability/event/options_test.go @@ -205,7 +205,7 @@ func TestOptions_WithFacility(t *testing.T) { }, "whitespace": { Value: " ", - ExpectedValue: "", + ExpectedValue: " ", }, "value": { Value: "juan", @@ -213,7 +213,7 @@ func TestOptions_WithFacility(t *testing.T) { }, "spacey-value": { Value: " juan ", - ExpectedValue: "juan", + ExpectedValue: " juan ", }, } @@ -243,7 +243,7 @@ func TestOptions_WithTag(t *testing.T) { }, "whitespace": { Value: " ", - ExpectedValue: "", + ExpectedValue: " ", }, "value": { Value: "juan", @@ -251,7 +251,7 @@ func TestOptions_WithTag(t *testing.T) { }, "spacey-value": { Value: " juan ", - ExpectedValue: "juan", + ExpectedValue: " juan ", }, } diff --git a/internalshared/configutil/kms.go b/internalshared/configutil/kms.go index a9c1fa468c68..f3f34bf341f4 100644 --- a/internalshared/configutil/kms.go +++ b/internalshared/configutil/kms.go @@ -216,6 +216,9 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin var err error envConfig := GetEnvConfigFunc(configKMS) + if len(envConfig) > 0 && configKMS.Config == nil { + configKMS.Config = make(map[string]string) + } // transit is a special case, because some config values take precedence over env vars if configKMS.Type == wrapping.WrapperTypeTransit.String() { mergeTransitConfig(configKMS.Config, envConfig) diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh index d863b02e0d9b..611df09c6c9a 100755 --- a/scripts/ci-helper.sh +++ b/scripts/ci-helper.sh @@ -85,7 +85,7 @@ function build() { : "${GO_TAGS:=""}" : "${REMOVE_SYMBOLS:=""}" - GOOS= GOARCH= go generate ./... + (unset GOOS; unset GOARCH; go generate ./...) # Build our ldflags msg="--> Building Vault revision $revision, built $build_date" @@ -129,53 +129,10 @@ function prepare_legal() { popd } -# Determine the matrix group number that we'll select for execution. If the -# MATRIX_TEST_GROUP environment variable has set then it will always return -# that value. If has not been set, we will randomly select a number between 1 -# and the value of MATRIX_MAX_TEST_GROUPS. -function matrix_group_id() { - : "${MATRIX_TEST_GROUP:=""}" - if [ -n "$MATRIX_TEST_GROUP" ]; then - echo "$MATRIX_TEST_GROUP" - return - fi - - : "${MATRIX_MAX_TEST_GROUPS:=1}" - awk -v min=1 -v max=$MATRIX_MAX_TEST_GROUPS 'BEGIN{srand(); print int(min+rand()*(max-min+1))}' -} - -# Filter matrix file reads in the contents of MATRIX_FILE and filters out -# scenarios that are not in the current test group and/or those that have not -# met minimux or maximum version requirements. -function matrix_filter_file() { - : "${MATRIX_FILE:=""}" - if [ -z "$MATRIX_FILE" ]; then - echo "You must specify the MATRIX_FILE variable for this command" >&2 - exit 1 - fi - - : "${VAULT_MINOR_VERSION:=""}" - if [ -z "$VAULT_MINOR_VERSION" ]; then - echo "You must specify the VAULT_MINOR_VERSION variable for this command" >&2 - exit 1 - fi - - : "${MATRIX_TEST_GROUP:=$(matrix_group_id)}" - - local path - local matrix - path=$(readlink -f $MATRIX_FILE) - matrix=$(cat "$path" | jq ".include | - map(. 
| - select( - ((.min_minor_version == null) or (.min_minor_version <= $VAULT_MINOR_VERSION)) and - ((.max_minor_version == null) or (.max_minor_version >= $VAULT_MINOR_VERSION)) and - ((.test_group == null) or (.test_group == $MATRIX_TEST_GROUP)) - ) - )" - ) - - echo "{\"include\":$matrix}" | jq -c . +# Package version converts a vault version string into a compatible representation for system +# packages. +function version_package() { + awk '{ gsub("-","~",$1); print $1 }' <<< "$VAULT_VERSION" } # Run the CI Helper @@ -199,12 +156,6 @@ function main() { prepare-legal) prepare_legal ;; - matrix-filter-file) - matrix_filter_file - ;; - matrix-group-id) - matrix_group_id - ;; revision) build_revision ;; diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go index b4d7204584e9..d63d3b217d63 100644 --- a/sdk/helper/keysutil/policy.go +++ b/sdk/helper/keysutil/policy.go @@ -2013,9 +2013,15 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value encBytes := 32 hmacBytes := 0 - if p.convergentVersion(ver) > 2 { + convergentVersion := p.convergentVersion(ver) + if convergentVersion > 2 { deriveHMAC = true hmacBytes = 32 + if len(nonce) > 0 { + return "", errutil.UserError{Err: "nonce provided when not allowed"} + } + } else if len(nonce) > 0 && (!p.ConvergentEncryption || convergentVersion != 1) { + return "", errutil.UserError{Err: "nonce provided when not allowed"} } if p.Type == KeyType_AES128_GCM96 { encBytes = 16 diff --git a/sdk/helper/pluginutil/run_config.go b/sdk/helper/pluginutil/run_config.go index fddca60508b8..bd231647e3fa 100644 --- a/sdk/helper/pluginutil/run_config.go +++ b/sdk/helper/pluginutil/run_config.go @@ -20,6 +20,15 @@ import ( "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" ) +const ( + // Labels for plugin container ownership + labelVaultPID = "com.hashicorp.vault.pid" + labelVaultClusterID = "com.hashicorp.vault.cluster.id" + labelVaultPluginName = "com.hashicorp.vault.plugin.name" + labelVaultPluginVersion = "com.hashicorp.vault.plugin.version" + labelVaultPluginType = "com.hashicorp.vault.plugin.type" +) + type PluginClientConfig struct { Name string PluginType consts.PluginType @@ -123,7 +132,10 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error Hash: sha256.New(), } } else { - containerCfg := rc.containerConfig(cmd.Env) + containerCfg, err := rc.containerConfig(ctx, cmd.Env) + if err != nil { + return nil, err + } clientConfig.SkipHostEnv = true clientConfig.RunnerFunc = containerCfg.NewContainerRunner clientConfig.UnixSocketConfig = &plugin.UnixSocketConfig{ @@ -133,7 +145,11 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error return clientConfig, nil } -func (rc runConfig) containerConfig(env []string) *plugincontainer.Config { +func (rc runConfig) containerConfig(ctx context.Context, env []string) (*plugincontainer.Config, error) { + clusterID, err := rc.Wrapper.ClusterID(ctx) + if err != nil { + return nil, err + } cfg := &plugincontainer.Config{ Image: rc.image, Tag: rc.imageTag, @@ -143,9 +159,14 @@ func (rc runConfig) containerConfig(env []string) *plugincontainer.Config { GroupAdd: os.Getgid(), Runtime: consts.DefaultContainerPluginOCIRuntime, Labels: map[string]string{ - "managed-by": "hashicorp.com/vault", + labelVaultPID: strconv.Itoa(os.Getpid()), + labelVaultClusterID: clusterID, + labelVaultPluginName: rc.PluginClientConfig.Name, + labelVaultPluginType: rc.PluginClientConfig.PluginType.String(), + labelVaultPluginVersion: 
rc.PluginClientConfig.Version, }, } + // Use rc.command and rc.args directly instead of cmd.Path and cmd.Args, as // exec.Command may mutate the provided command. if rc.command != "" { @@ -163,7 +184,7 @@ func (rc runConfig) containerConfig(env []string) *plugincontainer.Config { } } - return cfg + return cfg, nil } func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { @@ -240,6 +261,11 @@ func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.C sha256: r.Sha256, env: r.Env, runtimeConfig: r.RuntimeConfig, + PluginClientConfig: PluginClientConfig{ + Name: r.Name, + PluginType: r.Type, + Version: r.Version, + }, } for _, opt := range opts { diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go index 446940120369..25a950725c20 100644 --- a/sdk/helper/pluginutil/run_config_test.go +++ b/sdk/helper/pluginutil/run_config_test.go @@ -432,11 +432,16 @@ func (m *mockRunnerUtil) MlockEnabled() bool { return args.Bool(0) } +func (m *mockRunnerUtil) ClusterID(ctx context.Context) (string, error) { + return "1234", nil +} + func TestContainerConfig(t *testing.T) { dummySHA, err := hex.DecodeString("abc123") if err != nil { t.Fatal(err) } + myPID := strconv.Itoa(os.Getpid()) for name, tc := range map[string]struct { rc runConfig expected plugincontainer.Config @@ -460,8 +465,11 @@ func TestContainerConfig(t *testing.T) { MagicCookieKey: "magic_cookie_key", MagicCookieValue: "magic_cookie_value", }, - Logger: hclog.NewNullLogger(), - AutoMTLS: true, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + Name: "some-plugin", + PluginType: consts.PluginTypeCredential, + Version: "v0.1.0", }, }, expected: plugincontainer.Config{ @@ -477,7 +485,11 @@ func TestContainerConfig(t *testing.T) { fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), }, Labels: map[string]string{ - "managed-by": "hashicorp.com/vault", + labelVaultPID: myPID, + labelVaultClusterID: "1234", + labelVaultPluginName: "some-plugin", + labelVaultPluginType: "auth", + labelVaultPluginVersion: "v0.1.0", }, Runtime: consts.DefaultContainerPluginOCIRuntime, GroupAdd: os.Getgid(), @@ -505,8 +517,11 @@ func TestContainerConfig(t *testing.T) { MagicCookieKey: "magic_cookie_key", MagicCookieValue: "magic_cookie_value", }, - Logger: hclog.NewNullLogger(), - AutoMTLS: true, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + Name: "some-plugin", + PluginType: consts.PluginTypeCredential, + Version: "v0.1.0", }, }, expected: plugincontainer.Config{ @@ -519,7 +534,11 @@ func TestContainerConfig(t *testing.T) { fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), }, Labels: map[string]string{ - "managed-by": "hashicorp.com/vault", + labelVaultPID: myPID, + labelVaultClusterID: "1234", + labelVaultPluginName: "some-plugin", + labelVaultPluginType: "auth", + labelVaultPluginVersion: "v0.1.0", }, Runtime: "some-oci-runtime", GroupAdd: os.Getgid(), @@ -540,7 +559,8 @@ func TestContainerConfig(t *testing.T) { if err != nil { t.Fatal(err) } - cfg := tc.rc.containerConfig(cmd.Env) + cfg, err := tc.rc.containerConfig(context.Background(), cmd.Env) + require.NoError(t, err) require.Equal(t, tc.expected, *cfg) }) } diff --git a/sdk/helper/pluginutil/runner.go b/sdk/helper/pluginutil/runner.go index 316a16fe3f3f..c627204b1ac3 100644 --- a/sdk/helper/pluginutil/runner.go +++ b/sdk/helper/pluginutil/runner.go @@ -33,6 +33,7 @@ type RunnerUtil interface { ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) 
MlockEnabled() bool VaultVersion(ctx context.Context) (string, error) + ClusterID(ctx context.Context) (string, error) } // LookRunnerUtil defines the functions for both Looker and Wrapper diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go index a343356d19d8..4e92ad13ec58 100644 --- a/sdk/plugin/grpc_backend_client.go +++ b/sdk/plugin/grpc_backend_client.go @@ -25,7 +25,7 @@ var ( ) // Validate backendGRPCPluginClient satisfies the logical.Backend interface -var _ logical.Backend = &backendGRPCPluginClient{} +var _ logical.Backend = (*backendGRPCPluginClient)(nil) // backendPluginClient implements logical.Backend and is the // go-plugin client. @@ -183,17 +183,21 @@ func (b *backendGRPCPluginClient) Cleanup(ctx context.Context) { defer close(quitCh) defer cancel() - b.client.Cleanup(ctx, &pb.Empty{}) - - // This will block until Setup has run the function to create a new server - // in b.server. If we stop here before it has a chance to actually start - // listening, when it starts listening it will immediately error out and - // exit, which is fine. Overall this ensures that we do not miss stopping - // the server if it ends up being created after Cleanup is called. - <-b.cleanupCh + // Only wait on graceful cleanup if we can establish communication with the + // plugin, otherwise b.cleanupCh may never get closed. + if _, err := b.client.Cleanup(ctx, &pb.Empty{}); status.Code(err) != codes.Unavailable { + // This will block until Setup has run the function to create a new server + // in b.server. If we stop here before it has a chance to actually start + // listening, when it starts listening it will immediately error out and + // exit, which is fine. Overall this ensures that we do not miss stopping + // the server if it ends up being created after Cleanup is called. + select { + case <-b.cleanupCh: + } + } server := b.server.Load() - if server != nil { - server.(*grpc.Server).GracefulStop() + if grpcServer, ok := server.(*grpc.Server); ok && grpcServer != nil { + grpcServer.GracefulStop() } } diff --git a/ui/app/adapters/auth-method.js b/ui/app/adapters/auth-method.js index c23a1ad46933..01d74651741f 100644 --- a/ui/app/adapters/auth-method.js +++ b/ui/app/adapters/auth-method.js @@ -68,6 +68,12 @@ export default ApplicationAdapter.extend({ return this.ajax(`/v1/auth/${encodePath(path)}/oidc/callback`, 'GET', { data: { state, code } }); }, + pollSAMLToken(path, token_poll_id, client_verifier) { + return this.ajax(`/v1/auth/${encodePath(path)}/token`, 'PUT', { + data: { token_poll_id, client_verifier }, + }); + }, + tune(path, data) { const url = `${this.buildURL()}/${this.pathForType()}/${encodePath(path)}tune`; return this.ajax(url, 'POST', { data }); diff --git a/ui/app/adapters/role-saml.js b/ui/app/adapters/role-saml.js new file mode 100644 index 000000000000..052f2024f689 --- /dev/null +++ b/ui/app/adapters/role-saml.js @@ -0,0 +1,66 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import ApplicationAdapter from './application'; +import { inject as service } from '@ember/service'; +import { encodePath } from 'vault/utils/path-encoding-helpers'; +import { v4 as uuidv4 } from 'uuid'; + +export default ApplicationAdapter.extend({ + router: service(), + + // generateClientChallenge generates a client challenge from a verifier. + // The client challenge is the base64(sha256(verifier)). The verifier is + // later presented to the server to obtain the resulting Vault token. 
+ async generateClientChallenge(verifier) { + const encoder = new TextEncoder(); + const data = encoder.encode(verifier); + const hashBuffer = await crypto.subtle.digest('SHA-256', data); + const hashArray = new Uint8Array(hashBuffer); + return btoa(String.fromCharCode.apply(null, hashArray)); + }, + + async findRecord(store, type, id, snapshot) { + let [path, role] = JSON.parse(id); + path = preparePathSegment(path); + + // Create the ACS URL based on the cluster the UI is targeting + let acs_url = `${window.location.origin}/v1/`; + let namespace = snapshot?.adapterOptions.namespace; + if (namespace) { + namespace = preparePathSegment(namespace); + acs_url = acs_url.concat(namespace, '/'); + } + acs_url = acs_url.concat('auth/', path, '/callback'); + + // Create the client verifier and challenge + const verifier = uuidv4(); + const challenge = await this.generateClientChallenge(verifier); + // Kick off the authentication flow by generating the SSO service URL + // It requires the client challenge generated from the verifier. We'll + // later provide the verifier to match up with the challenge on the server + // when we poll for the Vault token by its returned token poll ID. + const response = await this.ajax(`/v1/auth/${path}/sso_service_url`, 'PUT', { + data: { + acs_url, + role, + client_challenge: challenge, + client_type: 'browser', + }, + }); + return { + ...response.data, + client_verifier: verifier, + }; + }, +}); + +// preparePathSegment prepares the given segment for being included in a URL +// path by trimming leading and trailing forward slashes and URL encoding. +function preparePathSegment(segment) { + segment = segment.replace(/^\//, ''); + segment = segment.replace(/\/$/, ''); + return encodePath(segment); +} diff --git a/ui/app/components/auth-form.js b/ui/app/components/auth-form.js index e0ca05e7c672..4f44fdc6d353 100644 --- a/ui/app/components/auth-form.js +++ b/ui/app/components/auth-form.js @@ -10,13 +10,11 @@ import { match, alias, or } from '@ember/object/computed'; import { dasherize } from '@ember/string'; import Component from '@ember/component'; import { computed } from '@ember/object'; -import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends'; +import { allSupportedAuthBackends, supportedAuthBackends } from 'vault/helpers/supported-auth-backends'; import { task, timeout } from 'ember-concurrency'; import { waitFor } from '@ember/test-waiters'; import { v4 as uuidv4 } from 'uuid'; -const BACKENDS = supportedAuthBackends(); - /** * @module AuthForm * The `AuthForm` is used to sign users into Vault. @@ -49,6 +47,7 @@ export default Component.extend(DEFAULTS, { flashMessages: service(), store: service(), csp: service('csp-event'), + version: service(), // passed in via a query param selectedAuth: null, @@ -58,11 +57,14 @@ export default Component.extend(DEFAULTS, { wrappedToken: null, // internal oldNamespace: null, - authMethods: BACKENDS, // number answer for okta number challenge if applicable oktaNumberChallengeAnswer: null, + authMethods: computed('version.isEnterprise', function () { + return this.version.isEnterprise ? 
allSupportedAuthBackends() : supportedAuthBackends(); + }), + didReceiveAttrs() { this._super(...arguments); const { @@ -139,7 +141,7 @@ export default Component.extend(DEFAULTS, { if (keyIsPath && !type) { return methods.findBy('path', selected); } - return BACKENDS.findBy('type', selected); + return this.authMethods.findBy('type', selected); }, selectedAuthIsPath: match('selectedAuth', /\/$/), @@ -168,21 +170,21 @@ export default Component.extend(DEFAULTS, { cspErrorText: `This is a standby Vault node but can't communicate with the active node via request forwarding. Sign in at the active node to use the Vault UI.`, - allSupportedMethods: computed('methodsToShow', 'hasMethodsWithPath', function () { + allSupportedMethods: computed('methodsToShow', 'hasMethodsWithPath', 'authMethods', function () { const hasMethodsWithPath = this.hasMethodsWithPath; const methodsToShow = this.methodsToShow; - return hasMethodsWithPath ? methodsToShow.concat(BACKENDS) : methodsToShow; + return hasMethodsWithPath ? methodsToShow.concat(this.authMethods) : methodsToShow; }), hasMethodsWithPath: computed('methodsToShow', function () { return this.methodsToShow.isAny('path'); }), - methodsToShow: computed('methods', function () { + methodsToShow: computed('methods', 'authMethods', function () { const methods = this.methods || []; const shownMethods = methods.filter((m) => - BACKENDS.find((b) => b.type.toLowerCase() === m.type.toLowerCase()) + this.authMethods.find((b) => b.type.toLowerCase() === m.type.toLowerCase()) ); - return shownMethods.length ? shownMethods : BACKENDS; + return shownMethods.length ? shownMethods : this.authMethods; }), unwrapToken: task( @@ -299,9 +301,9 @@ export default Component.extend(DEFAULTS, { this.set('token', token); } this.set('error', null); - // if callback from oidc or jwt we have a token at this point + // if callback from oidc, jwt, or saml we have a token at this point const backend = token ? this.getAuthBackend('token') : this.selectedAuthBackend || {}; - const backendMeta = BACKENDS.find( + const backendMeta = this.authMethods.find( (b) => (b.type || '').toLowerCase() === (backend.type || '').toLowerCase() ); const attributes = (backendMeta || {}).formAttributes || []; diff --git a/ui/app/components/auth-saml.js b/ui/app/components/auth-saml.js new file mode 100644 index 000000000000..9b5814e80046 --- /dev/null +++ b/ui/app/components/auth-saml.js @@ -0,0 +1,162 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { inject as service } from '@ember/service'; +import Component from './outer-html'; +import { task, timeout, waitForEvent } from 'ember-concurrency'; +import { computed } from '@ember/object'; +import errorMessage from 'vault/utils/error-message'; + +const WAIT_TIME = 500; +const ERROR_WINDOW_CLOSED = + 'The provider window was closed before authentication was complete. Your web browser may have blocked or closed a pop-up window. Please check your settings and click Sign In to try again.'; +const ERROR_TIMEOUT = 'The authentication request has timed out. Please click Sign In to try again.'; +const ERROR_MISSING_PARAMS = + 'The callback from the provider did not supply all of the required parameters. Please click Sign In to try again. 
If the problem persists, you may want to contact your administrator.';
+export { ERROR_WINDOW_CLOSED, ERROR_MISSING_PARAMS };
+
+export default Component.extend({
+  store: service(),
+  featureFlagService: service('featureFlag'),
+
+  selectedAuthPath: null,
+  selectedAuthType: null,
+  roleName: null,
+  errorMessage: null,
+  onRoleName() {},
+  onLoading() {},
+  onError() {},
+  onNamespace() {},
+
+  didReceiveAttrs() {
+    this._super();
+    this.set('errorMessage', null);
+  },
+
+  getWindow() {
+    return this.window || window;
+  },
+
+  canLoginSaml: computed('getWindow', function () {
+    return this.getWindow().isSecureContext;
+  }),
+
+  async fetchRole(roleName) {
+    const path = this.selectedAuthPath || this.selectedAuthType;
+    const id = JSON.stringify([path, roleName]);
+    return this.store.findRecord('role-saml', id, {
+      adapterOptions: { namespace: this.namespace },
+    });
+  },
+
+  cancelLogin(samlWindow, errorMessage) {
+    this.closeWindow(samlWindow);
+    this.handleSAMLError(errorMessage);
+    this.exchangeSAMLTokenPollID.cancelAll();
+  },
+
+  closeWindow(samlWindow) {
+    this.watchPopup.cancelAll();
+    this.watchCurrent.cancelAll();
+    samlWindow.close();
+  },
+
+  handleSAMLError(err) {
+    this.onLoading(false);
+    this.onError(err);
+  },
+
+  watchPopup: task(function* (samlWindow) {
+    while (true) {
+      yield timeout(WAIT_TIME);
+      if (!samlWindow || samlWindow.closed) {
+        this.exchangeSAMLTokenPollID.cancelAll();
+        return this.handleSAMLError(ERROR_WINDOW_CLOSED);
+      }
+    }
+  }),
+
+  watchCurrent: task(function* (samlWindow) {
+    // when the user is about to change pages, close the popup window
+    yield waitForEvent(this.getWindow(), 'beforeunload');
+    samlWindow?.close();
+  }),
+
+  exchangeSAMLTokenPollID: task(function* (samlWindow, role) {
+    this.onLoading(true);
+
+    // start watching the popup window and the current one
+    this.watchPopup.perform(samlWindow);
+    this.watchCurrent.perform(samlWindow);
+
+    const path = this.selectedAuthPath || this.selectedAuthType;
+    const adapter = this.store.adapterFor('auth-method');
+    this.onNamespace(this.namespace);
+
+    // Wait up to 90 seconds (180 polls at the 500ms WAIT_TIME) for the token
+    // to become available
+    let resp;
+    for (let i = 0; i < 180; i++) {
+      yield timeout(WAIT_TIME);
+      try {
+        resp = yield adapter.pollSAMLToken(path, role.tokenPollID, role.clientVerifier);
+        if (!resp?.auth) {
+          continue;
+        }
+        // We've obtained the Vault token for the authentication flow, now log in.
+        yield this.onSubmit(null, null, resp.auth.client_token);
+        this.closeWindow(samlWindow);
+        return;
+      } catch (e) {
+        if (e.httpStatus === 401) {
+          // Continue to retry on 401 Unauthorized
+          continue;
+        }
+        return this.cancelLogin(samlWindow, errorMessage(e));
+      }
+    }
+    this.cancelLogin(samlWindow, ERROR_TIMEOUT);
+  }),
+
+  actions: {
+    setRole(roleName) {
+      this.onRoleName(roleName);
+    },
+    /* SAML auth flow on login button click:
+     * 1. find the role-saml record, which returns the role info
+     * 2. open a popup at the sso_service_url returned on the role
+     * 3. watch the popup window for close (and cancel polling if it closes)
+     * 4. poll Vault for a 200 token response
+     * 5. 
close popup, stop polling, and trigger onSubmit with token data + */ + async startSAMLAuth(callback, data, e) { + this.onError(null); + this.onLoading(true); + if (e && e.preventDefault) { + e.preventDefault(); + } + const roleName = data.role; + let role; + try { + role = await this.fetchRole(roleName); + } catch (error) { + this.handleSAMLError(error); + return; + } + + const win = this.getWindow(); + const POPUP_WIDTH = 500; + const POPUP_HEIGHT = 600; + const left = win.screen.width / 2 - POPUP_WIDTH / 2; + const top = win.screen.height / 2 - POPUP_HEIGHT / 2; + const samlWindow = win.open( + role.ssoServiceURL, + 'vaultSAMLWindow', + `width=${POPUP_WIDTH},height=${POPUP_HEIGHT},resizable,scrollbars=yes,top=${top},left=${left}` + ); + + this.exchangeSAMLTokenPollID.perform(samlWindow, role); + }, + }, +}); diff --git a/ui/app/components/mount-backend/type-form.js b/ui/app/components/mount-backend/type-form.js index cdec52a095a3..f6cf98fda643 100644 --- a/ui/app/components/mount-backend/type-form.js +++ b/ui/app/components/mount-backend/type-form.js @@ -5,7 +5,7 @@ import Component from '@glimmer/component'; import { inject as service } from '@ember/service'; -import { methods } from 'vault/helpers/mountable-auth-methods'; +import { allMethods, methods } from 'vault/helpers/mountable-auth-methods'; import { allEngines, mountableEngines } from 'vault/helpers/mountable-secret-engines'; import { tracked } from '@glimmer/tracking'; @@ -31,7 +31,11 @@ export default class MountBackendTypeForm extends Component { return this.version.isEnterprise ? allEngines() : mountableEngines(); } + get authMethods() { + return this.version.isEnterprise ? allMethods() : methods(); + } + get mountTypes() { - return this.args.mountType === 'secret' ? this.secretEngines : methods(); + return this.args.mountType === 'secret' ? 
this.secretEngines : this.authMethods; } } diff --git a/ui/app/helpers/mountable-auth-methods.js b/ui/app/helpers/mountable-auth-methods.js index bdb865f8a17d..303e9baff4d0 100644 --- a/ui/app/helpers/mountable-auth-methods.js +++ b/ui/app/helpers/mountable-auth-methods.js @@ -5,6 +5,15 @@ import { helper as buildHelper } from '@ember/component/helper'; +const ENTERPRISE_AUTH_METHODS = [ + { + displayName: 'SAML', + value: 'saml', + type: 'saml', + category: 'generic', + }, +]; + const MOUNTABLE_AUTH_METHODS = [ { displayName: 'AliCloud', @@ -106,4 +115,8 @@ export function methods() { return MOUNTABLE_AUTH_METHODS.slice(); } +export function allMethods() { + return [...MOUNTABLE_AUTH_METHODS, ...ENTERPRISE_AUTH_METHODS]; +} + export default buildHelper(methods); diff --git a/ui/app/helpers/supported-auth-backends.js b/ui/app/helpers/supported-auth-backends.js index 10361bc839ae..e06cbd3387b3 100644 --- a/ui/app/helpers/supported-auth-backends.js +++ b/ui/app/helpers/supported-auth-backends.js @@ -72,8 +72,23 @@ const SUPPORTED_AUTH_BACKENDS = [ }, ]; +const ENTERPRISE_AUTH_METHODS = [ + { + type: 'saml', + typeDisplay: 'SAML', + description: 'Authenticate using SAML provider.', + tokenPath: 'client_token', + displayNamePath: 'display_name', + formAttributes: ['role'], + }, +]; + export function supportedAuthBackends() { return SUPPORTED_AUTH_BACKENDS; } +export function allSupportedAuthBackends() { + return [...SUPPORTED_AUTH_BACKENDS, ...ENTERPRISE_AUTH_METHODS]; +} + export default buildHelper(supportedAuthBackends); diff --git a/ui/app/models/namespace.js b/ui/app/models/namespace.js index 0cfc5949d8bd..5a2b2174b6f8 100644 --- a/ui/app/models/namespace.js +++ b/ui/app/models/namespace.js @@ -5,19 +5,23 @@ import Model, { attr } from '@ember-data/model'; import { withExpandedAttributes } from 'vault/decorators/model-expanded-attributes'; +import { withModelValidations } from 'vault/decorators/model-validations'; @withExpandedAttributes() +@withModelValidations({ + path: [ + { type: 'presence', message: `Path can't be blank.` }, + { type: 'endsInSlash', message: `Path can't end in forward slash '/'.` }, + { + type: 'containsWhiteSpace', + message: "Path can't contain whitespace.", + }, + ], +}) export default class NamespaceModel extends Model { - @attr('string', { - validationAttr: 'pathIsValid', - invalidMessage: 'You have entered and invalid path please only include letters, numbers, -, ., and _.', - }) + @attr('string') path; - get pathIsValid() { - return this.path && this.path.match(/^[\w\d-.]+$/g); - } - get fields() { return ['path'].map((f) => this.allByKey[f]); } diff --git a/ui/app/models/role-saml.js b/ui/app/models/role-saml.js new file mode 100644 index 000000000000..430845b094a2 --- /dev/null +++ b/ui/app/models/role-saml.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import Model, { attr } from '@ember-data/model'; + +export default class RoleSamlModel extends Model { + @attr('string') ssoServiceURL; + @attr('string') tokenPollID; + @attr('string') clientVerifier; +} diff --git a/ui/app/services/auth.js b/ui/app/services/auth.js index a66ccfdc1f13..d44b41bfb756 100644 --- a/ui/app/services/auth.js +++ b/ui/app/services/auth.js @@ -17,12 +17,12 @@ import { resolve, reject } from 'rsvp'; import getStorage from 'vault/lib/token-storage'; import ENV from 'vault/config/environment'; -import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends'; +import { allSupportedAuthBackends } from 'vault/helpers/supported-auth-backends'; const TOKEN_SEPARATOR = '☃'; const TOKEN_PREFIX = 'vault-'; const ROOT_PREFIX = '_root_'; -const BACKENDS = supportedAuthBackends(); +const BACKENDS = allSupportedAuthBackends(); export { TOKEN_SEPARATOR, TOKEN_PREFIX, ROOT_PREFIX }; diff --git a/ui/app/styles/components/code-snippet.scss b/ui/app/styles/components/code-snippet.scss deleted file mode 100644 index 05c4e85c37ea..000000000000 --- a/ui/app/styles/components/code-snippet.scss +++ /dev/null @@ -1,20 +0,0 @@ -.code-snippet-container { - background-color: $ui-gray-900; - display: flex; - justify-content: space-between; - border-radius: $radius-large; - - code { - white-space: pre; - } -} - -.code-snippet-copy-button { - cursor: pointer; - color: $ui-gray-300; - background: none; - border: none; - box-shadow: none; - min-width: auto; - padding: $size-11 $size-8; -} diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss index 499be7a9ee67..19653c83dc26 100644 --- a/ui/app/styles/core.scss +++ b/ui/app/styles/core.scss @@ -60,7 +60,6 @@ @import './components/box-radio'; @import './components/calendar-widget'; @import './components/codemirror'; -@import './components/code-snippet'; @import './components/confirm'; @import './components/console-ui-panel'; @import './components/control-group'; diff --git a/ui/app/templates/components/auth-form.hbs b/ui/app/templates/components/auth-form.hbs index f93b524dc239..cb5633580414 100644 --- a/ui/app/templates/components/auth-form.hbs +++ b/ui/app/templates/components/auth-form.hbs @@ -52,12 +52,14 @@ {{/if}}
-
-

{{this.selectedAuthBackend.path}}

- - {{this.selectedAuthBackend.mountDescription}} - -
+ {{#if this.selectedAuthBackend.path}} +
+

{{this.selectedAuthBackend.path}}

+ + {{this.selectedAuthBackend.mountDescription}} + +
+ {{/if}} {{#if (or (not this.hasMethodsWithPath) (not this.selectedAuthIsPath))}} +
+ + +
+ {{yield}} +
+ + +{{else}} + + Nonsecure context detected + + Logging in with a SAML auth method requires a browser in a secure context. + + + + Read more about secure contexts. + + + + +{{/if}} \ No newline at end of file diff --git a/ui/lib/core/addon/components/code-snippet.hbs b/ui/lib/core/addon/components/code-snippet.hbs index ee846af401e7..e94cacada73e 100644 --- a/ui/lib/core/addon/components/code-snippet.hbs +++ b/ui/lib/core/addon/components/code-snippet.hbs @@ -3,9 +3,11 @@ SPDX-License-Identifier: BUSL-1.1 ~}} -
- - {{@codeBlock}} - - +
+ {{@codeBlock}} +
\ No newline at end of file diff --git a/ui/lib/core/addon/components/edit-form.js b/ui/lib/core/addon/components/edit-form.js index e71c09f7d84d..12f8b8205d82 100644 --- a/ui/lib/core/addon/components/edit-form.js +++ b/ui/lib/core/addon/components/edit-form.js @@ -15,6 +15,9 @@ export default Component.extend({ layout, flashMessages: service(), + // internal validations + invalidFormAlert: '', + modelValidations: null, // public API model: null, successMessage: 'Saved!', @@ -38,10 +41,25 @@ export default Component.extend({ // is the case, set this value to true callOnSaveAfterRender: false, + checkModelValidity(model) { + if (model.validate) { + const { isValid, state, invalidFormMessage } = model.validate(); + this.set('modelValidations', state); + this.set('invalidFormAlert', invalidFormMessage); + return isValid; + } + // no validations on model; return true + return true; + }, + save: task( waitFor(function* (model, options = { method: 'save' }) { const { method } = options; const messageKey = method === 'save' ? 'successMessage' : 'deleteSuccessMessage'; + if (method === 'save' && !this.checkModelValidity(model)) { + // if saving and model invalid, don't continue + return; + } try { yield model[method](); } catch (err) { diff --git a/ui/lib/core/addon/components/form-field.hbs b/ui/lib/core/addon/components/form-field.hbs index 7c9e4b5af098..31be930f44af 100644 --- a/ui/lib/core/addon/components/form-field.hbs +++ b/ui/lib/core/addon/components/form-field.hbs @@ -281,12 +281,6 @@ class="input {{if this.validationError 'has-error-border'}}" maxLength={{@attr.options.characterLimit}} /> - {{! TODO: explore removing in favor of new model validations pattern since it is only used on the namespace model }} - {{#if @attr.options.validationAttr}} - {{#if (and (get @model this.valuePath) (not (get @model @attr.options.validationAttr)))}} - - {{/if}} - {{/if}} {{/if}}
{{else if (or (eq @attr.type "boolean") (eq @attr.options.editType "boolean"))}} diff --git a/ui/lib/core/addon/templates/components/form-save-buttons.hbs b/ui/lib/core/addon/components/form-save-buttons.hbs similarity index 77% rename from ui/lib/core/addon/templates/components/form-save-buttons.hbs rename to ui/lib/core/addon/components/form-save-buttons.hbs index cf5586457958..2f6194d7a95c 100644 --- a/ui/lib/core/addon/templates/components/form-save-buttons.hbs +++ b/ui/lib/core/addon/components/form-save-buttons.hbs @@ -3,7 +3,7 @@ SPDX-License-Identifier: BUSL-1.1 ~}} -
+
{{/if}}
+ {{yield to="error"}}
\ No newline at end of file
diff --git a/ui/lib/core/addon/components/form-save-buttons.js b/ui/lib/core/addon/components/form-save-buttons.js
index 24cffe4ea1c9..a5029c1f6dff 100644
--- a/ui/lib/core/addon/components/form-save-buttons.js
+++ b/ui/lib/core/addon/components/form-save-buttons.js
@@ -3,18 +3,19 @@
 * SPDX-License-Identifier: BUSL-1.1
 */

-import Component from '@ember/component';
-import { computed } from '@ember/object';
-import layout from '../templates/components/form-save-buttons';
+import Component from '@glimmer/component';

/**
 * @module FormSaveButtons
 * `FormSaveButtons` displays a button save and a cancel button at the bottom of a form.
+ * To show an overall inline error message, use the :error yielded block like shown below.
 *
 * @example
 * ```js
 * <FormSaveButtons @saveButtonText="Save" @cancelLinkParams={{array
+ * "foo.route"}}>
+ * <:error>This is an error</:error>
+ * </FormSaveButtons>
 * ```
 *
 * @param [saveButtonText="Save" {String}] - The text that will be rendered on the Save button.
@@ -26,13 +27,11 @@ import layout from '../templates/components/form-save-buttons';
 *
 */

-export default Component.extend({
-  layout,
-  tagName: '',
-
-  cancelLink: computed('cancelLinkParams.[]', function () {
-    if (!Array.isArray(this.cancelLinkParams) || !this.cancelLinkParams.length) return;
-    const [route, ...models] = this.cancelLinkParams;
+export default class FormSaveButtons extends Component {
+  get cancelLink() {
+    const { cancelLinkParams } = this.args;
+    if (!Array.isArray(cancelLinkParams) || !cancelLinkParams.length) return null;
+    const [route, ...models] = cancelLinkParams;
     return { route, models };
-  }),
-});
+  }
+}
diff --git a/ui/lib/core/addon/templates/components/edit-form.hbs b/ui/lib/core/addon/templates/components/edit-form.hbs
index 11248b5479b0..02f0b12a0e73 100644
--- a/ui/lib/core/addon/templates/components/edit-form.hbs
+++ b/ui/lib/core/addon/templates/components/edit-form.hbs
@@ -9,17 +9,29 @@
 {{#if (or this.model.fields this.model.attrs)}}
   {{#each (or this.model.fields this.model.attrs) as |attr|}}
-
+
   {{/each}}
 {{else if this.model.fieldGroups}}
-
+
 {{/if}}
+ > + <:error> + {{#if this.invalidFormAlert}} + + {{/if}} + + \ No newline at end of file diff --git a/ui/lib/kubernetes/addon/components/page/configuration.hbs b/ui/lib/kubernetes/addon/components/page/configuration.hbs index e78f1d295848..bede4356fcd3 100644 --- a/ui/lib/kubernetes/addon/components/page/configuration.hbs +++ b/ui/lib/kubernetes/addon/components/page/configuration.hbs @@ -28,4 +28,6 @@ {{/if}} {{else}} -{{/if}} \ No newline at end of file +{{/if}} + + \ No newline at end of file diff --git a/ui/lib/kv/addon/components/page/secret/paths.hbs b/ui/lib/kv/addon/components/page/secret/paths.hbs index 24af98b74488..f10b72f867d2 100644 --- a/ui/lib/kv/addon/components/page/secret/paths.hbs +++ b/ui/lib/kv/addon/components/page/secret/paths.hbs @@ -40,7 +40,6 @@ learn more.

- {{! HDS Adoption: Replace with Hds::Copy::Snippet }}

@@ -53,6 +52,5 @@ learn more.

- {{! HDS Adoption: Replace with Hds::Copy::Snippet }} \ No newline at end of file diff --git a/ui/lib/ldap/addon/components/page/library/details/accounts.hbs b/ui/lib/ldap/addon/components/page/library/details/accounts.hbs index 99f46bd0e2ac..bf5366d29e11 100644 --- a/ui/lib/ldap/addon/components/page/library/details/accounts.hbs +++ b/ui/lib/ldap/addon/components/page/library/details/accounts.hbs @@ -1,39 +1,41 @@ -
- -
-

All accounts

- {{#if @library.canCheckOut}} - - {{/if}} -
+
+
+ +
+

All accounts

+ {{#if @library.canCheckOut}} + + {{/if}} +
-

The accounts within this library

-
+

The accounts within this library

+
- - <:body as |Body|> - - {{Body.data.account}} - - - - - - -
+ + <:body as |Body|> + + {{Body.data.account}} + + + + + + + +
-
+
-
- {{! HDS Adoption: Replace with Hds::Copy::Snippet }} - {{this.cliCommand}} - -
+
diff --git a/ui/lib/ldap/addon/components/page/overview.hbs b/ui/lib/ldap/addon/components/page/overview.hbs index d08bd127438f..c719e2ec739a 100644 --- a/ui/lib/ldap/addon/components/page/overview.hbs +++ b/ui/lib/ldap/addon/components/page/overview.hbs @@ -33,7 +33,7 @@

-
+
- -
+
fakeWindow.create()); + this.server.put('/auth/saml/sso_service_url', () => ({ + data: { + sso_service_url: 'http://sso-url.hashicorp.com/service', + token_poll_id: '1234', + }, + })); + this.server.put('/auth/saml/token', () => ({ + auth: { client_token: 'root' }, + })); + // ensure clean state + localStorage.removeItem('selectedAuth'); + authPage.logout(); + }); + + hooks.afterEach(function () { + this.openStub.restore(); + }); + + test('it should login with saml when selected from auth methods dropdown', async function (assert) { + assert.expect(1); + + this.server.get('/auth/token/lookup-self', (schema, req) => { + assert.ok(true, 'request made to auth/token/lookup-self after saml callback'); + req.passthrough(); + }); + // select from dropdown or click auth path tab + await waitUntil(() => find('[data-test-select="auth-method"]')); + await fillIn('[data-test-select="auth-method"]', 'saml'); + await click('[data-test-auth-submit]'); + }); + + test('it should login with saml from listed auth mount tab', async function (assert) { + assert.expect(4); + + this.server.get('/sys/internal/ui/mounts', () => ({ + data: { + auth: { + 'test-path/': { description: '', options: {}, type: 'saml' }, + }, + }, + })); + this.server.put('/auth/test-path/sso_service_url', () => { + assert.ok(true, 'role request made to correct non-standard mount path'); + return { + data: { + sso_service_url: 'http://sso-url.hashicorp.com/service', + token_poll_id: '1234', + }, + }; + }); + this.server.put('/auth/test-path/token', () => { + assert.ok(true, 'login request made to correct non-standard mount path'); + return { + auth: { client_token: 'root' }, + }; + }); + this.server.get('/auth/token/lookup-self', (schema, req) => { + assert.ok(true, 'request made to auth/token/lookup-self after oidc callback'); + assert.deepEqual( + req.requestHeaders, + { 'X-Vault-Token': 'root' }, + 'calls lookup-self with returned client token after login' + ); + req.passthrough(); + }); + + // click auth path tab + await waitUntil(() => find('[data-test-auth-method="test-path"]')); + await click('[data-test-auth-method="test-path"]'); + await click('[data-test-auth-submit]'); + }); + + test('it should render API errors from both endpoints', async function (assert) { + assert.expect(3); + + this.server.put('/auth/saml/sso_service_url', (schema, { requestBody }) => { + const { role } = JSON.parse(requestBody); + if (!role) { + return new Response( + 400, + { 'Content-Type': 'application/json' }, + JSON.stringify({ errors: ["missing required 'role' parameter"] }) + ); + } + return { + data: { + sso_service_url: 'http://sso-url.hashicorp.com/service', + token_poll_id: '1234', + }, + }; + }); + this.server.put('/auth/saml/token', (schema, { requestHeaders }) => { + if (requestHeaders['X-Vault-Namespace']) { + return new Response( + 400, + { 'Content-Type': 'application/json' }, + JSON.stringify({ errors: ['something went wrong'] }) + ); + } + return { + auth: { client_token: 'root' }, + }; + }); + this.server.get('/auth/token/lookup-self', (schema, req) => { + assert.ok(true, 'request made to auth/token/lookup-self after saml callback'); + req.passthrough(); + }); + + // select saml auth type + await waitUntil(() => find('[data-test-select="auth-method"]')); + await fillIn('[data-test-select="auth-method"]', 'saml'); + await fillIn('[data-test-auth-form-ns-input]', 'some-ns'); + await click('[data-test-auth-submit]'); + assert + .dom('[data-test-message-error-description]') + .hasText("missing required 'role' parameter", 'shows API error 
from role fetch'); + + await fillIn('[data-test-role]', 'my-role'); + await click('[data-test-auth-submit]'); + assert + .dom('[data-test-message-error-description]') + .hasText('something went wrong', 'shows API error from login attempt'); + + await fillIn('[data-test-auth-form-ns-input]', ''); + await click('[data-test-auth-submit]'); + }); + + test('it should populate saml auth method on logout', async function (assert) { + authPage.logout(); + // select from dropdown + await waitUntil(() => find('[data-test-select="auth-method"]')); + await fillIn('[data-test-select="auth-method"]', 'saml'); + await click('[data-test-auth-submit]'); + await waitUntil(() => find('[data-test-user-menu-trigger]')); + await click('[data-test-user-menu-trigger]'); + await click('#logout'); + assert + .dom('[data-test-select="auth-method"]') + .hasValue('saml', 'Previous auth method selected on logout'); + }); +}); diff --git a/ui/tests/integration/components/kubernetes/page/configuration-test.js b/ui/tests/integration/components/kubernetes/page/configuration-test.js index 99cf070c7fa5..19129d5b3447 100644 --- a/ui/tests/integration/components/kubernetes/page/configuration-test.js +++ b/ui/tests/integration/components/kubernetes/page/configuration-test.js @@ -56,7 +56,7 @@ module('Integration | Component | kubernetes | Page::Configuration', function (h }; }); - test('it should render tab page header and config cta', async function (assert) { + test('it should render tab page header, config cta and mount config', async function (assert) { await this.renderComponent(); assert.dom('.title svg').hasClass('flight-icon-kubernetes', 'Kubernetes icon renders in title'); assert.dom('.title').hasText('kubernetes-test', 'Mount path renders in title'); @@ -64,6 +64,7 @@ module('Integration | Component | kubernetes | Page::Configuration', function (h .dom('[data-test-toolbar-config-action]') .hasText('Configure Kubernetes', 'Toolbar action has correct text'); assert.dom('[data-test-config-cta]').exists('Config cta renders'); + assert.dom('[data-test-mount-config]').exists('Mount config renders'); }); test('it should render message for inferred configuration', async function (assert) { diff --git a/ui/tests/integration/components/ldap/page/library/details/accounts-test.js b/ui/tests/integration/components/ldap/page/library/details/accounts-test.js index eb7670bc9823..add8a8a69113 100644 --- a/ui/tests/integration/components/ldap/page/library/details/accounts-test.js +++ b/ui/tests/integration/components/ldap/page/library/details/accounts-test.js @@ -77,8 +77,7 @@ module('Integration | Component | ldap | Page::Library::Details::Accounts', func assert.dom('[data-test-checked-out-card]').exists('Accounts checked out card renders'); assert - .dom('[data-test-cli-command]') + .dom('[data-test-code-snippet] code') .hasText('vault lease renew ad/library/test-library/check-out/:lease_id', 'Renew cli command renders'); - assert.dom(`[data-test-cli-command-copy]`).exists('Renew cli command copy button renders'); }); }); diff --git a/ui/tests/integration/components/mount-backend/type-form-test.js b/ui/tests/integration/components/mount-backend/type-form-test.js index 7f89ba8dccc6..14c249194180 100644 --- a/ui/tests/integration/components/mount-backend/type-form-test.js +++ b/ui/tests/integration/components/mount-backend/type-form-test.js @@ -9,11 +9,12 @@ import { click, render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; import sinon from 'sinon'; import { allEngines, mountableEngines } from 
'vault/helpers/mountable-secret-engines';
-import { methods } from 'vault/helpers/mountable-auth-methods';
+import { allMethods, methods } from 'vault/helpers/mountable-auth-methods';

const secretTypes = mountableEngines().map((engine) => engine.type);
const allSecretTypes = allEngines().map((engine) => engine.type);
const authTypes = methods().map((auth) => auth.type);
+const allAuthTypes = allMethods().map((auth) => auth.type);

module('Integration | Component | mount-backend/type-form', function (hooks) {
  setupRenderingTest(hooks);
@@ -70,5 +71,12 @@ module('Integration | Component | mount-backend/type-form', function (hooks) {
      .dom('[data-test-mount-type]')
      .exists({ count: allSecretTypes.length }, 'Renders all secret engines');
    });
+
+    test('it renders correct items for enterprise auth methods', async function (assert) {
+      await render(hbs``);
+      assert
+        .dom('[data-test-mount-type]')
+        .exists({ count: allAuthTypes.length }, 'Renders all auth methods');
+    });
  });
});
diff --git a/vault/external_plugin_container_test.go b/vault/external_plugin_container_test.go
index 2e99923b4821..8133c13563e1 100644
--- a/vault/external_plugin_container_test.go
+++ b/vault/external_plugin_container_test.go
@@ -7,6 +7,7 @@ import (
	"context"
	"encoding/hex"
	"fmt"
+	"os/exec"
	"strings"
	"testing"

@@ -56,6 +57,9 @@ func TestExternalPluginInContainer_MountAndUnmount(t *testing.T) {
			c, plugin := testClusterWithContainerPlugin(t, tc.pluginType, "v1.0.0")

			t.Run("default", func(t *testing.T) {
+				if _, err := exec.LookPath("runsc"); err != nil {
+					t.Skip("Skipping test as runsc not found on path")
+				}
				mountAndUnmountContainerPlugin_WithRuntime(t, c, plugin, "")
			})

@@ -64,6 +68,9 @@ func TestExternalPluginInContainer_MountAndUnmount(t *testing.T) {
			})

			t.Run("runsc", func(t *testing.T) {
+				if _, err := exec.LookPath("runsc"); err != nil {
+					t.Skip("Skipping test as runsc not found on path")
+				}
				mountAndUnmountContainerPlugin_WithRuntime(t, c, plugin, "runsc")
			})
		})
@@ -119,6 +126,9 @@ func TestExternalPluginInContainer_GetBackendTypeVersion(t *testing.T) {
			c, plugin := testClusterWithContainerPlugin(t, tc.pluginType, tc.setRunningVersion)
			for _, ociRuntime := range []string{"runc", "runsc"} {
				t.Run(ociRuntime, func(t *testing.T) {
+					if _, err := exec.LookPath(ociRuntime); err != nil {
+						t.Skipf("Skipping test as %s not found on path", ociRuntime)
+					}
					shaBytes, _ := hex.DecodeString(plugin.ImageSha256)
					entry := &pluginutil.PluginRunner{
						Name: plugin.Name,
diff --git a/vault/logical_system.go b/vault/logical_system.go
index a2fea6c95d83..95ab7edd2459 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -5051,6 +5051,7 @@ func (c *Core) GetSealBackendStatus(ctx context.Context) (*SealBackendStatusResp
				Healthy: true,
			},
		}
+		r.Healthy = true
	}
	return &r, nil
}
@@ -6191,15 +6192,15 @@ This path responds to the following HTTP methods.
		"",
	},
	"plugin-runtime-catalog_cgroup-parent": {
-		"Optional parent cgroup for the container",
+		"Parent cgroup to set for each container. This can be used to control the total resource usage for a group of plugins.",
		"",
	},
	"plugin-runtime-catalog_cpu-nanos": {
-		"The limit of runtime CPU in nanos",
+		"CPU limit to set per container in nanos. Defaults to no limit.",
		"",
	},
	"plugin-runtime-catalog_memory-bytes": {
-		"The limit of runtime memory in bytes",
+		"Memory limit to set per container in bytes. 
Defaults to no limit.", "", }, "leases": { diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go index ae76b13ac1c5..55ae807f9862 100644 --- a/vault/logical_system_test.go +++ b/vault/logical_system_test.go @@ -7,6 +7,7 @@ import ( "context" "encoding/base64" "encoding/hex" + "errors" "fmt" "io/ioutil" "net/http" @@ -21,6 +22,8 @@ import ( "github.com/fatih/structs" "github.com/go-test/deep" "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" semver "github.com/hashicorp/go-version" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" "github.com/hashicorp/vault/helper/builtinplugins" @@ -30,6 +33,7 @@ import ( "github.com/hashicorp/vault/helper/random" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/compressutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -38,8 +42,10 @@ import ( "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" ) func TestSystemConfigCORS(t *testing.T) { @@ -6110,6 +6116,131 @@ func TestSystemBackend_pluginRuntimeCRUD(t *testing.T) { } } +func TestGetSealBackendStatus(t *testing.T) { + testCases := []struct { + name string + sealOpts seal.TestSealOpts + expectHealthy bool + }{ + { + name: "healthy-autoseal", + sealOpts: seal.TestSealOpts{ + StoredKeys: seal.StoredKeysSupportedGeneric, + Name: "autoseal-test", + WrapperCount: 1, + Generation: 1, + }, + expectHealthy: true, + }, + { + name: "unhealthy-autoseal", + sealOpts: seal.TestSealOpts{ + StoredKeys: seal.StoredKeysSupportedGeneric, + Name: "autoseal-test", + WrapperCount: 1, + Generation: 1, + }, + expectHealthy: false, + }, + } + + ctx := context.Background() + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + testAccess, wrappers := seal.NewTestSeal(&tt.sealOpts) + + c := TestCoreWithSeal(t, NewAutoSeal(testAccess), false) + _, keys, _ := TestCoreInitClusterWrapperSetup(t, c, nil) + for _, key := range keys { + _, err := TestCoreUnseal(c, key) + require.NoError(t, err) + } + + if c.Sealed() { + t.Fatal("vault is sealed") + } + + if !tt.expectHealthy { + // set encryption error and perform encryption to mark seal unhealthy + wrappers[0].SetEncryptError(errors.New("test error encrypting")) + + _, errs := c.seal.GetAccess().Encrypt(context.Background(), []byte("test-plaintext")) + if len(errs) == 0 { + t.Fatalf("expected error on encryption, but got none") + } + } + + resp, err := c.GetSealBackendStatus(ctx) + require.NoError(t, err) + + if resp.Healthy && !tt.expectHealthy { + t.Fatal("expected seal to be unhealthy, but status was healthy") + } else if !resp.Healthy && tt.expectHealthy { + t.Fatal("expected seal to be healthy, but status was unhealthy") + } + + if !tt.expectHealthy && resp.UnhealthySince == "" { + t.Fatal("missing UnhealthySince field in response with unhealthy seal") + } + + if len(resp.Backends) == 0 { + t.Fatal("Backend list in response was empty") + } + + if !tt.expectHealthy && resp.Backends[0].Healthy { + t.Fatal("expected seal to be unhealthy, received healthy status") + } else if 
tt.expectHealthy && !resp.Backends[0].Healthy { + t.Fatal("expected seal to be healthy, received unhealthy status") + } + + if !tt.expectHealthy && resp.Backends[0].UnhealthySince == "" { + t.Fatal("missing UnhealthySince field in unhealthy seal") + } + }) + } + + shamirSeal := NewDefaultSeal(seal.NewAccess(nil, + &seal.SealGenerationInfo{ + Generation: 1, + Seals: []*configutil.KMS{{Type: wrapping.WrapperTypeShamir.String()}}, + }, + []*seal.SealWrapper{ + { + Wrapper: aeadwrapper.NewShamirWrapper(), + SealConfigType: wrapping.WrapperTypeShamir.String(), + Priority: 1, + }, + }, + )) + + c := TestCoreWithSeal(t, shamirSeal, false) + keys, _, _ := TestCoreInitClusterWrapperSetup(t, c, nil) + for _, key := range keys { + _, err := TestCoreUnseal(c, key) + require.NoError(t, err) + } + + if c.Sealed() { + t.Fatal("vault is sealed") + } + + resp, err := c.GetSealBackendStatus(ctx) + require.NoError(t, err) + + if !resp.Healthy { + t.Fatal("expected healthy seal, got unhealthy") + } + + if len(resp.Backends) != 1 { + t.Fatalf("expected response Backends to contain one seal, got %d", len(resp.Backends)) + } + + if !resp.Backends[0].Healthy { + t.Fatal("expected healthy seal, got unhealthy") + } +} + func TestSystemBackend_pluginRuntime_CannotDeleteRuntimeWithReferencingPlugins(t *testing.T) { if runtime.GOOS != "linux" { t.Skip("Currently plugincontainer only supports linux") diff --git a/vault/plugin_catalog.go b/vault/plugin_catalog.go index 34a0f4b2bab6..68936cbd5e87 100644 --- a/vault/plugin_catalog.go +++ b/vault/plugin_catalog.go @@ -383,6 +383,7 @@ func (c *PluginCatalog) newPluginClient(ctx context.Context, pluginRunner *plugi // Multiplexing support will always be false initially, but will be // adjusted once we query from the plugin whether it can multiplex or not + var spawnedPlugin bool if !extPlugin.multiplexingSupport || len(extPlugin.connections) == 0 { c.logger.Debug("spawning a new plugin process", "plugin_name", pluginRunner.Name, "id", id) client, err := pluginRunner.RunConfig(ctx, @@ -398,6 +399,7 @@ func (c *PluginCatalog) newPluginClient(ctx context.Context, pluginRunner *plugi return nil, err } + spawnedPlugin = true pc.client = client } else { c.logger.Debug("returning existing plugin client for multiplexed plugin", "id", id) @@ -417,6 +419,11 @@ func (c *PluginCatalog) newPluginClient(ctx context.Context, pluginRunner *plugi // Subsequent calls to this will return the same client. rpcClient, err := pc.client.Client() if err != nil { + // Make sure we kill any spawned plugins that didn't make it into our + // map of connections. + if spawnedPlugin { + pc.client.Kill() + } return nil, err } @@ -427,6 +434,11 @@ func (c *PluginCatalog) newPluginClient(ctx context.Context, pluginRunner *plugi muxed, err := pluginutil.MultiplexingSupported(ctx, clientConn, config.Name) if err != nil { + // Make sure we kill any spawned plugins that didn't make it into our + // map of connections. + if spawnedPlugin { + pc.client.Kill() + } return nil, err } diff --git a/vault/request_handling.go b/vault/request_handling.go index d16cf3804173..5bde7a392082 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -264,6 +264,20 @@ func (c *Core) fetchACLTokenEntryAndEntity(ctx context.Context, req *logical.Req return acl, te, entity, identityPolicies, nil } +// CheckTokenWithLock calls CheckToken after grabbing the internal stateLock, and also checking that we aren't in the +// process of shutting down. 
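+// Editor's note (an assumption, not in the diff): holding stateLock for the
+// duration means a concurrent seal or step-down is observed here and surfaces
+// as an error from the checks below, rather than racing the token lookup.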
+func (c *Core) CheckTokenWithLock(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *logical.TokenEntry, error) { + c.stateLock.RLock() + defer c.stateLock.RUnlock() + // first check that we aren't shutting down + if c.Sealed() { + return nil, nil, errors.New("core is sealed") + } else if c.activeContext != nil && c.activeContext.Err() != nil { + return nil, nil, c.activeContext.Err() + } + return c.CheckToken(ctx, req, unauth) +} + func (c *Core) CheckToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *logical.TokenEntry, error) { defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now()) diff --git a/vault/seal_autoseal.go b/vault/seal_autoseal.go index 6a3a225c7f15..242ae4f382a1 100644 --- a/vault/seal_autoseal.go +++ b/vault/seal_autoseal.go @@ -66,7 +66,7 @@ func NewAutoSeal(lowLevel seal.Access) *autoSeal { func (d *autoSeal) Healthy() bool { d.hcLock.RLock() defer d.hcLock.RUnlock() - return d.allSealsHealthy + return d.Access.AllSealWrappersHealthy() } func (d *autoSeal) SealWrapable() bool { diff --git a/website/content/api-docs/auth/kubernetes.mdx b/website/content/api-docs/auth/kubernetes.mdx index f8327b604c34..9e10d3b433e1 100644 --- a/website/content/api-docs/auth/kubernetes.mdx +++ b/website/content/api-docs/auth/kubernetes.mdx @@ -32,8 +32,8 @@ access the Kubernetes API. - `kubernetes_host` `(string: )` - Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server. - `kubernetes_ca_cert` `(string: "")` - PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API. NOTE: Every line must end with a newline: `\n` If not set, the local CA cert will be used if running in a Kubernetes pod. -- `token_reviewer_jwt` `(string: "")` - A service account JWT used to access the TokenReview - API to validate other JWTs during login. If not set, +- `token_reviewer_jwt` `(string: "")` - A service account JWT (or other token) used as a bearer + token to access the TokenReview API to validate other JWTs during login. If not set, the local service account token is used if running in a Kubernetes pod, otherwise the JWT submitted in the login payload will be used to access the Kubernetes TokenReview API. - `pem_keys` `(array: [])` - Optional list of PEM-formatted public keys or certificates diff --git a/website/content/api-docs/secret/databases/mysql-maria.mdx b/website/content/api-docs/secret/databases/mysql-maria.mdx index 685e4c32890d..bdce2662bdfe 100644 --- a/website/content/api-docs/secret/databases/mysql-maria.mdx +++ b/website/content/api-docs/secret/databases/mysql-maria.mdx @@ -46,16 +46,23 @@ has a number of parameters to further configure a connection. - `password` `(string: "")` - The root credential password used in the connection URL. +- `auth_type` `(string: "")` - If set to `gcp_iam`, will enable IAM authentication to a Google + CloudSQL instance. For more information on authenticating to CloudSQL via IAM, please refer to + Google's official documentation [here.](https://cloud.google.com/sql/docs/postgres/authentication). + +- `service_account_json` `(string: "")` - JSON encoded credentials for a GCP Service Account to use + for IAM authentication. Requires `auth_type` to be `gcp_iam`. + - `tls_certificate_key` `(string: "")` - x509 certificate for connecting to the database. This must be a PEM encoded version of the private key and the certificate combined. - `tls_ca` `(string: "")` - x509 CA file for validating the certificate presented by the MySQL server. 
Must be PEM encoded. -- `tls_server_name` `(string: "")` - Specifies the subject alternative name should be present in the +- `tls_server_name` `(string: "")` - Specifies the subject alternative name should be present in the server's certificate. -- `tls_skip_verify` `(boolean: false)` - When set to true, disables the server certificate verification. +- `tls_skip_verify` `(boolean: false)` - When set to true, disables the server certificate verification. Setting this to true is not recommended for production. - `username_template` `(string)` - [Template](/vault/docs/concepts/username-templating) describing how diff --git a/website/content/api-docs/secret/databases/postgresql.mdx b/website/content/api-docs/secret/databases/postgresql.mdx index 636cd287a687..894fd81454d0 100644 --- a/website/content/api-docs/secret/databases/postgresql.mdx +++ b/website/content/api-docs/secret/databases/postgresql.mdx @@ -27,11 +27,11 @@ has a number of parameters to further configure a connection. - `connection_url` `(string: )` - Specifies the PostgreSQL DSN. This field can be templated and supports passing the username and password parameters in the following format `{{field_name}}`. Certificate authentication - can be used by setting `?sslmode=` to be any of the applicable values as outlined in + can be used by setting `?sslmode=` to be any of the applicable values as outlined in the [Postgres SQL documentation](https://www.postgresql.org/docs/11/libpq-ssl.html#LIBPQ-SSL-PROTECTION) - and giving the SSL credentials in the `sslrootcert`, `sslcert` and `sslkey` credentials. - A templated connection URL is required when using root credential rotation. This field - supports both format string types, URI and keyword/value. Both formats support multiple + and giving the SSL credentials in the `sslrootcert`, `sslcert` and `sslkey` credentials. + A templated connection URL is required when using root credential rotation. This field + supports both format string types, URI and keyword/value. Both formats support multiple host connection strings. Due to how `pgx` works, parameters such as `sslrootcert`, `sslcert`, `sslkey` are treated as paths on the Vault server. @@ -51,6 +51,13 @@ has a number of parameters to further configure a connection. - `password` `(string: "")` - The root credential password used in the connection URL. +- `auth_type` `(string: "")` - If set to `gcp_iam`, will enable IAM authentication to a Google + CloudSQL instance. For more information on authenticating to CloudSQL via IAM, please refer to + Google's official documentation [here.](https://cloud.google.com/sql/docs/postgres/authentication). + +- `service_account_json` `(string: "")` - JSON encoded credentials for a GCP Service Account to use + for IAM authentication. Requires `auth_type` to be `gcp_iam`. + - `username_template` `(string)` - [Template](/vault/docs/concepts/username-templating) describing how dynamic usernames are generated. diff --git a/website/content/api-docs/system/auth.mdx b/website/content/api-docs/system/auth.mdx index a1721dc45699..3312d5754cef 100644 --- a/website/content/api-docs/system/auth.mdx +++ b/website/content/api-docs/system/auth.mdx @@ -12,8 +12,6 @@ token which can be used for all future requests. ## List auth methods -@include 'alerts/restricted-admin.mdx' - This endpoint lists all enabled auth methods. | Method | Path | @@ -83,8 +81,6 @@ $ curl \ ## Enable auth method -@include 'alerts/restricted-admin.mdx' - This endpoint enables a new auth method. 
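(Editor's illustration, not part of this diff: a minimal sketch of enabling the SAML method shipped above via this endpoint; `VAULT_ADDR` and `VAULT_TOKEN` are hypothetical stand-ins, and the call assumes a token permitted to write `sys/auth/saml`.)

```javascript
// Sketch: enable a SAML auth method at the default path "saml".
const resp = await fetch(`${VAULT_ADDR}/v1/sys/auth/saml`, {
  method: 'POST',
  headers: { 'X-Vault-Token': VAULT_TOKEN, 'Content-Type': 'application/json' },
  body: JSON.stringify({ type: 'saml', description: 'SAML SSO' }),
});
// A 204 response means the method is now reachable at auth/saml/.
if (resp.status !== 204) throw new Error(`enable failed: ${resp.status}`);
```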
After enabling, the auth method can be accessed and configured via the auth path specified as part of the URL. This auth path will be nested under the `auth` prefix. @@ -186,8 +182,6 @@ $ curl \ ## Read auth method configuration -@include 'alerts/restricted-admin.mdx' - This endpoints returns the configuration of the auth method at the given path. | Method | Path | @@ -237,8 +231,6 @@ $ curl \ ## Disable auth method -@include 'alerts/restricted-admin.mdx' - This endpoint disables the auth method at the given auth path. - **`sudo` required** – This endpoint requires `sudo` capability in addition to @@ -264,8 +256,6 @@ $ curl \ ## Read auth method tuning -@include 'alerts/restricted-admin.mdx' - - This endpoint reads the given auth path's configuration. This endpoint requires `sudo` capability on the final path, but the same functionality can be achieved without `sudo` via `sys/mounts/auth/[auth-path]/tune`._ @@ -303,8 +293,6 @@ $ curl \ ## Tune auth method -@include 'alerts/restricted-admin.mdx' - Tune configuration parameters for a given auth path. _This endpoint requires `sudo` capability on the final path, but the same functionality can be achieved without `sudo` via `sys/mounts/auth/[auth-path]/tune`._ diff --git a/website/content/api-docs/system/capabilities-self.mdx b/website/content/api-docs/system/capabilities-self.mdx index 20a1cc6e17cd..1fb0eb7cf63f 100644 --- a/website/content/api-docs/system/capabilities-self.mdx +++ b/website/content/api-docs/system/capabilities-self.mdx @@ -16,8 +16,6 @@ memberships. ## Query self capabilities -@include 'alerts/restricted-admin.mdx' - This endpoint returns the capabilities of client token on the given paths. The client token is the Vault token with which this API call is made. Multiple paths are taken in at once and the capabilities of the token for each path is diff --git a/website/content/api-docs/system/capabilities.mdx b/website/content/api-docs/system/capabilities.mdx index e655307c68de..db06f1181036 100644 --- a/website/content/api-docs/system/capabilities.mdx +++ b/website/content/api-docs/system/capabilities.mdx @@ -15,8 +15,6 @@ through the entity and entity's group memberships. ## Query token capabilities -@include 'alerts/restricted-admin.mdx' - This endpoint returns the list of capabilities of a given token on the given paths. Multiple paths are taken in at once and the capabilities of the token for each path is returned. For backwards compatibility, if a single path is diff --git a/website/content/api-docs/system/config-control-group.mdx b/website/content/api-docs/system/config-control-group.mdx index d2f3fdd722a2..c7602e682c6f 100644 --- a/website/content/api-docs/system/config-control-group.mdx +++ b/website/content/api-docs/system/config-control-group.mdx @@ -13,8 +13,6 @@ settings. ## Read control group settings -@include 'alerts/restricted-admin.mdx' - This endpoint returns the current Control Group configuration. | Method | Path | @@ -39,8 +37,6 @@ $ curl \ ## Configure control group settings -@include 'alerts/restricted-admin.mdx' - This endpoint allows configuring control groups. | Method | Path | @@ -71,8 +67,6 @@ $ curl \ ## Delete control group settings -@include 'alerts/restricted-admin.mdx' - This endpoint removes any control group configuration. 
| Method | Path | diff --git a/website/content/api-docs/system/control-group.mdx b/website/content/api-docs/system/control-group.mdx index 12dea2688460..02aa6b6c1340 100644 --- a/website/content/api-docs/system/control-group.mdx +++ b/website/content/api-docs/system/control-group.mdx @@ -7,7 +7,6 @@ description: The '/sys/control-group' endpoint handles the Control Group workflo ## Authorize control group request @include 'alerts/enterprise-and-hcp-plus.mdx' -@include 'alerts/restricted-admin.mdx' This endpoint authorizes a control group request. @@ -49,8 +48,6 @@ $ curl \ ## Check control group request status -@include 'alerts/restricted-admin.mdx' - This endpoint checks the status of a control group request. | Method | Path | diff --git a/website/content/api-docs/system/ha-status.mdx b/website/content/api-docs/system/ha-status.mdx index affb1f95c38c..86183b3255cf 100644 --- a/website/content/api-docs/system/ha-status.mdx +++ b/website/content/api-docs/system/ha-status.mdx @@ -11,8 +11,6 @@ It lists the active node and the peers that it's heard from since it became acti ## HA status -@include 'alerts/restricted-admin.mdx' - This endpoint returns the HA status of the Vault cluster. | Method | Path | diff --git a/website/content/api-docs/system/internal-counters.mdx b/website/content/api-docs/system/internal-counters.mdx index c041376b8a1e..995c42401fc0 100644 --- a/website/content/api-docs/system/internal-counters.mdx +++ b/website/content/api-docs/system/internal-counters.mdx @@ -13,8 +13,6 @@ The `/sys/internal/counters` endpoints are used to return data about the number ## Entities -@include 'alerts/restricted-admin.mdx' - This endpoint returns the total number of Entities. | Method | Path | @@ -53,8 +51,6 @@ $ curl \ ## Tokens -@include 'alerts/restricted-admin.mdx' - This endpoint returns the total number of Tokens. | Method | Path | @@ -93,8 +89,6 @@ $ curl \ ## Client count -@include 'alerts/restricted-admin.mdx' - This endpoint returns client activity information for a given billing period, which is represented by the `start_time` and `end_time` parameters. @@ -726,8 +720,6 @@ $ curl \ ## Partial month client count -@include 'alerts/restricted-admin.mdx' - This endpoint returns the client activity in the current month. The response will have activity attributions per namespace, per mount within each namespaces, and new clients information. @@ -871,8 +863,6 @@ $ curl \ ## Update the client count configuration -@include 'alerts/restricted-admin.mdx' - The `/sys/internal/counters/config` endpoint is used to configure logging of active clients. | Method | Path | @@ -911,8 +901,6 @@ $ curl \ ## Read the client count configuration -@include 'alerts/restricted-admin.mdx' - Reading the configuration shows the current settings, as well as a flag as to whether any data can be queried. - `enabled` `(string)` - returns `default-enabled` or `default-disabled` if the configuration is `default`. @@ -950,8 +938,6 @@ $ curl \ ## Activity export -@include 'alerts/restricted-admin.mdx' - This endpoint returns an export of the clients that had activity within the provided start and end times. 
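(Editor's illustration, not from this diff: a sketch of requesting the export; `VAULT_ADDR` and `VAULT_TOKEN` are hypothetical stand-ins, and the newline-delimited response format noted in the comment is an assumption.)

```javascript
// Sketch: pull the client-activity export for a fixed window.
// start_time and end_time accept RFC3339 timestamps.
const qs = new URLSearchParams({
  start_time: '2023-01-01T00:00:00Z',
  end_time: '2023-06-30T23:59:59Z',
});
const resp = await fetch(`${VAULT_ADDR}/v1/sys/internal/counters/activity/export?${qs}`, {
  headers: { 'X-Vault-Token': VAULT_TOKEN },
});
const records = await resp.text(); // assumed: one JSON record per line, per client
```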
The returned set of client information will be deduplicated over the time window and will show the earliest activity logged for diff --git a/website/content/api-docs/system/internal-specs-openapi.mdx b/website/content/api-docs/system/internal-specs-openapi.mdx index 8622612a1321..0ffc62dce9be 100644 --- a/website/content/api-docs/system/internal-specs-openapi.mdx +++ b/website/content/api-docs/system/internal-specs-openapi.mdx @@ -25,8 +25,6 @@ structure, and other endpoints will be modified incrementally. ## Get OpenAPI document -@include 'alerts/restricted-admin.mdx' - This endpoint returns a single OpenAPI document describing all paths visible to the requester. | Method | Path | diff --git a/website/content/api-docs/system/internal-ui-feature.mdx b/website/content/api-docs/system/internal-ui-feature.mdx index d201a483d4ed..138a807e0de2 100644 --- a/website/content/api-docs/system/internal-ui-feature.mdx +++ b/website/content/api-docs/system/internal-ui-feature.mdx @@ -16,8 +16,6 @@ guarantee on backwards compatibility for this endpoint. ## Get enabled feature flags -@include 'alerts/restricted-admin.mdx' - This endpoint lists the enabled feature flags relevant to the UI. | Method | Path | diff --git a/website/content/api-docs/system/internal-ui-mounts.mdx b/website/content/api-docs/system/internal-ui-mounts.mdx index 682e303017e9..c3815a2c32d8 100644 --- a/website/content/api-docs/system/internal-ui-mounts.mdx +++ b/website/content/api-docs/system/internal-ui-mounts.mdx @@ -22,8 +22,6 @@ compatibility for this endpoint. ## Get available visible mounts -@include 'alerts/restricted-admin.mdx' - This endpoint lists all enabled auth methods. | Method | Path | @@ -61,8 +59,6 @@ $ curl \ ## Get single mount details -@include 'alerts/restricted-admin.mdx' - This endpoint lists details for a specific mount path. This is an authenticated endpoint, and is currently only being used internally. diff --git a/website/content/api-docs/system/internal-ui-resultant-acl.mdx b/website/content/api-docs/system/internal-ui-resultant-acl.mdx index 2aa502ed3557..d2990a9cef8e 100644 --- a/website/content/api-docs/system/internal-ui-resultant-acl.mdx +++ b/website/content/api-docs/system/internal-ui-resultant-acl.mdx @@ -15,8 +15,6 @@ intended usage, there is no guarantee on backwards compatibility for this endpoi ## Get resultant-acl -@include 'alerts/restricted-admin.mdx' - This endpoint lists the resultant-acl relevant to the UI. | Method | Path | diff --git a/website/content/api-docs/system/leader.mdx b/website/content/api-docs/system/leader.mdx index a5449dcbc893..e7717859aad1 100644 --- a/website/content/api-docs/system/leader.mdx +++ b/website/content/api-docs/system/leader.mdx @@ -13,8 +13,6 @@ current leader of Vault. ## Read leader status -@include 'alerts/restricted-admin.mdx' - This endpoint returns the high availability status and current leader instance of Vault. diff --git a/website/content/api-docs/system/leases.mdx b/website/content/api-docs/system/leases.mdx index e8756cfc5690..1d9d6709cdc8 100644 --- a/website/content/api-docs/system/leases.mdx +++ b/website/content/api-docs/system/leases.mdx @@ -10,8 +10,6 @@ The `/sys/leases` endpoints are used to view and manage leases in Vault. ## Read lease -@include 'alerts/restricted-admin.mdx' - This endpoint retrieve lease metadata. | Method | Path | @@ -55,8 +53,6 @@ $ curl \ ## List leases -@include 'alerts/restricted-admin.mdx' - This endpoint returns a list of lease ids. 
**This endpoint requires 'sudo' capability.** @@ -86,8 +82,6 @@ $ curl \ ## Renew lease -@include 'alerts/restricted-admin.mdx' - This endpoint renews a lease, requesting to extend the lease. Token leases cannot be renewed using this endpoint, use instead the auth/token/renew endpoint. @@ -136,8 +130,6 @@ $ curl \ ## Revoke lease -@include 'alerts/restricted-admin.mdx' - This endpoint revokes a lease immediately. | Method | Path | @@ -174,8 +166,6 @@ $ curl \ ## Revoke force -@include 'alerts/restricted-admin.mdx' - This endpoint revokes all secrets or tokens generated under a given prefix immediately. Unlike `/sys/leases/revoke-prefix`, this path ignores backend errors encountered during revocation. This is _potentially very dangerous_ and should @@ -208,8 +198,6 @@ $ curl \ ## Revoke prefix -@include 'alerts/restricted-admin.mdx' - This endpoint revokes all secrets (via a lease ID prefix) or tokens (via the tokens' path property) generated under a given prefix immediately. This requires `sudo` capability and access to it should be tightly controlled as it can be @@ -240,8 +228,6 @@ $ curl \ ## Tidy leases -@include 'alerts/restricted-admin.mdx' - This endpoint cleans up the dangling storage entries for leases: for each lease entry in storage, Vault will verify that it has an associated valid non-expired token in storage, and if not, the lease will be revoked. @@ -265,8 +251,6 @@ $ curl \ ## Lease counts -@include 'alerts/restricted-admin.mdx' - This endpoint returns the total count of a `type` of lease, as well as a count per mount point. Note that it currently only supports type "irrevocable". @@ -297,8 +281,6 @@ $ curl \ ## Leases list -@include 'alerts/restricted-admin.mdx' - This endpoint returns the total count of a `type` of lease, as well as a list of leases per mount point. Note that it currently only supports type "irrevocable". diff --git a/website/content/api-docs/system/license.mdx b/website/content/api-docs/system/license.mdx index 48a531382478..cca96f148fb6 100644 --- a/website/content/api-docs/system/license.mdx +++ b/website/content/api-docs/system/license.mdx @@ -15,8 +15,6 @@ Vault. ## License status -@include 'alerts/restricted-admin.mdx' - This endpoint returns information about licensing. See [license autoloading](/vault/docs/enterprise/license/autoloading) for additional background. In the response: diff --git a/website/content/api-docs/system/managed-keys.mdx b/website/content/api-docs/system/managed-keys.mdx index 3628282ff731..4e6e867b8a09 100644 --- a/website/content/api-docs/system/managed-keys.mdx +++ b/website/content/api-docs/system/managed-keys.mdx @@ -11,8 +11,6 @@ See the [Managed Keys](/vault/docs/enterprise/managed-keys) section for further ## List managed keys. -@include 'alerts/restricted-admin.mdx' - This endpoint lists all the Managed Keys of a certain type within the namespace. | Method | Path | @@ -45,8 +43,6 @@ $ curl \ ## Create/Update managed key -@include 'alerts/restricted-admin.mdx' - An endpoint that will create or update a Managed Key within a given namespace. The :type refers to the backend type that the key is to use, such as `pkcs11`. The :name argument is unique name within all managed key types in the namespace. @@ -266,8 +262,6 @@ $ curl \ ## Read managed key -@include 'alerts/restricted-admin.mdx' - This endpoint returns the managed key configuration at the given path. 
| Method | Path |
@@ -312,8 +306,6 @@ $ curl \

## Test sign with a managed key

-@include 'alerts/restricted-admin.mdx'
-
This endpoint allows an operator to validate that a managed key configuration
works by signing and verifying some randomly generated data. If the call returns
a successful HTTP status code, the configuration can be considered valid.

@@ -355,8 +347,6 @@ $ curl \

## Delete managed key

-@include 'alerts/restricted-admin.mdx'
-
This endpoint deletes the managed key at the given path provided it is not
listed within any mount point's `allowed_managed_keys`.

diff --git a/website/content/api-docs/system/mfa/index.mdx b/website/content/api-docs/system/mfa/index.mdx
index dd005369f2f5..0a114c1a6a5f 100644
--- a/website/content/api-docs/system/mfa/index.mdx
+++ b/website/content/api-docs/system/mfa/index.mdx
@@ -8,8 +8,6 @@ description: >-

# `/sys/mfa`

-@include 'alerts/restricted-admin.mdx'
-
The `/sys/mfa` endpoint focuses on managing Multi-factor Authentication (MFA)
behaviors in Vault Enterprise MFA.

diff --git a/website/content/api-docs/system/monitor.mdx b/website/content/api-docs/system/monitor.mdx
index 4529796f0a0e..a44dc4328caa 100644
--- a/website/content/api-docs/system/monitor.mdx
+++ b/website/content/api-docs/system/monitor.mdx
@@ -13,8 +13,6 @@ some log lines will be dropped.

## Monitor system logs

-@include 'alerts/restricted-admin.mdx'
-
This endpoint streams logs back to the client from Vault. Note that unlike
most API endpoints in Vault, this one does not return JSON by default. This
will send back data in whatever log format Vault has been configured with. By

diff --git a/website/content/api-docs/system/mounts.mdx b/website/content/api-docs/system/mounts.mdx
index a93e5100c0f7..a7287420fcc1 100644
--- a/website/content/api-docs/system/mounts.mdx
+++ b/website/content/api-docs/system/mounts.mdx
@@ -10,8 +10,6 @@ The `/sys/mounts` endpoint is used to manage secrets engines in Vault.

## List mounted secrets engines

-@include 'alerts/restricted-admin.mdx'
-
This endpoint lists all the mounted secrets engines.

| Method | Path |
@@ -121,8 +119,6 @@ are used by this backend.

## Enable secrets engine

-@include 'alerts/restricted-admin.mdx'
-
This endpoint enables a new secrets engine at the given path.

| Method | Path |
@@ -219,8 +215,6 @@ $ curl \

## Disable secrets engine

-@include 'alerts/restricted-admin.mdx'
-
This endpoint disables the mount point specified in the URL.

| Method | Path | |
@@ -255,8 +249,6 @@ in dangling credentials. This is meant for extreme circumstances.

## Get the configuration of a secret engine

-@include 'alerts/restricted-admin.mdx'
-
This endpoint returns the configuration of a specific secret engine.

| Method | Path |
@@ -318,8 +310,6 @@ $ curl \

## Read mount configuration

-@include 'alerts/restricted-admin.mdx'
-
This endpoint reads the given mount's configuration. Unlike the `mounts`
endpoint, this will return the current time in seconds for each TTL, which
may be the system default or a mount-specific value.

@@ -348,8 +338,6 @@ $ curl \

## Tune mount configuration

-@include 'alerts/restricted-admin.mdx'
-
This endpoint tunes configuration parameters for a given mount point. 
| Method | Path | diff --git a/website/content/api-docs/system/plugins-catalog.mdx b/website/content/api-docs/system/plugins-catalog.mdx index cb1a2e8410e5..10e6c1ecf139 100644 --- a/website/content/api-docs/system/plugins-catalog.mdx +++ b/website/content/api-docs/system/plugins-catalog.mdx @@ -12,8 +12,6 @@ once registered, backends can use the plugin by querying the catalog. ## LIST plugins -@include 'alerts/restricted-admin.mdx' - This endpoint lists the plugins in the catalog by type. | Method | Path | @@ -85,8 +83,6 @@ $ curl \ ## LIST plugins -@include 'alerts/restricted-admin.mdx' - This endpoint lists the plugins in the catalog by type. | Method | Path | @@ -122,8 +118,6 @@ $ curl \ ## Register plugin -@include 'alerts/restricted-admin.mdx' - This endpoint registers a new plugin, or updates an existing one with the supplied name. @@ -181,8 +175,6 @@ $ curl \ ## Read plugin -@include 'alerts/restricted-admin.mdx' - This endpoint returns the configuration data for the plugin with the given name. - **`sudo` required** – This endpoint requires `sudo` capability in addition to @@ -229,8 +221,6 @@ $ curl \ ## Remove plugin from catalog -@include 'alerts/restricted-admin.mdx' - This endpoint removes the plugin with the given name. - **`sudo` required** – This endpoint requires `sudo` capability in addition to diff --git a/website/content/api-docs/system/plugins-reload-backend.mdx b/website/content/api-docs/system/plugins-reload-backend.mdx index 4a9f8e851e32..261371876e7b 100644 --- a/website/content/api-docs/system/plugins-reload-backend.mdx +++ b/website/content/api-docs/system/plugins-reload-backend.mdx @@ -13,8 +13,6 @@ provided, all mounted paths that use that plugin backend will be reloaded. ## Reload plugins -@include 'alerts/restricted-admin.mdx' - This endpoint reloads mounted plugin backends. | Method | Path | diff --git a/website/content/api-docs/system/policies-password.mdx b/website/content/api-docs/system/policies-password.mdx index 1708290cb5ba..9e17f8403d96 100644 --- a/website/content/api-docs/system/policies-password.mdx +++ b/website/content/api-docs/system/policies-password.mdx @@ -18,8 +18,6 @@ as well as the syntax of the policies themselves. ## Create/Update password policy -@include 'alerts/restricted-admin.mdx' - This endpoint adds a new password policy or updates an existing one. Once a policy is updated, it takes effect immediately for all associated secret engines. @@ -81,8 +79,6 @@ $ vault write sys/policies/password/my-policy policy=@my-policy.hcl ## List password policies -@include 'alerts/restricted-admin.mdx' - This endpoint lists the password policies. | Method | Path | @@ -120,8 +116,6 @@ $ curl \ ## Read password policy -@include 'alerts/restricted-admin.mdx' - This endpoint retrieves information about the named password policy. | Method | Path | @@ -151,8 +145,6 @@ $ curl \ ## Delete password policy -@include 'alerts/restricted-admin.mdx' - This endpoint deletes the password policy with the given name. This does not check if any secret engines are using it prior to deletion, so you should ensure that any engines that are utilizing this password policy are changed to a different policy (or to that engine's @@ -178,8 +170,6 @@ $ curl \ ## Generate password from password policy -@include 'alerts/restricted-admin.mdx' - This endpoint generates a password from the specified existing password policy.
| Method | Path | diff --git a/website/content/api-docs/system/policies.mdx b/website/content/api-docs/system/policies.mdx index aca88598c340..8ce662873e8f 100644 --- a/website/content/api-docs/system/policies.mdx +++ b/website/content/api-docs/system/policies.mdx @@ -18,8 +18,6 @@ Vault Open Source or basic Vault Enterprise installations. ## List ACL policies -@include 'alerts/restricted-admin.mdx' - This endpoint lists all configured ACL policies. | Method | Path | @@ -44,8 +42,6 @@ $ curl \ ## Read ACL policy -@include 'alerts/restricted-admin.mdx' - This endpoint retrieves information about the named ACL policy. | Method | Path | @@ -76,8 +72,6 @@ $ curl \ ## Create/Update ACL policy -@include 'alerts/restricted-admin.mdx' - This endpoint adds a new ACL policy or updates an existing one. Once a policy is updated, it takes effect immediately for all associated users. @@ -113,8 +107,6 @@ $ curl \ ## Delete ACL policy -@include 'alerts/restricted-admin.mdx' - This endpoint deletes the ACL policy with the given name. This will immediately affect all users associated with this policy. (A deleted policy set on a token acts as an empty policy.) @@ -139,8 +131,6 @@ $ curl \ ## List RGP policies -@include 'alerts/restricted-admin.mdx' - This endpoint lists all configured RGP policies. | Method | Path | @@ -165,8 +155,6 @@ $ curl \ ## Read RGP policy -@include 'alerts/restricted-admin.mdx' - This endpoint retrieves information about the named RGP policy. | Method | Path | @@ -198,8 +186,6 @@ $ curl \ ## Create/Update RGP policy -@include 'alerts/restricted-admin.mdx' - This endpoint adds a new RGP policy or updates an existing one. Once a policy is updated, it takes effect immediately for all associated users. @@ -240,8 +226,6 @@ $ curl \ ## Delete RGP policy -@include 'alerts/restricted-admin.mdx' - This endpoint deletes the RGP policy with the given name. This will immediately affect all users associated with this policy. (A deleted policy set on a token acts as an empty policy.) @@ -266,8 +250,6 @@ $ curl \ ## List EGP policies -@include 'alerts/restricted-admin.mdx' - This endpoint lists all configured EGP policies. Since EGP policies act on a path, this endpoint returns two identifiers: @@ -298,8 +280,6 @@ $ curl \ ## Read EGP policy -@include 'alerts/restricted-admin.mdx' - This endpoint retrieves information about the named EGP policy. | Method | Path | @@ -332,8 +312,6 @@ $ curl \ ## Create/Update EGP policy -@include 'alerts/restricted-admin.mdx' - This endpoint adds a new EGP policy or updates an existing one. Once a policy is updated, it takes effect immediately for all associated users. @@ -380,8 +358,6 @@ $ curl \ ## Delete EGP policy -@include 'alerts/restricted-admin.mdx' - This endpoint deletes the EGP policy with the given name from all paths on which it was configured. | Method | Path | diff --git a/website/content/api-docs/system/policy.mdx b/website/content/api-docs/system/policy.mdx index af77ffd3b676..52863af36780 100644 --- a/website/content/api-docs/system/policy.mdx +++ b/website/content/api-docs/system/policy.mdx @@ -10,8 +10,6 @@ The `/sys/policy` endpoint is used to manage ACL policies in Vault. ## List policies -@include 'alerts/restricted-admin.mdx' - This endpoint lists all configured policies. | Method | Path | @@ -36,8 +34,6 @@ $ curl \ ## Read policy -@include 'alerts/restricted-admin.mdx' - This endpoint retrieves the policy body for the named policy.
| Method | Path | @@ -68,8 +64,6 @@ $ curl \ ## Create/Update policy -@include 'alerts/restricted-admin.mdx' - This endpoint adds a new policy or updates an existing one. Once a policy is updated, it takes effect immediately for all associated users. @@ -104,8 +98,6 @@ $ curl \ ## Delete policy -@include 'alerts/restricted-admin.mdx' - This endpoint deletes the policy with the given name. This will immediately affect all users associated with this policy. diff --git a/website/content/api-docs/system/remount.mdx b/website/content/api-docs/system/remount.mdx index b2e7cc2dc41e..3a5745769cf5 100644 --- a/website/content/api-docs/system/remount.mdx +++ b/website/content/api-docs/system/remount.mdx @@ -12,8 +12,6 @@ The Remount documentation details the endpoints required to trigger and monitor ## Move backend -@include 'alerts/restricted-admin.mdx' - The `/sys/remount` endpoint moves an already-mounted backend to a new mount point. Remounting works for both secret engines and auth methods. @@ -85,8 +83,6 @@ $ curl \ ## Monitor migration status -@include 'alerts/restricted-admin.mdx' - This endpoint is used to monitor the status of a mount migration operation, using the ID returned in the response of the `sys/remount` call. The response contains the passed-in ID, the source and target mounts, and a status field that displays `in-progress`, `success` or `failure`. diff --git a/website/content/api-docs/system/seal-status.mdx b/website/content/api-docs/system/seal-status.mdx index 1750f8bc6a2e..feb69e68e3cf 100644 --- a/website/content/api-docs/system/seal-status.mdx +++ b/website/content/api-docs/system/seal-status.mdx @@ -10,8 +10,6 @@ The `/sys/seal-status` endpoint is used to check the seal status of a Vault. ## Seal status -@include 'alerts/restricted-admin.mdx' - This endpoint returns the seal status of the Vault. This is an unauthenticated endpoint. diff --git a/website/content/api-docs/system/tools.mdx b/website/content/api-docs/system/tools.mdx index d34f0d9a9a53..13983b6ea607 100644 --- a/website/content/api-docs/system/tools.mdx +++ b/website/content/api-docs/system/tools.mdx @@ -10,8 +10,6 @@ The `/sys/tools` endpoints are a general set of tools. ## Generate random bytes -@include 'alerts/restricted-admin.mdx' - This endpoint returns high-quality random bytes of the specified length. | Method | Path | @@ -61,8 +59,6 @@ $ curl \ ## Hash data -@include 'alerts/restricted-admin.mdx' - This endpoint returns the cryptographic hash of given data using the specified algorithm. diff --git a/website/content/api-docs/system/user-lockout.mdx b/website/content/api-docs/system/user-lockout.mdx index d7d5a02ae5a6..0b573114e072 100644 --- a/website/content/api-docs/system/user-lockout.mdx +++ b/website/content/api-docs/system/user-lockout.mdx @@ -12,8 +12,6 @@ Refer to the [user lockout](/vault/docs/concepts/user-lockout) overview for more ## List locked users -@include 'alerts/restricted-admin.mdx' - The list endpoint returns information on the users currently locked by Vault. The response will include all child namespaces of the namespace in which the @@ -195,8 +193,6 @@ $ curl \ ## Unlock user -@include 'alerts/restricted-admin.mdx' - The unlock user endpoint frees a locked user with the provided `mount_accessor` and `alias_identifier` in the given namespace. The unlock command is idempotent. Calls to the endpoint succeed even if the user matching the provided `mount_accessor` and `alias_identifier` is not currently locked.
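For illustration, a minimal sketch of an unlock call, assuming the `sys/locked-users/:mount_accessor/unlock/:alias_identifier` path this endpoint is served from; the accessor and alias values here are hypothetical:

```shell-session
$ curl \
    --header "X-Vault-Token: ..." \
    --request POST \
    http://127.0.0.1:8200/v1/sys/locked-users/auth_userpass_aa6e47b7/unlock/bsmith
```

Because the call is idempotent, repeating it for a user who is not locked still succeeds.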
diff --git a/website/content/api-docs/system/version-history.mdx b/website/content/api-docs/system/version-history.mdx index 3a3345f3c7cf..da76e0d244d9 100644 --- a/website/content/api-docs/system/version-history.mdx +++ b/website/content/api-docs/system/version-history.mdx @@ -13,8 +13,6 @@ The `/sys/version-history` endpoint is used to retrieve the version history of a ## Read version history -@include 'alerts/restricted-admin.mdx' - This endpoint returns the version history of the Vault. The response will contain the following keys: - `keys`: a list of installed versions in chronological order based on the time installed diff --git a/website/content/api-docs/system/wrapping-lookup.mdx b/website/content/api-docs/system/wrapping-lookup.mdx index 3183ba4a4f73..146c3770ddb0 100644 --- a/website/content/api-docs/system/wrapping-lookup.mdx +++ b/website/content/api-docs/system/wrapping-lookup.mdx @@ -10,8 +10,6 @@ The `/sys/wrapping/lookup` endpoint returns wrapping token properties. ## Wrapping lookup -@include 'alerts/restricted-admin.mdx' - This endpoint looks up wrapping properties for the given token. | Method | Path | diff --git a/website/content/api-docs/system/wrapping-rewrap.mdx b/website/content/api-docs/system/wrapping-rewrap.mdx index 0ced31662898..6da9f02e2034 100644 --- a/website/content/api-docs/system/wrapping-rewrap.mdx +++ b/website/content/api-docs/system/wrapping-rewrap.mdx @@ -13,8 +13,6 @@ refresh its TTL. ## Wrapping rewrap -@include 'alerts/restricted-admin.mdx' - This endpoint rewraps a response-wrapped token. The new token will use the same creation TTL as the original token and contain the same response. The old token will be invalidated. This can be used for long-term storage of a secret in a diff --git a/website/content/api-docs/system/wrapping-unwrap.mdx b/website/content/api-docs/system/wrapping-unwrap.mdx index 176ac9eee0cd..49297a4a8c85 100644 --- a/website/content/api-docs/system/wrapping-unwrap.mdx +++ b/website/content/api-docs/system/wrapping-unwrap.mdx @@ -10,8 +10,6 @@ The `/sys/wrapping/unwrap` endpoint unwraps a wrapped response. ## Wrapping unwrap -@include 'alerts/restricted-admin.mdx' - This endpoint returns the original response inside the given wrapping token. Unlike simply reading `cubbyhole/response` (which is deprecated), this endpoint provides additional validation checks on the token, returns the original value diff --git a/website/content/api-docs/system/wrapping-wrap.mdx b/website/content/api-docs/system/wrapping-wrap.mdx index 8ad5849a07f4..8071097fe82b 100644 --- a/website/content/api-docs/system/wrapping-wrap.mdx +++ b/website/content/api-docs/system/wrapping-wrap.mdx @@ -13,8 +13,6 @@ token. ## Wrapping wrap -@include 'alerts/restricted-admin.mdx' - This endpoint wraps the given user-supplied data inside a response-wrapped token. diff --git a/website/content/docs/configuration/seal/pkcs11.mdx b/website/content/docs/configuration/seal/pkcs11.mdx index e8a13480c4b3..c11af596aed2 100644 --- a/website/content/docs/configuration/seal/pkcs11.mdx +++ b/website/content/docs/configuration/seal/pkcs11.mdx @@ -26,7 +26,7 @@ following: - The presence of a `seal "pkcs11"` block in Vault's configuration file - The presence of the environment variable `VAULT_HSM_LIB` set to the library's - path as well as `VAULT_HSM_TYPE` set to `pkcs11`. If enabling via environment + path as well as `VAULT_SEAL_TYPE` set to `pkcs11`. If enabling via environment variable, all other required values (e.g., `VAULT_HSM_SLOT`) must also be supplied.
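As a hedged sketch of the environment-variable route described above, using a placeholder library path, slot, and PIN rather than values for any real HSM:

```shell-session
$ export VAULT_SEAL_TYPE=pkcs11
$ export VAULT_HSM_LIB=/usr/lib/softhsm/libsofthsm2.so
$ export VAULT_HSM_SLOT=0
$ export VAULT_HSM_PIN=AAAA-BBBB-CCCC-DDDD
$ vault server -config=/etc/vault.d/vault.hcl
```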
@@ -188,8 +188,8 @@ Alternatively, the HSM seal can be activated by providing the following environment variables: ```text +VAULT_SEAL_TYPE VAULT_HSM_LIB -VAULT_HSM_TYPE VAULT_HSM_SLOT VAULT_HSM_TOKEN_LABEL VAULT_HSM_PIN diff --git a/website/content/docs/enterprise/namespaces.mdx b/website/content/docs/enterprise/namespaces.mdx index a2d3bc78dfe3..8129eddd1466 100644 --- a/website/content/docs/enterprise/namespaces.mdx +++ b/website/content/docs/enterprise/namespaces.mdx @@ -62,15 +62,16 @@ across any namespace. ### Administrative namespaces -The Vault API includes system backend endpoints, which are mounted under the sys/ path. +The Vault API includes system backend endpoints, which are mounted under the `sys/` path. System endpoints let you interact with the internal features of your Vault instance. For security reasons, some of the system backend endpoints are restricted, and can only be called -from the root namespace or using a token in the root namespace with elevated permissions. +from the root namespace or using a token in the root namespace with elevated permissions. These endpoints +are [documented below](/vault/docs/enterprise/namespaces#root-only-api-paths). By default, Vault allows non-root calls to the less sensitive system backend endpoints. However, there may be instances where a Vault operator needs to provide access to a subset -of the restricted endpoints, like sys/audit-hash and sys/monitor, without granting access -to the full set of privileged sys/ paths. An administrative namespace lets Vault operators grant +of the restricted endpoints, like `sys/audit-hash` and `sys/monitor`, without granting access +to the full set of privileged `sys/` paths. An administrative namespace lets Vault operators grant access to a subset of privileged endpoints by setting a parameter in their Vault configuration file. ## Usage @@ -148,6 +149,8 @@ There are certain API paths that can only be called from the **root** namespace: - `sys/storage/raft` - `sys/quotas` - `sys/plugins` +- `sys/monitor` +- `sys/audit-hash` ## Tutorial diff --git a/website/content/docs/platform/aws/run.mdx b/website/content/docs/platform/aws/run.mdx index 1f4069067ec7..7d9c6a86e10d 100644 --- a/website/content/docs/platform/aws/run.mdx +++ b/website/content/docs/platform/aws/run.mdx @@ -34,9 +34,9 @@ For additional guidance on best practices for running Vault in production, pleas # Getting support -## Open source +## Community Edition -For the Open Source Vault AMI, support can be obtained through the [community channels](https://www.vaultproject.io/community) +For the Community Vault AMI, support can be obtained through the [community channels](https://www.vaultproject.io/community). [See the Vault Project on GitHub.com](https://github.com/hashicorp/vault) diff --git a/website/content/docs/secrets/databases/index.mdx b/website/content/docs/secrets/databases/index.mdx index 8fdbc7d537fa..49427677d80e 100644 --- a/website/content/docs/secrets/databases/index.mdx +++ b/website/content/docs/secrets/databases/index.mdx @@ -163,9 +163,9 @@ and private key pair to authenticate.
| [MongoDB](/vault/docs/secrets/databases/mongodb) | Yes | Yes | Yes | Yes (1.7+) | password | | [MongoDB Atlas](/vault/docs/secrets/databases/mongodbatlas) | No | Yes | Yes | Yes (1.8+) | password, client_certificate | | [MSSQL](/vault/docs/secrets/databases/mssql) | Yes | Yes | Yes | Yes (1.7+) | password | -| [MySQL/MariaDB](/vault/docs/secrets/databases/mysql-maria) | Yes | Yes | Yes | Yes (1.7+) | password | +| [MySQL/MariaDB](/vault/docs/secrets/databases/mysql-maria) | Yes | Yes | Yes | Yes (1.7+) | password, gcp_iam | | [Oracle](/vault/docs/secrets/databases/oracle) | Yes | Yes | Yes | Yes (1.7+) | password | -| [PostgreSQL](/vault/docs/secrets/databases/postgresql) | Yes | Yes | Yes | Yes (1.7+) | password | +| [PostgreSQL](/vault/docs/secrets/databases/postgresql) | Yes | Yes | Yes | Yes (1.7+) | password, gcp_iam | | [Redis](/vault/docs/secrets/databases/redis) | Yes | Yes | Yes | No | password | | [Redis ElastiCache](/vault/docs/secrets/databases/rediselasticache) | No | No | Yes | No | password | | [Redshift](/vault/docs/secrets/databases/redshift) | Yes | Yes | Yes | Yes (1.8+) | password | diff --git a/website/content/docs/secrets/databases/mysql-maria.mdx b/website/content/docs/secrets/databases/mysql-maria.mdx index a101da85f6cb..becab0f2b778 100644 --- a/website/content/docs/secrets/databases/mysql-maria.mdx +++ b/website/content/docs/secrets/databases/mysql-maria.mdx @@ -166,3 +166,56 @@ API](/vault/api-docs/secret/databases/mysql-maria) page. For more information on the database secrets engine's HTTP API please see the [Database secrets engine API](/vault/api-docs/secret/databases) page. + +## Authenticating to Cloud DBs via IAM + +### Google Cloud + +Aside from the IAM roles described in [Google's CloudSQL documentation](https://cloud.google.com/sql/docs/postgres/add-manage-iam-users#creating-a-database-user), +the following SQL privileges are needed by the service account's DB user for minimum functionality with Vault. +Additional privileges may be needed depending on the SQL statements configured on the database roles. + +```sql +-- Enable service account to create roles within DB +GRANT CREATE ROLE ON *.* TO "test-user"@"%"; +``` + +### Setup + +1. Enable the database secrets engine if it is not already enabled: + + ```shell-session + $ vault secrets enable database + Success! Enabled the database secrets engine at: database/ + ``` + + By default, the secrets engine is enabled at the name of the engine. To + enable the secrets engine at a different path, use the `-path` argument. + +1. Configure Vault with the proper plugin and connection information. Here you can explicitly enable GCP IAM authentication + and use [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc#how-to) to authenticate. + + ~> **Note**: For Google Cloud IAM, the protocol is `cloudsql-mysql` instead of `tcp`.
+ + ```shell-session + $ vault write database/config/my-mysql-database \ + plugin_name="mysql-database-plugin" \ + allowed_roles="my-role" \ + connection_url="user@cloudsql-mysql(project:region:instance)/mysql" \ + auth_type="gcp_iam" + ``` + + You can also configure the connection and authenticate by directly passing in the service account credentials + as an encoded JSON string: + + ```shell-session + $ vault write database/config/my-mysql-database \ + plugin_name="mysql-database-plugin" \ + allowed_roles="my-role" \ + connection_url="user@cloudsql-mysql(project:region:instance)/mysql" \ + auth_type="gcp_iam" \ + service_account_json="@my_credentials.json" + ``` + +Once the connection has been configured and IAM authentication is complete, the steps to set up a role and generate +credentials are the same as the ones listed above. diff --git a/website/content/docs/secrets/databases/postgresql.mdx b/website/content/docs/secrets/databases/postgresql.mdx index c1bcb8b77d78..70186f5c0344 100644 --- a/website/content/docs/secrets/databases/postgresql.mdx +++ b/website/content/docs/secrets/databases/postgresql.mdx @@ -94,3 +94,54 @@ For more information on the database secrets engine's HTTP API please see the [pgxlib]: https://pkg.go.dev/github.com/jackc/pgx/stdlib [pg_conn_docs]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + +## Authenticating to Cloud DBs via IAM + +### Google Cloud + +Aside from the IAM roles described in [Google's CloudSQL documentation](https://cloud.google.com/sql/docs/postgres/add-manage-iam-users#creating-a-database-user), +the following SQL privileges are needed by the service account's DB user for minimum functionality with Vault. +Additional privileges may be needed depending on the SQL statements configured on the database roles. + +```sql +-- Enable service account to create roles within DB +ALTER USER "<service-account-user>" WITH CREATEROLE; +``` + +### Setup + +1. Enable the database secrets engine if it is not already enabled: + + ```shell-session + $ vault secrets enable database + Success! Enabled the database secrets engine at: database/ + ``` + + By default, the secrets engine is enabled at the name of the engine. To + enable the secrets engine at a different path, use the `-path` argument. + +1. Configure Vault with the proper plugin and connection information. Here you can explicitly enable GCP IAM authentication + and use [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc#how-to) to authenticate: + + ```shell-session + $ vault write database/config/my-postgresql-database \ + plugin_name="postgresql-database-plugin" \ + allowed_roles="my-role" \ + connection_url="host=project:us-west1:mydb user=test-user@project.iam dbname=postgres sslmode=disable" \ + auth_type="gcp_iam" + ``` + + You can also configure the connection and authenticate by directly passing in the service account credentials + as an encoded JSON string: + + ```shell-session + $ vault write database/config/my-postgresql-database \ + plugin_name="postgresql-database-plugin" \ + allowed_roles="my-role" \ + connection_url="host=project:region:instance user=test-user@project.iam dbname=postgres sslmode=disable" \ + auth_type="gcp_iam" \ + service_account_json="@my_credentials.json" + ``` + +Once the connection has been configured and IAM authentication is complete, the steps to set up a role and generate +credentials are the same as the ones listed above.
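To round out the flow these new sections point back to, a hedged sketch of the follow-up steps; the role name and creation statement mirror the standard PostgreSQL dynamic-role examples and are illustrative, not specific to IAM authentication:

```shell-session
$ vault write database/roles/my-role \
    db_name="my-postgresql-database" \
    creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';" \
    default_ttl="1h" \
    max_ttl="24h"

$ vault read database/creds/my-role
```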
diff --git a/website/content/docs/secrets/databases/redis.mdx b/website/content/docs/secrets/databases/redis.mdx index 9ac6b933d6fb..90ccd58d425a 100644 --- a/website/content/docs/secrets/databases/redis.mdx +++ b/website/content/docs/secrets/databases/redis.mdx @@ -42,7 +42,7 @@ more information about setting up the database secrets engine. host="localhost" \ port=6379 \ tls=true \ - ca_cert="$CACERT" + ca_cert="$CACERT" \ username="user" \ password="pass" \ allowed_roles="my-*-role"
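With the missing line continuation restored, the Redis config above parses as a single command. For completeness, a hedged sketch of the usual next steps; the connection name `my-redis-database`, the role name, and the ACL rules are illustrative and must match your own connection config and the `allowed_roles` pattern above:

```shell-session
$ vault write database/roles/my-dynamic-role \
    db_name="my-redis-database" \
    creation_statements='["~*", "+@read"]' \
    default_ttl="5m" \
    max_ttl="1h"

$ vault read database/creds/my-dynamic-role
```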