diff --git a/.circleci/.gitattributes b/.circleci/.gitattributes new file mode 100644 index 000000000000..2dd06ee5f7cd --- /dev/null +++ b/.circleci/.gitattributes @@ -0,0 +1 @@ +config.yml linguist-generated diff --git a/.circleci/.gitignore b/.circleci/.gitignore new file mode 100644 index 000000000000..3018b3a68132 --- /dev/null +++ b/.circleci/.gitignore @@ -0,0 +1 @@ +.tmp/ diff --git a/.circleci/Makefile b/.circleci/Makefile new file mode 100644 index 000000000000..6d27afccb616 --- /dev/null +++ b/.circleci/Makefile @@ -0,0 +1,73 @@ +SHELL := /usr/bin/env bash +.SHELLFLAGS := -euo pipefail -c + +# CONFIG is the name of the make target someone +# would invoke to update the main config file (config.yml). +CONFIG ?= ci-config +# VERIFY is the name of the make target someone +# would invoke to verify the config file. +VERIFY ?= ci-verify + +CIRCLECI := circleci --skip-update-check + +CCI_INSTALL_LINK := https://circleci.com/docs/2.0/local-cli/\#installation +CCI_INSTALL_MSG := Please install CircleCI CLI. See $(CCI_INSTALL_LINK) +CCI_VERSION := $(shell $(CIRCLECI) version 2> /dev/null) +ifeq ($(CCI_VERSION),) +# Attempting to use the CLI fails with installation instructions. +CIRCLECI := echo '$(CCI_INSTALL_MSG)'; exit 1; \# +endif + +SOURCE_DIR := config +SOURCE_YML := $(shell [ ! -d $(SOURCE_DIR) ] || find $(SOURCE_DIR) -name '*.yml') +CONFIG_SOURCE := Makefile $(SOURCE_YML) | $(SOURCE_DIR) +OUT := config.yml +TMP := .tmp/config-processed +CONFIG_PACKED := .tmp/config-packed + +default: help + +help: + @echo "Usage:" + @echo " make $(CONFIG): recompile config.yml from $(SOURCE_DIR)/" + @echo " make $(VERIFY): verify that config.yml is a true mapping from $(SOURCE_DIR)/" + @echo + @echo "Diagnostics:" + @[ -z "$(CCI_VERSION)" ] || echo " circleci-cli version $(CCI_VERSION)" + @[ -n "$(CCI_VERSION)" ] || echo " $(CCI_INSTALL_MSG)" + +$(SOURCE_DIR): + @echo Source directory $(SOURCE_DIR)/ not found.; exit 1 + +# Make sure our .tmp dir exists. 
+$(shell [ -d .tmp ] || mkdir .tmp) + +.PHONY: $(CONFIG) +$(CONFIG): $(OUT) + +.PHONY: $(VERIFY) +$(VERIFY): config-up-to-date + @$(CIRCLECI) config validate $(OUT) + +GENERATED_FILE_HEADER := \#\#\# Generated by 'make $(CONFIG)' do not manually edit this file. +define GEN_CONFIG + @$(CIRCLECI) config pack $(SOURCE_DIR) > $(CONFIG_PACKED) + @echo "$(GENERATED_FILE_HEADER)" > $@ + @$(CIRCLECI) config process $(CONFIG_PACKED) >> $@ +endef + +$(OUT): $(CONFIG_SOURCE) + $(GEN_CONFIG) + @echo "$@ updated" + +$(TMP): $(CONFIG_SOURCE) + $(GEN_CONFIG) + +.PHONY: config-up-to-date +config-up-to-date: $(TMP) # Note this must not depend on $(OUT)! + @if diff config.yml $<; then \ + echo "Generated $(OUT) is up to date!"; \ + else \ + echo "Generated $(OUT) is out of date, run make $(CONFIG) to update."; \ + exit 1; \ + fi diff --git a/.circleci/README.md b/.circleci/README.md new file mode 100644 index 000000000000..ea7ed54a2b8b --- /dev/null +++ b/.circleci/README.md @@ -0,0 +1,117 @@ +# CircleCI config + +This directory contains both the source code (under `./config/`) +and the generated single-file `config.yml` +which defines the CircleCI workflows for this project. + +The Makefile in this directory generates the `./config.yml` +in CircleCI 2.0 syntax, +from the tree rooted at `./config/`, +which contains files in CircleCI 2.1 syntax. +CircleCI supports [generating a single config file from many], +using the `$ circleci config pack` command. +It also supports [expanding 2.1 syntax to 2.0 syntax] +using the `$ circleci config process` command. + +[generating a single config file from many]: https://circleci.com/docs/2.0/local-cli/#packing-a-config +[expanding 2.1 syntax to 2.0 syntax]: https://circleci.com/docs/2.0/local-cli/#processing-a-config + +## Prerequisites + +You will need the [CircleCI CLI tool] installed and working, +at least version `0.1.5607`. 
+ +``` +$ circleci version +0.1.5607+f705856 +``` + +NOTE: It is recommended to [download this tool directly from GitHub Releases]. +Do not install it using Homebrew, as this version cannot be easily updated. +It is also not recommended to pipe curl to bash (which CircleCI recommend) for security reasons! + +[CircleCI CLI tool]: https://circleci.com/docs/2.0/local-cli/ +[download this tool directly from GitHub Releases]: https://github.com/CircleCI-Public/circleci-cli/releases + +## How to make changes + +Before making changes, be sure to understand the layout +of the `./config/` file tree, as well as circleci 2.1 syntax. +See the [Syntax and layout] section below. + +To update the config, you should edit, add or remove files +in the `./config/` directory, +and then run `make ci-config`. +If that's successful, +you should then commit every `*.yml` file in the tree rooted in this directory. +That is: you should commit both the source under `./config/` +and the generated file `./config.yml` at the same time, in the same commit. +Do not edit the `./config.yml` file directly, as you will lose your changes +next time `make ci-config` is run. + +[Syntax and layout]: #syntax-and-layout + +### Verifying `./config.yml` + +To check whether or not the current `./config.yml` is up to date with the source, +and whether it is valid, run `$ make ci-verify`. +Note that `$ make ci-verify` should be run in CI, +as well as by a local git commit hook, +to ensure we never commit files that are invalid or out of date. + +#### Example shell session + +```sh +$ make ci-config +config.yml updated +$ git add -A . # The -A makes sure to include deletions/renames etc. +$ git commit -m "ci: blah blah blah" +Changes detected in .circleci/, running 'make -C .circleci ci-verify' +--> Generated config.yml is up to date! +--> Config file at config.yml is valid. +``` + +### Syntax and layout + +It is important to understand the layout of the config directory. 
+Read the documentation on [packing a config] for a full understanding +of how multiple YAML files are merged by the circleci CLI tool. + +[packing a config]: https://circleci.com/docs/2.0/local-cli/#packing-a-config + +Here is an example file tree (with comments added afterwards): + +```sh +$ tree . +. +├── Makefile +├── README.md # This file. +├── config # The source code for config.yml is rooted here. +│   ├── @config.yml # Files beginning with @ are treated specially by `circleci config pack` +│   ├── commands # Subdirectories of config become top-level keys. +│   │   └── go_test.yml # Filenames (minus .yml) become top-level keys under their parent (in this case "commands"). +│ │ # The contents of go_test.yml therefore are placed at: .commands.go_test: +│   └── jobs # jobs also becomes a top-level key under config... +│   ├── build-go-dev.yml # ...and likewise filenames become keys under their parent. +│   ├── go-mod-download.yml +│   ├── install-ui-dependencies.yml +│   ├── test-go-race.yml +│   ├── test-go.yml +│   └── test-ui.yml +└── config.yml # The generated file in 2.0 syntax. +``` + +About those `@` files... Preceding a filename with `@` +indicates to `$ circleci config pack` that the contents of this YAML file +should be at the top-level, rather than underneath a key named after their filename. +This naming convention is unfortunate as it breaks autocompletion in bash, +but there we go. + +### Why not just use YAML references? + +YAML references only work within a single file, +this is because `circleci config pack` is not a text-level packer, +but rather stitches together the structures defined in each YAML +file according to certain rules. +Therefore it must parse each file separately, +and YAML references are handled by the parser. 
diff --git a/.circleci/config.yml b/.circleci/config.yml index b98fd7b85b45..d68832ddec33 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,203 +1,579 @@ +### Generated by 'make ci-config' do not manually edit this file. version: 2 - -references: - images: - go: &GOLANG_IMAGE golang:1.12.4-stretch # Pin Go to patch version (ex: 1.2.3) - node: &NODE_IMAGE node:10-stretch # Pin Node.js to major version (ex: 10) - - environment: &ENVIRONMENT - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) - GO_VERSION: 1.12.4 # Pin Go to patch version (ex: 1.2.3) - GOTESTSUM_VERSION: 0.3.3 # Pin gotestsum to patch version (ex: 1.2.3) - jobs: - install-ui-dependencies: + pre-flight-checks: docker: - - image: *NODE_IMAGE - working_directory: /src/vault/ui + - image: circleci/buildpack-deps + environment: + - CCI_VERSION: 0.1.5691 + shell: /usr/bin/env bash -euo pipefail steps: - - checkout: - path: /src/vault - - restore_cache: - key: yarn-lock-{{ checksum "yarn.lock" }} - - run: - name: Install UI dependencies - command: | - set -eux -o pipefail - - yarn install --ignore-optional - npm rebuild node-sass - - save_cache: - key: yarn-lock-{{ checksum "yarn.lock" }} - paths: - - node_modules - - persist_to_workspace: - root: .. - paths: - - ui/node_modules - - go-mod-vendor: + - checkout + - run: + command: | + export CCI_PATH=/tmp/circleci-cli/$CCI_VERSION + mkdir -p $CCI_PATH + NAME=circleci-cli_${CCI_VERSION}_${ARCH} + URL=$BASE/v${CCI_VERSION}/${NAME}.tar.gz + curl -sSL $URL \ + | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" + # Add circleci to the path for subsequent steps. + echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV + # Done, print some debug info. + set -x + . 
$BASH_ENV + which circleci + circleci version + environment: + ARCH: linux_amd64 + BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download + name: Install CircleCI CLI + - run: + command: make ci-verify + install-ui-dependencies: docker: - - image: *GOLANG_IMAGE - working_directory: /go/src/github.com/hashicorp/vault + - image: node:10-stretch + working_directory: /src steps: - - checkout - - restore_cache: - key: go-vendor-modules-v1-{{ checksum "vendor/modules.txt" }} - - run: - name: Fix git url config - command: git config --local url."git@github.com:".insteadof https://github.com/ - - run: - name: Check go mod vendor - command: | - GO111MODULE=on go mod vendor - out=$(git status vendor --porcelain) - if [ "$out" != "" ] ; then - echo "'go mod vendor' was not clean! Please check go modules for updates (notably api and sdk)" - echo "output was:" - echo "$out" - exit 1 - fi - - save_cache: - key: go-vendor-modules-v1-{{ checksum "vendor/modules.txt" }} - paths: - - /root/.cache/go-build + - checkout + - restore_cache: + key: yarn-lock-v1-{{ checksum "ui/yarn.lock" }} + - run: + command: | + set -eux -o pipefail + cd ui + yarn install --ignore-optional + npm rebuild node-sass + name: Install UI dependencies + - save_cache: + key: yarn-lock-v1-{{ checksum "ui/yarn.lock" }} + paths: + - ui/node_modules + go-mod-download: + docker: + - image: golang:1.12.4-stretch + working_directory: /src + steps: + - add_ssh_keys: + fingerprints: + - c6:96:98:82:dc:04:6c:39:dd:ac:83:05:e3:15:1c:98 + - checkout + - restore_cache: + key: go-sum-v1-{{ checksum "go.sum" }} + - run: + command: go mod download + name: Download Go modules + - run: + command: go mod verify + name: Verify checksums of Go modules + - save_cache: + key: go-sum-v1-{{ checksum "go.sum" }} + paths: + - /go/pkg/mod build-go-dev: docker: - - image: *GOLANG_IMAGE - working_directory: /go/src/github.com/hashicorp/vault + - image: golang:1.12.4-stretch + working_directory: /src steps: - - checkout - - 
attach_workspace: - at: . - - run: - name: Build dev binary - command: | - set -eux -o pipefail - - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make bootstrap dev - - persist_to_workspace: - root: . - paths: - - bin + - checkout + - restore_cache: + key: go-sum-v1-{{ checksum "go.sum" }} + - attach_workspace: + at: . + - run: + command: | + set -eux -o pipefail + # Move dev UI assets to expected location + rm -rf ./pkg + mkdir ./pkg + + # Build dev binary + make bootstrap dev + name: Build dev binary + - persist_to_workspace: + paths: + - bin + root: . test-ui: docker: - - image: *NODE_IMAGE - working_directory: /src/vault/ui + - image: node:10-stretch + working_directory: /src resource_class: medium+ steps: - - checkout: - path: /src/vault - - attach_workspace: - at: .. - - run: - name: Test UI - command: | - set -eux -o pipefail - - # Install Chrome - wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub \ - | apt-key add - - echo "deb http://dl.google.com/linux/chrome/deb/ stable main" \ - | tee /etc/apt/sources.list.d/google-chrome.list - apt-get update - apt-get -y install google-chrome-stable - rm /etc/apt/sources.list.d/google-chrome.list - rm -rf /var/lib/apt/lists/* /var/cache/apt/* - - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}"/../bin:${PATH} - - # Run Ember tests - mkdir -p test-results/qunit - yarn run test-oss - - store_artifacts: - path: test-results - - store_test_results: - path: test-results + - checkout + - restore_cache: + key: yarn-lock-v1-{{ checksum "ui/yarn.lock" }} + - attach_workspace: + at: . 
+ - run: + command: | + set -eux -o pipefail + + # Install Chrome + wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub \ + | apt-key add - + echo "deb http://dl.google.com/linux/chrome/deb/ stable main" \ + | tee /etc/apt/sources.list.d/google-chrome.list + apt-get update + apt-get -y install google-chrome-stable + rm /etc/apt/sources.list.d/google-chrome.list + rm -rf /var/lib/apt/lists/* /var/cache/apt/* + # Add ./bin to the PATH so vault binary can be run by Ember tests + export PATH="${PWD}/bin:${PATH}" + + # Run Ember tests + cd ui + mkdir -p test-results/qunit + yarn run test-oss + name: Test UI + - store_artifacts: + path: ui/test-results + - store_test_results: + path: ui/test-results test-go: machine: true - environment: - <<: *ENVIRONMENT - GO_TAGS: + working_directory: ~/src parallelism: 2 - working_directory: ~/go/src/github.com/hashicorp/vault steps: - - checkout - - attach_workspace: - at: . - - run: - name: Run Go tests - command: | - set -eux -o pipefail - - # Install Go - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - export GOPATH="${HOME}/go" - export PATH="${PATH}:${GOPATH}/bin:/usr/local/go/bin" - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - # Split Go tests by prior test times - package_names=$(go list \ - -tags "${GO_TAGS}" \ - ./... 
\ - | grep -v /vendor/ \ - | sort \ - | circleci tests split --split-by=timings --timings-type=classname) - - # Install gotestsum - curl -sSL "https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz -C /usr/local/bin gotestsum - - # Run tests - make prep - mkdir -p test-results/go-test - CGO_ENABLED= \ - VAULT_ADDR= \ - VAULT_TOKEN= \ - VAULT_DEV_ROOT_TOKEN_ID= \ - VAULT_ACC= \ - gotestsum --format=short-verbose --junitfile test-results/go-test/results.xml -- \ - -tags "${GO_TAGS}" \ - -timeout=40m \ - -parallel=20 \ - ${package_names} - - store_artifacts: - path: test-results - - store_test_results: - path: test-results + - checkout + - run: + command: | + set -eux -o pipefail -workflows: - version: 2 + sudo mkdir /go + sudo chown -R circleci:circleci /go + name: Allow circleci user to restore Go modules cache + - restore_cache: + key: go-sum-v1-{{ checksum "go.sum" }} + - run: + command: | + set -eux -o pipefail + + # Install Go + curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" + sudo rm -rf /usr/local/go + sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" + rm -f "go${GO_VERSION}.linux-amd64.tar.gz" + export GOPATH=/go + export PATH="${PATH}:${GOPATH}/bin:/usr/local/go/bin" + + # Install CircleCI CLI + curl -sSL \ + "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ + | sudo tar --overwrite -xz \ + -C /usr/local/bin \ + "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" + # Split Go tests by prior test times + package_names=$(go list \ + -tags "${GO_TAGS}" \ + ./... 
\ + | grep -v /integ \ + | grep -v /vendor/ \ + | sort \ + | circleci tests split --split-by=timings --timings-type=classname) + + # Install gotestsum + curl -sSL "https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_linux_amd64.tar.gz" \ + | sudo tar --overwrite -xz -C /usr/local/bin gotestsum + + # Run tests + make prep + mkdir -p test-results/go-test + CGO_ENABLED= \ + VAULT_ADDR= \ + VAULT_TOKEN= \ + VAULT_DEV_ROOT_TOKEN_ID= \ + VAULT_ACC= \ + gotestsum --format=short-verbose --junitfile test-results/go-test/results.xml -- \ + -tags "${GO_TAGS}" \ + -timeout=40m \ + -parallel=20 \ + \ + ${package_names} + name: Run Go tests + no_output_timeout: 20m + - store_artifacts: + path: test-results + - store_test_results: + path: test-results + environment: + - CIRCLECI_CLI_VERSION: 0.1.5546 + - GO_TAGS: null + - GO_VERSION: 1.12.4 + - GOTESTSUM_VERSION: 0.3.3 + test-go-race: + machine: true + working_directory: ~/src + steps: + - checkout + - run: + command: | + set -eux -o pipefail + + sudo mkdir /go + sudo chown -R circleci:circleci /go + name: Allow circleci user to restore Go modules cache + - restore_cache: + key: go-sum-v1-{{ checksum "go.sum" }} + - run: + command: | + set -eux -o pipefail + + # Install Go + curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" + sudo rm -rf /usr/local/go + sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" + rm -f "go${GO_VERSION}.linux-amd64.tar.gz" + export GOPATH=/go + export PATH="${PATH}:${GOPATH}/bin:/usr/local/go/bin" + + # Install CircleCI CLI + curl -sSL \ + "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ + | sudo tar --overwrite -xz \ + -C /usr/local/bin \ + "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" + + # Split Go tests by prior test times + package_names=$(go list \ + -tags "${GO_TAGS}" \ + ./... 
\ + | grep -v /integ \ + | grep -v /vendor/ \ + | sort \ + | circleci tests split --split-by=timings --timings-type=classname) + + # Install gotestsum + curl -sSL "https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_linux_amd64.tar.gz" \ + | sudo tar --overwrite -xz -C /usr/local/bin gotestsum + + # Run tests + make prep + mkdir -p test-results/go-test + CGO_ENABLED= \ + VAULT_ADDR= \ + VAULT_TOKEN= \ + VAULT_DEV_ROOT_TOKEN_ID= \ + VAULT_ACC= \ + gotestsum --format=short-verbose --junitfile test-results/go-test/results.xml -- \ + -tags "${GO_TAGS}" \ + -timeout=40m \ + -parallel=20 \ + -race \ + ${package_names} + name: Run Go tests + no_output_timeout: 20m + - store_artifacts: + path: test-results + - store_test_results: + path: test-results + environment: + - CIRCLECI_CLI_VERSION: 0.1.5546 + - GO_TAGS: null + - GO_VERSION: 1.12.4 + - GOTESTSUM_VERSION: 0.3.3 +workflows: ci: jobs: - - install-ui-dependencies - - go-mod-vendor - - build-go-dev: - requires: - - go-mod-vendor - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - - test-go: - requires: - - build-go-dev + - pre-flight-checks + - install-ui-dependencies: + requires: + - pre-flight-checks + - go-mod-download: + requires: + - pre-flight-checks + - build-go-dev: + requires: + - go-mod-download + - test-ui: + requires: + - install-ui-dependencies + - build-go-dev + - test-go: + requires: + - build-go-dev + - test-go-race: + requires: + - build-go-dev + version: 2 + +# Original config.yml file: +# commands: +# go_test: +# description: run go tests +# parameters: +# extra_flags: +# default: \"\" +# type: string +# steps: +# - run: +# command: | +# set -eux -o pipefail +# +# # Install Go +# curl -sSLO \"https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz\" +# sudo rm -rf /usr/local/go +# sudo tar -C /usr/local -xzf \"go${GO_VERSION}.linux-amd64.tar.gz\" +# rm -f \"go${GO_VERSION}.linux-amd64.tar.gz\" +# export GOPATH=/go 
+# export PATH=\"${PATH}:${GOPATH}/bin:/usr/local/go/bin\" +# +# # Install CircleCI CLI +# curl -sSL \\ +# \"https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz\" \\ +# | sudo tar --overwrite -xz \\ +# -C /usr/local/bin \\ +# \"circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci\" +# +# # Split Go tests by prior test times +# package_names=$(go list \\ +# -tags \"${GO_TAGS}\" \\ +# ./... \\ +# | grep -v /integ \\ +# | grep -v /vendor/ \\ +# | sort \\ +# | circleci tests split --split-by=timings --timings-type=classname) +# +# # Install gotestsum +# curl -sSL \"https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_linux_amd64.tar.gz\" \\ +# | sudo tar --overwrite -xz -C /usr/local/bin gotestsum +# +# # Run tests +# make prep +# mkdir -p test-results/go-test +# CGO_ENABLED= \\ +# VAULT_ADDR= \\ +# VAULT_TOKEN= \\ +# VAULT_DEV_ROOT_TOKEN_ID= \\ +# VAULT_ACC= \\ +# gotestsum --format=short-verbose --junitfile test-results/go-test/results.xml -- \\ +# -tags \"${GO_TAGS}\" \\ +# -timeout=40m \\ +# -parallel=20 \\ +# << parameters.extra_flags >> \\ +# ${package_names} +# name: Run Go tests +# no_output_timeout: 20m +# restore_go_cache: +# steps: +# - restore_cache: +# key: go-sum-v1-{{ checksum \"go.sum\" }} +# restore_yarn_cache: +# steps: +# - restore_cache: +# key: yarn-lock-v1-{{ checksum \"ui/yarn.lock\" }} +# save_go_cache: +# steps: +# - save_cache: +# key: go-sum-v1-{{ checksum \"go.sum\" }} +# paths: +# - /go/pkg/mod +# save_yarn_cache: +# steps: +# - save_cache: +# key: yarn-lock-v1-{{ checksum \"ui/yarn.lock\" }} +# paths: +# - ui/node_modules +# executors: +# go: +# docker: +# - image: golang:1.12.4-stretch +# working_directory: /src +# go-machine: +# environment: +# CIRCLECI_CLI_VERSION: 0.1.5546 +# GO_TAGS: null +# GO_VERSION: 1.12.4 +# GOTESTSUM_VERSION: 0.3.3 +# machine: true +# working_directory: 
~/src +# node: +# docker: +# - image: node:10-stretch +# working_directory: /src +# jobs: +# build-go-dev: +# executor: go +# steps: +# - checkout +# - restore_go_cache +# - attach_workspace: +# at: . +# - run: +# command: | +# set -eux -o pipefail +# +# # Move dev UI assets to expected location +# rm -rf ./pkg +# mkdir ./pkg +# +# # Build dev binary +# make bootstrap dev +# name: Build dev binary +# - persist_to_workspace: +# paths: +# - bin +# root: . +# go-mod-download: +# executor: go +# steps: +# - add_ssh_keys: +# fingerprints: +# - c6:96:98:82:dc:04:6c:39:dd:ac:83:05:e3:15:1c:98 +# - checkout +# - restore_go_cache +# - run: +# command: go mod download +# name: Download Go modules +# - run: +# command: go mod verify +# name: Verify checksums of Go modules +# - save_go_cache +# install-ui-dependencies: +# executor: node +# steps: +# - checkout +# - restore_yarn_cache +# - run: +# command: | +# set -eux -o pipefail +# +# cd ui +# yarn install --ignore-optional +# npm rebuild node-sass +# name: Install UI dependencies +# - save_yarn_cache +# pre-flight-checks: +# description: Ensures nothing obvious is broken for faster failures. +# docker: +# - image: circleci/buildpack-deps +# environment: +# CCI_VERSION: 0.1.5691 +# shell: /usr/bin/env bash -euo pipefail +# steps: +# - checkout +# - run: +# command: | +# export CCI_PATH=/tmp/circleci-cli/$CCI_VERSION +# mkdir -p $CCI_PATH +# NAME=circleci-cli_${CCI_VERSION}_${ARCH} +# URL=$BASE/v${CCI_VERSION}/${NAME}.tar.gz +# curl -sSL $URL \\ +# | tar --overwrite --strip-components=1 -xz -C $CCI_PATH \"${NAME}/circleci\" +# # Add circleci to the path for subsequent steps. +# echo \"export PATH=$CCI_PATH:\\$PATH\" >> $BASH_ENV +# # Done, print some debug info. +# set -x +# . 
$BASH_ENV +# which circleci +# circleci version +# environment: +# ARCH: linux_amd64 +# BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download +# name: Install CircleCI CLI +# - run: make ci-verify +# test-go: +# executor: go-machine +# parallelism: 2 +# steps: +# - checkout +# - run: +# command: | +# set -eux -o pipefail +# +# sudo mkdir /go +# sudo chown -R circleci:circleci /go +# name: Allow circleci user to restore Go modules cache +# - restore_go_cache +# - go_test +# - store_artifacts: +# path: test-results +# - store_test_results: +# path: test-results +# test-go-race: +# executor: go-machine +# steps: +# - checkout +# - run: +# command: | +# set -eux -o pipefail +# +# sudo mkdir /go +# sudo chown -R circleci:circleci /go +# name: Allow circleci user to restore Go modules cache +# - restore_go_cache +# - go_test: +# extra_flags: -race +# - store_artifacts: +# path: test-results +# - store_test_results: +# path: test-results +# test-ui: +# executor: node +# resource_class: medium+ +# steps: +# - checkout +# - restore_yarn_cache +# - attach_workspace: +# at: . 
+# - run: +# command: | +# set -eux -o pipefail +# +# # Install Chrome +# wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub \\ +# | apt-key add - +# echo \"deb http://dl.google.com/linux/chrome/deb/ stable main\" \\ +# | tee /etc/apt/sources.list.d/google-chrome.list +# apt-get update +# apt-get -y install google-chrome-stable +# rm /etc/apt/sources.list.d/google-chrome.list +# rm -rf /var/lib/apt/lists/* /var/cache/apt/* +# +# # Add ./bin to the PATH so vault binary can be run by Ember tests +# export PATH=\"${PWD}/bin:${PATH}\" +# +# # Run Ember tests +# cd ui +# mkdir -p test-results/qunit +# yarn run test-oss +# name: Test UI +# - store_artifacts: +# path: ui/test-results +# - store_test_results: +# path: ui/test-results +# references: +# cache: +# go-sum: go-sum-v1-{{ checksum \"go.sum\" }} +# yarn-lock: yarn-lock-v1-{{ checksum \"ui/yarn.lock\" }} +# images: +# go: golang:1.12.4-stretch +# node: node:10-stretch +# version: 2.1 +# workflows: +# ci: +# jobs: +# - pre-flight-checks +# - install-ui-dependencies: +# requires: +# - pre-flight-checks +# - go-mod-download: +# requires: +# - pre-flight-checks +# - build-go-dev: +# requires: +# - go-mod-download +# - test-ui: +# requires: +# - install-ui-dependencies +# - build-go-dev +# - test-go: +# requires: +# - build-go-dev +# - test-go-race: +# requires: +# - build-go-dev \ No newline at end of file diff --git a/.circleci/config/@config.yml b/.circleci/config/@config.yml new file mode 100644 index 000000000000..c4ef96b68b7f --- /dev/null +++ b/.circleci/config/@config.yml @@ -0,0 +1,53 @@ +--- +version: 2.1 + +references: + images: + go: &GOLANG_IMAGE golang:1.12.4-stretch # Pin Go to patch version (ex: 1.2.3) + node: &NODE_IMAGE node:10-stretch # Pin Node.js to major version (ex: 10) + + cache: + go-sum: &GO_SUM_CACHE_KEY go-sum-v1-{{ checksum "go.sum" }} + yarn-lock: &YARN_LOCK_CACHE_KEY yarn-lock-v1-{{ checksum "ui/yarn.lock" }} + +# more commands defined in commands/ +commands: + 
restore_yarn_cache: + steps: + - restore_cache: + key: *YARN_LOCK_CACHE_KEY + save_yarn_cache: + steps: + - save_cache: + key: *YARN_LOCK_CACHE_KEY + paths: + - ui/node_modules + restore_go_cache: + steps: + - restore_cache: + key: *GO_SUM_CACHE_KEY + save_go_cache: + steps: + - save_cache: + key: *GO_SUM_CACHE_KEY + paths: + - /go/pkg/mod + +executors: + go: + docker: + - image: *GOLANG_IMAGE + working_directory: /src + go-machine: + machine: true + environment: + CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) + GO_VERSION: 1.12.4 # Pin Go to patch version (ex: 1.2.3) + GOTESTSUM_VERSION: 0.3.3 # Pin gotestsum to patch version (ex: 1.2.3) + GO_TAGS: + working_directory: ~/src + node: + docker: + - image: *NODE_IMAGE + working_directory: /src + diff --git a/.circleci/config/commands/go_test.yml b/.circleci/config/commands/go_test.yml new file mode 100644 index 000000000000..bfae0c3d69e4 --- /dev/null +++ b/.circleci/config/commands/go_test.yml @@ -0,0 +1,55 @@ +description: run go tests +parameters: + extra_flags: + type: string + default: "" +steps: + - run: + name: Run Go tests + no_output_timeout: 20m + command: | + set -eux -o pipefail + + # Install Go + curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" + sudo rm -rf /usr/local/go + sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" + rm -f "go${GO_VERSION}.linux-amd64.tar.gz" + export GOPATH=/go + export PATH="${PATH}:${GOPATH}/bin:/usr/local/go/bin" + + # Install CircleCI CLI + curl -sSL \ + "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ + | sudo tar --overwrite -xz \ + -C /usr/local/bin \ + "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" + + # Split Go tests by prior test times + package_names=$(go list \ + -tags "${GO_TAGS}" \ + ./... 
\ + | grep -v /integ \ + | grep -v /vendor/ \ + | sort \ + | circleci tests split --split-by=timings --timings-type=classname) + + # Install gotestsum + curl -sSL "https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_linux_amd64.tar.gz" \ + | sudo tar --overwrite -xz -C /usr/local/bin gotestsum + + # Run tests + make prep + mkdir -p test-results/go-test + CGO_ENABLED= \ + VAULT_ADDR= \ + VAULT_TOKEN= \ + VAULT_DEV_ROOT_TOKEN_ID= \ + VAULT_ACC= \ + gotestsum --format=short-verbose --junitfile test-results/go-test/results.xml -- \ + -tags "${GO_TAGS}" \ + -timeout=40m \ + -parallel=20 \ + << parameters.extra_flags >> \ + ${package_names} + diff --git a/.circleci/config/jobs/build-go-dev.yml b/.circleci/config/jobs/build-go-dev.yml new file mode 100644 index 000000000000..59729bc69883 --- /dev/null +++ b/.circleci/config/jobs/build-go-dev.yml @@ -0,0 +1,21 @@ +executor: go +steps: + - checkout + - restore_go_cache + - attach_workspace: + at: . + - run: + name: Build dev binary + command: | + set -eux -o pipefail + + # Move dev UI assets to expected location + rm -rf ./pkg + mkdir ./pkg + + # Build dev binary + make bootstrap dev + - persist_to_workspace: + root: . 
+ paths: + - bin diff --git a/.circleci/config/jobs/go-mod-download.yml b/.circleci/config/jobs/go-mod-download.yml new file mode 100644 index 000000000000..adfaf0ad8043 --- /dev/null +++ b/.circleci/config/jobs/go-mod-download.yml @@ -0,0 +1,15 @@ +executor: go +steps: + - add_ssh_keys: + fingerprints: + # "CircleCI SSH Checkout" SSH key associated with hashicorp-ci GitHub user + - "c6:96:98:82:dc:04:6c:39:dd:ac:83:05:e3:15:1c:98" + - checkout + - restore_go_cache + - run: + name: Download Go modules + command: go mod download + - run: + name: Verify checksums of Go modules + command: go mod verify + - save_go_cache diff --git a/.circleci/config/jobs/install-ui-dependencies.yml b/.circleci/config/jobs/install-ui-dependencies.yml new file mode 100644 index 000000000000..2b04e176bb59 --- /dev/null +++ b/.circleci/config/jobs/install-ui-dependencies.yml @@ -0,0 +1,13 @@ +executor: node +steps: + - checkout + - restore_yarn_cache + - run: + name: Install UI dependencies + command: | + set -eux -o pipefail + + cd ui + yarn install --ignore-optional + npm rebuild node-sass + - save_yarn_cache diff --git a/.circleci/config/jobs/pre-flight-checks.yml b/.circleci/config/jobs/pre-flight-checks.yml new file mode 100644 index 000000000000..bbb44b092e19 --- /dev/null +++ b/.circleci/config/jobs/pre-flight-checks.yml @@ -0,0 +1,28 @@ +description: Ensures nothing obvious is broken for faster failures. 
+docker: + - image: circleci/buildpack-deps +shell: /usr/bin/env bash -euo pipefail +environment: + CCI_VERSION: 0.1.5691 +steps: + - checkout + - run: + name: Install CircleCI CLI + environment: + ARCH: linux_amd64 + BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download + command: | + export CCI_PATH=/tmp/circleci-cli/$CCI_VERSION + mkdir -p $CCI_PATH + NAME=circleci-cli_${CCI_VERSION}_${ARCH} + URL=$BASE/v${CCI_VERSION}/${NAME}.tar.gz + curl -sSL $URL \ + | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" + # Add circleci to the path for subsequent steps. + echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV + # Done, print some debug info. + set -x + . $BASH_ENV + which circleci + circleci version + - run: make ci-verify diff --git a/.circleci/config/jobs/test-go-race.yml b/.circleci/config/jobs/test-go-race.yml new file mode 100644 index 000000000000..df16fc616441 --- /dev/null +++ b/.circleci/config/jobs/test-go-race.yml @@ -0,0 +1,17 @@ +executor: go-machine +steps: + - checkout + - run: + name: Allow circleci user to restore Go modules cache + command: | + set -eux -o pipefail + + sudo mkdir /go + sudo chown -R circleci:circleci /go + - restore_go_cache + - go_test: + extra_flags: "-race" + - store_artifacts: + path: test-results + - store_test_results: + path: test-results diff --git a/.circleci/config/jobs/test-go.yml b/.circleci/config/jobs/test-go.yml new file mode 100644 index 000000000000..031f7bc24993 --- /dev/null +++ b/.circleci/config/jobs/test-go.yml @@ -0,0 +1,17 @@ +executor: go-machine +parallelism: 2 +steps: + - checkout + - run: + name: Allow circleci user to restore Go modules cache + command: | + set -eux -o pipefail + + sudo mkdir /go + sudo chown -R circleci:circleci /go + - restore_go_cache + - go_test + - store_artifacts: + path: test-results + - store_test_results: + path: test-results diff --git a/.circleci/config/jobs/test-ui.yml b/.circleci/config/jobs/test-ui.yml new file mode 100644 index 
000000000000..813f800d20e2 --- /dev/null +++ b/.circleci/config/jobs/test-ui.yml @@ -0,0 +1,33 @@ +executor: node +resource_class: medium+ +steps: + - checkout + - restore_yarn_cache + - attach_workspace: + at: . + - run: + name: Test UI + command: | + set -eux -o pipefail + + # Install Chrome + wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub \ + | apt-key add - + echo "deb http://dl.google.com/linux/chrome/deb/ stable main" \ + | tee /etc/apt/sources.list.d/google-chrome.list + apt-get update + apt-get -y install google-chrome-stable + rm /etc/apt/sources.list.d/google-chrome.list + rm -rf /var/lib/apt/lists/* /var/cache/apt/* + + # Add ./bin to the PATH so vault binary can be run by Ember tests + export PATH="${PWD}/bin:${PATH}" + + # Run Ember tests + cd ui + mkdir -p test-results/qunit + yarn run test-oss + - store_artifacts: + path: ui/test-results + - store_test_results: + path: ui/test-results diff --git a/.circleci/config/workflows/ci.yml b/.circleci/config/workflows/ci.yml new file mode 100644 index 000000000000..2d4f65f2f5ba --- /dev/null +++ b/.circleci/config/workflows/ci.yml @@ -0,0 +1,21 @@ +jobs: + - pre-flight-checks + - install-ui-dependencies: + requires: + - pre-flight-checks + - go-mod-download: + requires: + - pre-flight-checks + - build-go-dev: + requires: + - go-mod-download + - test-ui: + requires: + - install-ui-dependencies + - build-go-dev + - test-go: + requires: + - build-go-dev + - test-go-race: + requires: + - build-go-dev diff --git a/.gitignore b/.gitignore index fdeb251eb12b..7e29834e2e17 100644 --- a/.gitignore +++ b/.gitignore @@ -93,7 +93,6 @@ ui/vault-ui-integration-server.pid # for building static assets node_modules -package-lock.json # Website website/.bundle diff --git a/.hooks/pre-commit b/.hooks/pre-commit new file mode 100755 index 000000000000..17309e55a9d7 --- /dev/null +++ b/.hooks/pre-commit @@ -0,0 +1,144 @@ +#!/usr/bin/env bash + +# READ THIS BEFORE MAKING CHANGES: +# +# If you want to add a new 
pre-commit check, here are the rules: +# +# 1. Create a bash function for your check (see e.g. ui_lint below). +# NOTE: Each function will be called in a sub-shell so you can freely +# change directory without worrying about interference. +# 2. Add the name of the function to the CHECKS variable. +# 3. If no changes relevant to your new check are staged, then +# do not output anything at all - this would be annoying noise. +# In this case, call 'return 0' from your check function to return +# early without blocking the commit. +# 4. If any non-trivial check-specific thing has to be invoked, +# then output '==> [check description]' as the first line of +# output. Each sub-check should output '--> [subcheck description]' +# after it has run, indicating success or failure. +# 5. Call 'block [reason]' to block the commit. This ensures the last +# line of output calls out that the commit was blocked - which may not +# be obvious from random error messages generated in 4. +# +# At the moment, there are no automated tests for this hook, so please run it +# locally to check you have not broken anything - breaking this will interfere +# with other peoples' workflows significantly, so be sure, check everything twice. + +set -euo pipefail + +# Call block to block the commit with a message. +block() { + echo "$@" + echo "Commit blocked - see errors above." + exit 1 +} + +# Add all check functions to this space separated list. +# They are executed in this order (see end of file). +CHECKS="ui_lint circleci_verify" + +MIN_CIRCLECI_VERSION=0.1.5575 + +# Run ui linter if changes in that dir detected. +ui_lint() { + local DIR=ui LINTER=node_modules/.bin/lint-staged + + # Silently succeed if no changes staged for $DIR + if git diff --name-only --cached --exit-code -- $DIR/; then + return 0 + fi + + # Silently succeed if the linter has not been installed. + # We assume that if you're doing UI dev, you will have installed the linter + # by running yarn. + if [ ! 
-x $DIR/$LINTER ]; then + return 0 + fi + + echo "==> Changes detected in $DIR/: Running linter..." + + # Run the linter from the UI dir. + cd $DIR + $LINTER || block "UI lint failed" +} + +# Check .circleci/config.yml is up to date and valid, and that all changes are +# included together in this commit. +circleci_verify() { + # Change to the root dir of the repo. + cd "$(git rev-parse --show-toplevel)" + + # Fail early if we accidentally used '.yaml' instead of '.yml' + if ! git diff --name-only --cached --exit-code -- '.circleci/***.yaml'; then + # This is just for consistency, as I keep making this mistake - Sam. + block "ERROR: File(s) with .yaml extension detected. Please rename them .yml instead." + fi + + # Succeed early if no changes to yml files in .circleci/ are currently staged. + # make ci-verify is slow so we really don't want to run it unnecessarily. + if git diff --name-only --cached --exit-code -- '.circleci/***.yml'; then + return 0 + fi + # Make sure to add no explicit output before this line, as it would just be noise + # for those making non-circleci changes. + echo "==> Verifying config changes in .circleci/" + echo "--> OK: All files are .yml not .yaml" + + # Ensure commit includes _all_ files in .circleci/ + # So not only are the files up to date, but we are also committing them in one go. + if ! git diff --name-only --exit-code -- '.circleci/***.yml'; then + echo "ERROR: Some .yml diffs in .circleci/ are staged, others not." + block "Please commit the entire .circleci/ directory together, or omit it altogether." + fi + + echo "--> OK: All .yml files in .circleci are staged." + + if ! REASON=$(check_circleci_cli_version); then + echo "*** WARNING: Unable to verify changes in .circleci/:" + echo "--> $REASON" + # We let this pass if there is no valid circleci version installed. + return 0 + fi + + if ! make -C .circleci ci-verify; then + block "ERROR: make ci-verify failed" + fi + + echo "--> OK: make ci-verify succeeded." 
+} + +check_circleci_cli_version() { + if ! command -v circleci > /dev/null 2>&1; then + echo "circleci cli not installed." + return 1 + fi + + CCI="circleci --skip-update-check" + + if ! THIS_VERSION=$($CCI version 2> /dev/null); then + # Guards against very old versions that do not have --skip-update-check. + echo "The installed circleci cli is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION." + return 1 + fi + + # SORTED_MIN is the lower of the THIS_VERSION and MIN_CIRCLECI_VERSION. + if ! SORTED_MIN="$(printf "%s\n%s" "$MIN_CIRCLECI_VERSION" "$THIS_VERSION" | sort -V | head -n1)"; then + echo "Failed to sort versions. Please open an issue to report this." + return 1 + fi + + if [ "$THIS_VERSION" != "${THIS_VERSION#$MIN_CIRCLECI_VERSION}" ]; then + return 0 # OK - Versions have the same prefix, so we consider them equal. + elif [ "$SORTED_MIN" = "$MIN_CIRCLECI_VERSION" ]; then + return 0 # OK - MIN_CIRCLECI_VERSION is lower than THIS_VERSION. + fi + + # Version too low. + echo "The installed circleci cli v$THIS_VERSION is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION" + return 1 +} + +for CHECK in $CHECKS; do + # Force each check into a subshell to avoid crosstalk. + ( $CHECK ) || exit $? +done diff --git a/.travis.yml b/.travis.yml index 1a6a61165882..5219a17a7bf6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,7 +39,7 @@ branches: env: - TEST_COMMAND='make dev test-ember' - - TEST_COMMAND='make dev ember-ci-test' + - TEST_COMMAND='make dev test-ui-browserstack' - TEST_COMMAND='travis_wait 75 make testtravis' - TEST_COMMAND='travis_wait 75 make testracetravis' - GO111MODULE=on diff --git a/CHANGELOG.md b/CHANGELOG.md index b6e3e84ced38..721af0ba0fec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,35 +1,202 @@ -## Next +## 1.2-beta2 (Unreleased) CHANGES: - * autoseal/aws: The user-configured regions on the AWSKMS seal stanza - will now be preferred over regions set in the enclosing environment. - This is a _breaking_ change.
+ * auth/approle: AppRole uses new, common token fields for values that overlap + with other auth backends. `period` and `policies` will continue to work, + with priority being given to the `token_` prefixed versions of those + parameters. They will also be returned when doing a read on the role if they + were used to provide values initially. + * auth/approle: `"default"` is no longer automatically added to the `policies` + parameter. This was a no-op since it would always be added anyways by + Vault's core; however, this can now be explicitly disabled with the new + `token_no_default_policy` field. + * auth/approle: `bound_cidr_list` is no longer returned when reading a role + + FEATURES: + + * **Vault API explorer**: The Vault UI now includes an embedded API explorer + where you can browse the endpoints available to you and make requests. To try + it out, open the Web CLI and type `api`. + +IMPROVEMENTS: + + * agent: Allow EC2 nonce to be passed in [GH-6953] + * agent: Add optional `namespace` parameter, which sets the default namespace + for the auto-auth functionality [GH-6988] + * audit/file: Dramatically speed up file operations by changing + locking/marshaling order [GH-7024] + * auth/token: Allow the support of the identity system for the token backend + via token roles [GH-6267] + * cli: `path-help` now allows `-format=json` to be specified, which will + output OpenAPI [GH-7006] + * secrets/kv: Add optional `delete_version_after` parameter, which takes a + duration and can be set on the mount and/or the metadata for a specific key + [GH-7005] + +## 1.2-beta1 (June 25th, 2019) + +CHANGES: + + * auth/token: Token store roles use new, common token fields for the values + that overlap with other auth backends. `period`, `explicit_max_ttl`, and + `bound_cidrs` will continue to work, with priority being given to the + `token_` prefixed versions of those parameters.
They will also be returned + when doing a read on the role if they were used to provide values initially; + however, in Vault 1.4 if `period` or `explicit_max_ttl` is zero they will no + longer be returned. (`explicit_max_ttl` was already not returned if empty.) + * Due to underlying changes in Go version 1.12 and Go > 1.11.5, Vault is now + stricter about what characters it will accept in path names. Whereas before + it would filter out unprintable characters (and this could be turned off), + control characters and other invalid characters are now rejected within Go's + HTTP library before the request is passed to Vault, and this cannot be + disabled. To continue using these (e.g. for already-written paths), they + must be properly percent-encoded (e.g. `\r` becomes `%0D`, `\x00` becomes + `%00`, and so on). + * The user-configured regions on the AWSKMS seal stanza will now be preferred + over regions set in the enclosing environment. This is a _breaking_ change. + * All values in audit logs now are omitted if they are empty. This helps + reduce the size of audit log entries by not reproducing keys in each entry + that commonly don't contain any value, which can help in cases where audit + log entries are above the maximum UDP packet size and others. + * Both PeriodicFunc and WALRollback functions will be called if both are + provided. Previously WALRollback would only be called if PeriodicFunc was + not set. See [GH-6717](https://github.com/hashicorp/vault/pull/6717) for + details. + * Vault now uses Go's official dependency management system, Go Modules, to + manage dependencies. As a result to both reduce transitive dependencies for + API library users and plugin authors, and to work around various conflicts, + we have moved various helpers around, mostly under an `sdk/` submodule. A + couple of functions have also moved from plugin helper code to the `api/` + submodule. 
If you are a plugin author, take a look at some of our official + plugins and the paths they are importing for guidance. + +FEATURES: + + * **Combined DB credential rotation**: Alternative mode for the Combined DB + Secret Engine to automatically rotate existing database account credentials + and set Vault as the source of truth for credentials. + * **Identity Tokens**: Vault's Identity system can now generate OIDC-compliant + ID tokens. These customizable tokens allow encapsulating a signed, verifiable + snapshot of identity information and metadata. They can be used by other + applications—even those without Vault authorization—as a way of establishing + identity based on a Vault entity. + * **Pivotal Cloud Foundry plugin**: New auth method using Pivotal Cloud + Foundry certificates for Vault authentication. + * **ElasticSearch database plugin**: New ElasticSearch database plugin issues + unique, short-lived ElasticSearch credentials. + * **New UI Features**: An HTTP Request Volume Page and new UI for editing LDAP + Users and Groups have been added. + * **HA support for Postgres**: PostgreSQL versions >= 9.5 may now be used as + an HA storage backend. + * **KMIP secrets engine (Enterprise)**: Allows Vault to operate as a KMIP Server, + seamlessly brokering cryptographic operations for traditional infrastructure.
-IMPROVEMENTS: +IMPROVEMENTS: + * auth/jwt: A JWKS endpoint may now be configured for signature verification [JWT-43] + * auth/jwt: `bound_claims` will now match received claims that are lists if any element + of the list is one of the expected values [JWT-50] + * auth/jwt: Leeways for `nbf` and `exp` are now configurable, as is clock skew + leeway [JWT-53] + * auth/kubernetes: Allow service names/namespaces to be configured as globs + [KUBEAUTH-58] + * auth/token: Add a large set of token configuration options to token store + roles [GH-6662] + * identity: Allow a group alias' canonical ID to be modified + * namespaces: Namespaces can now be created and deleted from performance + replication secondaries + * replication: Client TLS authentication is now supported when enabling or + updating a replication secondary + * secrets/database: Cassandra operations will now cancel on client timeout + [GH-6954] + * storage/postgres: LIST now performs better on large datasets [GH-6546] * ui: KV v1 and v2 will now gracefully degrade allowing a write without read workflow in the UI [GH-6570] + * ui: Many visual improvements with the addition of Toolbars [GH-6626], the restyling + of the Confirm Action component [GH-6741], and using a new set of glyphs for our + Icon component [GH-6736] + * ui: Lazy loading parts of the application so that the total initial payload is + smaller [GH-6718] + * ui: Tabbing to auto-complete in filters will first complete a common prefix if there + is one [GH-6759] + * ui: Removing jQuery from the application makes the initial JS payload smaller [GH-6768] + +BUG FIXES: -BUG FIXES: + * auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN + when parsing this value [GH-6917] + * auth/aws: Fix an error complaining about a read-only view that could occur + during updating of a role when on a performance replication secondary + [GH-6926] + * auth/jwt: Fix a regression introduced in 1.1.1 that disabled checking of client_id + for 
OIDC logins [JWT-54] + * auth/jwt: Fix a panic during OIDC CLI logins that could occur if the Vault server + response is empty [JWT-55] + * identity: Fix a case where modifying aliases of an entity could end up + moving the entity into the wrong namespace + * namespaces: Fix a behavior (currently only known to be benign) where we + wouldn't delete policies through the official functions before wiping the + namespaces on deletion + * ui: Fix timestamp on some transit keys [GH-6827] + +## 1.1.3 (June 5th, 2019) + +IMPROVEMENTS: + + * agent: Now supports proxying request query parameters [GH-6772] + * core: Mount table output now includes a UUID indicating the storage path [GH-6633] + * core: HTTP server timeout values are now configurable [GH-6666] + * replication: Improve performance of the reindex operation on secondary clusters + when mount filters are in use + * replication: Replication status API now returns the state and progress of a reindex + +BUG FIXES: + * api: Return the Entity ID in the secret output [GH-6819] + * auth/jwt: Consider bound claims when considering if there is at least one + bound constraint [JWT-49] * auth/okta: Fix handling of group names containing slashes [GH-6665] + * cli: Add deprecated stored-shares flag back to the init command [GH-6677] + * cli: Fix a panic when the KV command would return no data [GH-6675] + * cli: Fix issue causing CLI list operations to not return proper format when + there is an empty response [GH-6776] * core: Correctly honor non-HMAC request keys when auditing requests [GH-6653] - * core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of endpoints [GH-6654] - * core: Fix issue where some OpenAPI parameters were incorrectly listed as being sent - as a header [GH-6679] + * core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of + endpoints [GH-6654] + * core: Fix issue where some OpenAPI parameters were incorrectly listed as + being sent as a header [GH-6679] + * core: Fix issue 
that would allow duplicate mount names to be used [GH-6771] + * namespaces: Fix behavior when using `root` instead of `root/` as the + namespace header value * pki: fix a panic when a client submits a null value [GH-5679] - * replication: Fix an issue causing startup problems if a namespace policy - wasn't replicated properly - * storage/consul: recognize `https://` address even if schema not specified [GH-6602] - * storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA) could cause - constant switching of the active node [GH-6637] - * storage/dynamodb: Eliminate a high-CPU condition that could occur if an error was - received from the DynamoDB API [GH-6640] * replication: Properly update mount entry cache on a secondary to apply all new values after a tune - * ui: fix an issue where sensitive input values weren't being saved to the + * replication: Properly close connection on bootstrap error + * replication: Fix an issue causing startup problems if a namespace policy + wasn't replicated properly + * replication: Fix longer than necessary WAL replay during an initial reindex + * replication: Fix error during mount filter invalidation on DR secondary clusters + * secrets/ad: Make time buffer configurable [AD-35] + * secrets/gcp: Check for nil config when getting credentials [SGCP-35] + * secrets/gcp: Fix error checking in some cases where the returned value could + be 403 instead of 404 [SGCP-37] + * secrets/gcpkms: Disable key rotation when deleting a key [GCPKMS-10] + * storage/consul: recognize `https://` address even if schema not specified + [GH-6602] + * storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA) + could cause constant switching of the active node [GH-6637] + * storage/dynamodb: Eliminate a high-CPU condition that could occur if an + error was received from the DynamoDB API [GH-6640] + * storage/gcs: Correctly use configured chunk size values [GH-6655] + * storage/mssql: Use the correct database when pre-created 
schemas exist + [GH-6356] + * ui: Fix issue with select arrows on drop down menus [GH-6627] + * ui: Fix an issue where sensitive input values weren't being saved to the server [GH-6586] + * ui: Fix web cli parsing when using quoted values [GH-6755] + * ui: Fix a namespace workflow mapping identities from external namespaces by + allowing arbitrary input in search-select component [GH-6728] ## 1.1.2 (April 18th, 2019) @@ -67,7 +234,7 @@ SECURITY: CHANGES: * auth/jwt: Disallow logins of role_type "oidc" via the `/login` path [JWT-38] - * core/acl: New ordering defines which policy wins when there are multiple + * core/acl: New ordering defines which policy wins when there are multiple inexact matches and at least one path contains `+`. `+*` is now illegal in policy paths. The previous behavior simply selected any matching segment-wildcard path that matched. [GH-6532] @@ -75,21 +242,21 @@ CHANGES: previously possible from a performance secondary. These have been resolved, and these operations may now be run from a performance secondary. 
-IMPROVEMENTS: +IMPROVEMENTS: * agent: Allow AppRole auto-auth without a secret-id [GH-6324] * auth/gcp: Cache clients to improve performance and reduce open file usage * auth/jwt: Bounds claims validiation will now allow matching the received - claims against a list of expected values [JWT-41] + claims against a list of expected values [JWT-41] * secret/gcp: Cache clients to improve performance and reduce open file usage * replication: Mounting/unmounting/remounting/mount-tuning is now supported from a performance secondary cluster * ui: Suport for authentication via the RADIUS auth method [GH-6488] * ui: Navigating away from secret list view will clear any page-specific filter that was applied [GH-6511] - * ui: Improved the display when OIDC auth errors [GH-6553] + * ui: Improved the display when OIDC auth errors [GH-6553] -BUG FIXES: +BUG FIXES: * agent: Allow auto-auth to be used with caching without having to define any sinks [GH-6468] @@ -128,7 +295,7 @@ BUG FIXES: * ui: add polyfill to load UI in IE11 [GH-6567] * ui: Fix issue where some elements would fail to work properly if using ACLs with segment-wildcard paths (`/+/` segments) [GH-6525] - + ## 1.1.0 (March 18th, 2019) CHANGES: @@ -181,9 +348,9 @@ IMPROVEMENTS: * core/metrics: Prometheus pull support using a new sys/metrics endpoint. [GH-5308] * core: On non-windows platforms a SIGUSR2 will make the server log a dump of all running goroutines' stack traces for debugging purposes [GH-6240] - * replication: The inital replication indexing process on newly initialized or upgraded + * replication: The initial replication indexing process on newly initialized or upgraded clusters now runs asynchronously - * sentinel: Add token namespace id and path, available in rules as + * sentinel: Add token namespace id and path, available in rules as token.namespace.id and token.namespace.path * ui: The UI is now leveraging OpenAPI definitions to pull in fields for various forms. 
This means, it will not be necessary to add fields on the go and JS sides in the future. @@ -231,7 +398,7 @@ SECURITY: be read. Upgrading to this version or 1.1 will fix this issue and cause the replicated data to be deleted from filtered secondaries. More information was sent to customer contacts on file. - + ## 1.0.3 (February 12th, 2019) CHANGES: @@ -244,10 +411,10 @@ CHANGES: entity either by name or by id [GH-6105] * The Vault UI's navigation and onboarding wizard now only displays items that are permitted in a users' policy [GH-5980, GH-6094] - * An issue was fixed that caused recovery keys to not work on secondary - clusters when using a different unseal mechanism/key than the primary. This + * An issue was fixed that caused recovery keys to not work on secondary + clusters when using a different unseal mechanism/key than the primary. This would be hit if the cluster was rekeyed or initialized after 1.0. We recommend - rekeying the recovery keys on the primary cluster if you meet the above + rekeying the recovery keys on the primary cluster if you meet the above requirements. FEATURES: @@ -287,7 +454,7 @@ BUG FIXES: a performance standby very quickly, before an associated entity has been replicated. If the entity is not found in this scenario, the request will forward to the active node. - * replication: Fix issue where recovery keys would not work on secondary + * replication: Fix issue where recovery keys would not work on secondary clusters if using a different unseal mechanism than the primary. * replication: Fix a "failed to register lease" error when using performance standbys @@ -328,9 +495,9 @@ IMPROVEMENTS: * auth/aws: AWS EC2 authentication can optionally create entity aliases by image ID [GH-5846] - * autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal + * autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal [GH-5999] - * physical/foundationdb: TLS support added. 
[GH-5800] + * physical/foundationdb: TLS support added. [GH-5800] BUG FIXES: @@ -354,7 +521,7 @@ BUG FIXES: * ui (enterprise): properly display perf-standby count on the license page [GH-5971] * ui: fix disappearing nested secrets and go to the nearest parent when deleting a secret - [GH-5976] - * ui: fix error where deleting an item via the context menu would fail if the + * ui: fix error where deleting an item via the context menu would fail if the item name contained dots [GH-6018] * ui: allow saving of kv secret after an errored save attempt [GH-6022] * ui: fix display of kv-v1 secret containing a key named "keys" [GH-6023] @@ -457,7 +624,7 @@ CHANGES: undocumented, but were retained for backwards compatibility. They shouldn't be used due to the possibility of those paths being logged, so at this point they are simply being removed. - * Vault will no longer accept updates when the storage key has invalid UTF-8 + * Vault will no longer accept updates when the storage key has invalid UTF-8 character encoding [GH-5819] * Mount/Auth tuning the `options` map on backends will now upsert any provided values, and keep any of the existing values in place if not provided. The @@ -523,7 +690,7 @@ IMPROVEMENTS: * ui: Improved banner and popup design [GH-5672] * ui: Added token type to auth method mount config [GH-5723] * ui: Display additonal wrap info when unwrapping. 
[GH-5664] - * ui: Empty states have updated styling and link to relevant actions and + * ui: Empty states have updated styling and link to relevant actions and documentation [GH-5758] * ui: Allow editing of KV V2 data when a token doesn't have capabilities to read secret metadata [GH-5879] @@ -543,7 +710,7 @@ BUG FIXES: [[GH-16]](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/16) * storage/gcs: Send md5 of values to GCS to avoid potential corruption [GH-5804] - * secrets/kv: Fix issue where storage version would get incorrectly downgraded + * secrets/kv: Fix issue where storage version would get incorrectly downgraded [GH-5809] * secrets/kv: Disallow empty paths on a `kv put` while accepting empty paths for all other operations for backwards compatibility @@ -575,7 +742,7 @@ BUG FIXES: * ui: Fix bug where editing secrets as JSON doesn't save properly [GH-5660] * ui: Fix issue where IE 11 didn't render the UI and also had a broken form when trying to use tool/hash [GH-5714] - + ## 0.11.4 (October 23rd, 2018) CHANGES: @@ -588,7 +755,7 @@ FEATURES: * **Transit Key Trimming**: Keys in transit secret engine can now be trimmed to remove older unused key versions - * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy + * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy individual secret versions in the UI * **Azure Existing Service Principal Support**: Credentials can now be generated against an existing service principal @@ -642,7 +809,7 @@ IMPROVEMENTS: BUG FIXES: - * auth/ldap: Fix panic if specific values were given to be escaped [GH-5471] + * auth/ldap: Fix panic if specific values were given to be escaped [GH-5471] * cli/auth: Fix panic if `vault auth` was given no parameters [GH-5473] * secret/database/mongodb: Fix panic that could occur at high load [GH-5463] * secret/pki: Fix CA generation not allowing OID SANs [GH-5459] @@ -667,7 +834,7 @@ FEATURES: credentials it is using [GH-5140] * **Storage 
Backend Migrator**: A new `operator migrate` command allows offline migration of data between two storage backends - * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS can now be used a support seal for + * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS can now be used a support seal for Auto Unseal and Seal Wrapping BUG FIXES: @@ -680,16 +847,16 @@ BUG FIXES: * replication: Fix DR API when using a token [GH-5398] * identity: Ensure old group alias is removed when a new one is written [GH-5350] * storage/alicloud: Don't call uname on package init [GH-5358] - * secrets/jwt: Fix issue where request context would be canceled too early + * secrets/jwt: Fix issue where request context would be canceled too early * ui: fix need to have update for aws iam creds generation [GF-5294] * ui: fix calculation of token expiry [GH-5435] - + IMPROVEMENTS: * auth/aws: The identity alias name can now configured to be either IAM unique ID of the IAM Principal, or ARN of the caller identity [GH-5247] * auth/cert: Add allowed_organizational_units support [GH-5252] - * cli: Format TTLs for non-secret responses [GH-5367] + * cli: Format TTLs for non-secret responses [GH-5367] * identity: Support operating on entities and groups by their names [GH-5355] * plugins: Add `env` parameter when registering plugins to the catalog to allow operators to include environment variables during plugin execution. 
[GH-5359] @@ -697,13 +864,13 @@ IMPROVEMENTS: * secrets/aws: Allow specifying STS role-default TTLs [GH-5138] * secrets/pki: Add configuration support for setting NotBefore [GH-5325] * core: Support for passing the Vault token via an Authorization Bearer header [GH-5397] - * replication: Reindex process now runs in the background and does not block other + * replication: Reindex process now runs in the background and does not block other vault operations * storage/zookeeper: Enable TLS based communication with Zookeeper [GH-4856] * ui: you can now init a cluster with a seal config [GH-5428] * ui: added the option to force promote replication clusters [GH-5438] * replication: Allow promotion of a secondary when data is syncing with a "force" flag - + ## 0.11.1.1 (September 17th, 2018) (Enterprise Only) BUG FIXES: @@ -762,11 +929,11 @@ BUG FIXES: * secrets/pki: Fix sign-verbatim losing extra Subject attributes [GH-5245] * secrets/pki: Remove certificates from store when tidying revoked certificates and simplify API [GH-5231] - * ui: JSON editor will not coerce input to an object, and will now show an + * ui: JSON editor will not coerce input to an object, and will now show an error about Vault expecting an object [GH-5271] * ui: authentication form will now default to any methods that have been tuned to show up for unauthenticated users [GH-5281] - + ## 0.11.0 (August 28th, 2018) @@ -817,7 +984,7 @@ FEATURES: single Vault Enterprise infrastructure. Through namespaces, Vault administrators can support tenant isolation for teams and individuals as well as empower those individuals to self-manage their own tenant - environment. + environment. * **Performance Standbys (Enterprise)**: Standby nodes can now service requests that do not modify storage. This provides near-horizontal scaling of a cluster in some workloads, and is the intra-cluster analogue of @@ -828,14 +995,14 @@ FEATURES: grant access to Vault. 
See the [plugin repository](https://github.com/hashicorp/vault-plugin-auth-alicloud) for more information. - * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that + * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that allows generating credentials to allow access to Azure. See the [plugin repository](https://github.com/hashicorp/vault-plugin-secrets-azure) for more information. * **HA Support for MySQL Storage**: MySQL storage now supports HA. * **ACL Templating**: ACL policies can now be templated using identity Entity, Groups, and Metadata. - * **UI Onboarding wizards**: The Vault UI can provide contextual help and + * **UI Onboarding wizards**: The Vault UI can provide contextual help and guidance, linking out to relevant links or guides on vaultproject.io for various workflows in Vault. @@ -907,7 +1074,7 @@ FEATURES: * **FoundationDB Storage**: You can now use FoundationDB for storing Vault data. * **UI Control Group Workflow (enterprise)**: The UI will now detect control - group responses and provides a workflow to view the status of the request + group responses and provides a workflow to view the status of the request and to authorize requests. 
* **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically authenticate for you across a variety of authentication methods, provide @@ -936,7 +1103,7 @@ IMPROVEMENTS: * secrets/ssh: Allow Vault to work with single-argument SSH flags [GH-4825] * secrets/ssh: SSH executable path can now be configured in the CLI [GH-4937] * storage/swift: Add additional configuration options [GH-4901] - * ui: Choose which auth methods to show to unauthenticated users via + * ui: Choose which auth methods to show to unauthenticated users via `listing_visibility` in the auth method edit forms [GH-4854] * ui: Authenticate users automatically by passing a wrapped token to the UI via the new `wrapped_token` query parameter [GH-4854] @@ -954,22 +1121,22 @@ BUG FIXES: * core: Fix issue releasing the leader lock in some circumstances [GH-4915] * core: Fix a panic that could happen if the server was shut down while still starting up - * core: Fix deadlock that would occur if a leadership loss occurs at the same + * core: Fix deadlock that would occur if a leadership loss occurs at the same time as a seal operation [GH-4932] - * core: Fix issue with auth mounts failing to renew tokens due to policies + * core: Fix issue with auth mounts failing to renew tokens due to policies changing [GH-4960] * auth/radius: Fix issue where some radius logins were being canceled too early [GH-4941] - * core: Fix accidental seal of vault of we lose leadership during startup + * core: Fix accidental seal of vault of we lose leadership during startup [GH-4924] - * core: Fix standby not being able to forward requests larger than 4MB + * core: Fix standby not being able to forward requests larger than 4MB [GH-4844] * core: Avoid panic while processing group memberships [GH-4841] * identity: Fix a race condition creating aliases [GH-4965] * plugins: Fix being unable to send very large payloads to or from plugins [GH-4958] * physical/azure: Long list responses would sometimes be truncated [GH-4983] - * 
replication: Allow replication status requests to be processed while in + * replication: Allow replication status requests to be processed while in merkle sync * replication: Ensure merkle reindex flushes all changes to storage immediately * replication: Fix a case where a network interruption could cause a secondary @@ -979,7 +1146,7 @@ BUG FIXES: * secrets/database: Fix panic during DB creds revocation [GH-4846] * ui: Fix usage of cubbyhole backend in the UI [GH-4851] * ui: Fix toggle state when a secret is JSON-formatted [GH-4913] - * ui: Fix coercion of falsey values to empty string when editing secrets as + * ui: Fix coercion of falsey values to empty string when editing secrets as JSON [GH-4977] ## 0.10.3 (June 20th, 2018) @@ -1120,7 +1287,7 @@ IMPROVEMENTS: * auth/ldap: Obfuscate error messages pre-bind for greater security [GH-4700] * cli: `vault login` now supports a `-no-print` flag to suppress printing token information but still allow storing into the token helper [GH-4454] - * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and + * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and CKM_RSA_PKCS_OAEP mechanisms * core/pkcs11 (enterprise): HSM slots can now be selected by token label instead of just slot number @@ -1148,7 +1315,7 @@ IMPROVEMENTS: * ui: Identity interface now lists groups by name [GH-4655] * ui: Permission denied errors still render the sidebar in the Access section [GH-4658] - * replication: Improve performance of index page flushes and WAL garbage + * replication: Improve performance of index page flushes and WAL garbage collecting BUG FIXES: @@ -1259,7 +1426,7 @@ IMPROVEMENTS: the rate of writes committed * secret/ssh: Update dynamic key install script to use shell locking to avoid concurrent modifications [GH-4358] - * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of + * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of engines will 
show you the ones you implicitly have access to (because you have access to to secrets in those engines) [GH-4439] @@ -1284,16 +1451,16 @@ BUG FIXES: interface properly [GH-4398] * ui: Corrected the saving of mount tune ttls for auth methods [GH-4431] * ui: Credentials generation no longer checks capabilities before making - api calls. This should fix needing "update" capabilites to read IAM + api calls. This should fix needing "update" capabilites to read IAM credentials in the AWS secrets engine [GH-4446] ## 0.10.0 (April 10th, 2018) SECURITY: - * Log sanitization for Combined Database Secret Engine: In certain failure - scenarios with incorrectly formatted connection urls, the raw connection - errors were being returned to the user with the configured database + * Log sanitization for Combined Database Secret Engine: In certain failure + scenarios with incorrectly formatted connection urls, the raw connection + errors were being returned to the user with the configured database credentials. Errors are now sanitized before being returned to the user. DEPRECATIONS/CHANGES: @@ -1368,7 +1535,7 @@ FEATURES: * HA for Google Cloud Storage: The GCS storage type now supports HA. * UI support for identity: Add and edit entities, groups, and their associated aliases. - * UI auth method support: Enable, disable, and configure all of the built-in + * UI auth method support: Enable, disable, and configure all of the built-in authentication methods. * UI (Enterprise): View and edit Sentinel policies. @@ -1401,17 +1568,17 @@ BUG FIXES: * secret/pki: When tidying if a value is unexpectedly nil, delete it and move on [GH-4214] * storage/s3: Fix panic if S3 returns no Content-Length header [GH-4222] - * ui: Fixed an issue where the UI was checking incorrect paths when operating - on transit keys. Capabilities are now checked when attempting to encrypt / + * ui: Fixed an issue where the UI was checking incorrect paths when operating + on transit keys. 
Capabilities are now checked when attempting to encrypt / decrypt, etc. * ui: Fixed IE 11 layout issues and JS errors that would stop the application from running. - * ui: Fixed the link that gets rendered when a user doesn't have permissions + * ui: Fixed the link that gets rendered when a user doesn't have permissions to view the root of a secret engine. The link now sends them back to the list of secret engines. - * replication: Fix issue with DR secondaries when using mount specified local + * replication: Fix issue with DR secondaries when using mount specified local paths. - * cli: Fix an issue where generating a dr operation token would not output the + * cli: Fix an issue where generating a dr operation token would not output the token [GH-4328] ## 0.9.6 (March 20th, 2018) diff --git a/Makefile b/Makefile index d288f549fff5..f605c8191f40 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ EXTERNAL_TOOLS=\ GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) GO_VERSION_MIN=1.11 -CGO_ENABLED=0 +CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) CGO_ENABLED=1 BUILD_TAGS+=foundationdb @@ -102,8 +102,17 @@ vet: prep: fmtcheck @sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'" @go generate $(go list ./... | grep -v /vendor/) + @# Remove old (now broken) husky git hooks. + @[ ! -d .git/hooks ] || grep -l '^# husky$$' .git/hooks/* | xargs rm -f @if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi +.PHONY: ci-config +ci-config: + @$(MAKE) -C .circleci ci-config +.PHONY: ci-verify +ci-verify: + @$(MAKE) -C .circleci ci-verify + # bootstrap the build by downloading additional tools bootstrap: @for tool in $(EXTERNAL_TOOLS) ; do \ @@ -128,7 +137,20 @@ test-ember: @echo "--> Running ember tests" @cd ui && yarn run test-oss -ember-ci-test: +ember-ci-test: # Deprecated, to be removed soon. 
+ @echo "ember-ci-test is deprecated in favour of test-ui-browserstack" + @exit 1 + +check-vault-in-path: + @VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \ + [ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \ + printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)" + +check-browserstack-creds: + @[ -n "$$BROWSERSTACK_ACCESS_KEY" ] || { echo "BROWSERSTACK_ACCESS_KEY not set"; exit 1; } + @[ -n "$$BROWSERSTACK_USERNAME" ] || { echo "BROWSERSTACK_USERNAME not set"; exit 1; } + +test-ui-browserstack: check-vault-in-path check-browserstack-creds @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @echo "--> Running ember tests in Browserstack" @@ -158,11 +180,13 @@ proto: protoc helper/forwarding/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/logical/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/physical/types.proto --go_out=plugins=grpc,paths=source_relative:. + protoc physical/raft/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/mfa/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/database/dbplugin/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/plugin/pb/*.proto --go_out=plugins=grpc,paths=source_relative:. 
- sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/protobuf:"/sentinel:"" protobuf:"/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go + sed -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go + sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/protobuf:"/sentinel:"" protobuf:"/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go sed -i -e 's/Iv/IV/' -e 's/Hmac/HMAC/' sdk/physical/types.pb.go fmtcheck: @@ -204,6 +228,6 @@ hana-database-plugin: mongodb-database-plugin: @CGO_ENABLED=0 go build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin -.PHONY: bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin static-assets ember-dist ember-dist-dev static-dist static-dist-dev assetcheck +.PHONY: bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin static-assets ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path check-browserstack-creds test-ui-browserstack .NOTPARALLEL: ember-dist 
ember-dist-dev static-assets diff --git a/README.md b/README.md index 444678722d5c..ecddc43505a7 100644 --- a/README.md +++ b/README.md @@ -63,9 +63,11 @@ first need [Go](https://www.golang.org) installed on your machine (version 1.12.1+ is *required*). For local dev first make sure Go is properly installed, including setting up a -[GOPATH](https://golang.org/doc/code.html#GOPATH). Next, clone this repository -into `$GOPATH/src/github.com/hashicorp/vault`. You can then download any -required build tools by bootstrapping your environment: +[GOPATH](https://golang.org/doc/code.html#GOPATH). Ensure that `$GOPATH/bin` is in +your path as some distributions bundle old version of build tools. Next, clone this +repository. Vault uses [Go Modules](https://github.com/golang/go/wiki/Modules), +so it is recommended that you clone the repository ***outside*** of the GOPATH. +You can then download any required build tools by bootstrapping your environment: ```sh $ make bootstrap diff --git a/api/auth_token.go b/api/auth_token.go index ed594eee8528..6807c89c3878 100644 --- a/api/auth_token.go +++ b/api/auth_token.go @@ -272,4 +272,5 @@ type TokenCreateRequest struct { NumUses int `json:"num_uses"` Renewable *bool `json:"renewable,omitempty"` Type string `json:"type"` + EntityAlias string `json:"entity_alias"` } diff --git a/api/go.mod b/api/go.mod index 6f8d42cb2ea6..17ebcaf907bd 100644 --- a/api/go.mod +++ b/api/go.mod @@ -8,12 +8,12 @@ require ( github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-multierror v1.0.0 - github.com/hashicorp/go-retryablehttp v0.5.3 - github.com/hashicorp/go-rootcerts v1.0.0 + github.com/hashicorp/go-retryablehttp v0.5.4 + github.com/hashicorp/go-rootcerts v1.0.1 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/vault/sdk v0.1.8 + github.com/hashicorp/vault/sdk v0.1.12-0.20190703041151-fef78ae6c93d github.com/mitchellh/mapstructure v1.1.2 - golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 + 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 gopkg.in/square/go-jose.v2 v2.3.1 ) diff --git a/api/go.sum b/api/go.sum index 24e7dfde4e0d..4e55eb8c12b3 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,6 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -9,16 +8,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -28,52 +23,39 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.0.0 h1:/gQ1sNR8/LHpoxKRQq4PmLBuacfZb4tC93e9B30o/7c= -github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod 
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux 
v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -95,8 +77,9 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -106,27 +89,21 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= google.golang.org/grpc v1.19.1/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= diff --git a/api/help.go b/api/help.go index 472ca0395ead..321bd597c1a2 100644 --- a/api/help.go +++ b/api/help.go @@ -24,6 +24,7 @@ func (c *Client) Help(path string) (*Help, error) { } type Help struct { - Help string `json:"help"` - SeeAlso []string `json:"see_also"` + Help string `json:"help"` + SeeAlso []string `json:"see_also"` + OpenAPI map[string]interface{} `json:"openapi"` } diff --git a/api/response.go b/api/response.go index f224972d5878..aed2a52e0869 100644 --- a/api/response.go +++ b/api/response.go @@ -42,32 +42,27 @@ func (r *Response) Error() error { r.Body.Close() r.Body = ioutil.NopCloser(bodyBuf) + // Build up the error object + respErr := &ResponseError{ + HTTPMethod: r.Request.Method, + URL: r.Request.URL.String(), + StatusCode: r.StatusCode, + } + // Decode the error response if we can. Note that we wrap the bodyBuf // in a bytes.Reader here so that the JSON decoder doesn't move the // read pointer for the original buffer. var resp ErrorResponse if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { - // Ignore the decoding error and just drop the raw response - return fmt.Errorf( - "Error making API request.\n\n"+ - "URL: %s %s\n"+ - "Code: %d. Raw Message:\n\n%s", - r.Request.Method, r.Request.URL.String(), - r.StatusCode, bodyBuf.String()) + // Store the fact that we couldn't decode the errors + respErr.RawError = true + respErr.Errors = []string{bodyBuf.String()} + } else { + // Store the decoded errors + respErr.Errors = resp.Errors } - var errBody bytes.Buffer - errBody.WriteString(fmt.Sprintf( - "Error making API request.\n\n"+ - "URL: %s %s\n"+ - "Code: %d. 
Errors:\n\n", - r.Request.Method, r.Request.URL.String(), - r.StatusCode)) - for _, err := range resp.Errors { - errBody.WriteString(fmt.Sprintf("* %s", err)) - } - - return fmt.Errorf(errBody.String()) + return respErr } // ErrorResponse is the raw structure of errors when they're returned by the @@ -75,3 +70,51 @@ func (r *Response) Error() error { type ErrorResponse struct { Errors []string } + +// ResponseError is the error returned when Vault responds with an error or +// non-success HTTP status code. If a request to Vault fails because of a +// network error a different error message will be returned. ResponseError gives +// access to the underlying errors and status code. +type ResponseError struct { + // HTTPMethod is the HTTP method for the request (PUT, GET, etc). + HTTPMethod string + + // URL is the URL of the request. + URL string + + // StatusCode is the HTTP status code. + StatusCode int + + // RawError marks that the underlying error messages returned by Vault were + // not parsable. The Errors slice will contain the raw response body as the + // first and only error string if this value is set to true. + RawError bool + + // Errors are the underlying errors returned by Vault. + Errors []string +} + +// Error returns a human-readable error string for the response error. +func (r *ResponseError) Error() string { + errString := "Errors" + if r.RawError { + errString = "Raw Message" + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + "URL: %s %s\n"+ + "Code: %d. 
%s:\n\n", + r.HTTPMethod, r.URL, r.StatusCode, errString)) + + if r.RawError && len(r.Errors) == 1 { + errBody.WriteString(r.Errors[0]) + } else { + for _, err := range r.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + } + + return errBody.String() +} diff --git a/api/secret.go b/api/secret.go index aaca78c8e348..d5b9ce9729eb 100644 --- a/api/secret.go +++ b/api/secret.go @@ -293,6 +293,7 @@ type SecretAuth struct { IdentityPolicies []string `json:"identity_policies"` Metadata map[string]string `json:"metadata"` Orphan bool `json:"orphan"` + EntityID string `json:"entity_id"` LeaseDuration int `json:"lease_duration"` Renewable bool `json:"renewable"` diff --git a/api/sys_raft.go b/api/sys_raft.go new file mode 100644 index 000000000000..6897dc0a7eb2 --- /dev/null +++ b/api/sys_raft.go @@ -0,0 +1,130 @@ +package api + +import ( + "context" + "io" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/consts" +) + +// RaftJoinResponse represents the response of the raft join API +type RaftJoinResponse struct { + Joined bool `json:"joined"` +} + +// RaftJoinRequest represents the parameters consumed by the raft join API +type RaftJoinRequest struct { + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert":` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` +} + +// RaftJoin adds the node from which this call is invoked from to the raft +// cluster represented by the leader address in the parameter. 
+func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { + r := c.c.NewRequest("POST", "/v1/sys/storage/raft/join") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RaftJoinResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +// RaftSnapshot invokes the API that takes the snapshot of the raft cluster and +// writes it to the supplied io.Writer. +func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { + r := c.c.NewRequest("GET", "/v1/sys/storage/raft/snapshot") + r.URL.RawQuery = r.Params.Encode() + + req, err := http.NewRequest(http.MethodGet, r.URL.RequestURI(), nil) + if err != nil { + return err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + // Avoiding the use of RawRequestWithContext which reads the response body + // to determine if the body contains error message. 
+ var result *Response + resp, err := c.c.config.HttpClient.Do(req) + if resp == nil { + return nil + } + + result = &Response{Response: resp} + if err := result.Error(); err != nil { + return err + } + + _, err = io.Copy(snapWriter, resp.Body) + if err != nil { + return err + } + + return nil +} + +// RaftSnapshotRestore reads the snapshot from the io.Reader and installs that +// snapshot, returning the cluster to the state defined by it. +func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error { + path := "/v1/sys/storage/raft/snapshot" + if force { + path = "/v1/sys/storage/raft/snapshot-force" + } + r := c.c.NewRequest("POST", path) + + r.Body = snapReader + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} diff --git a/api/sys_rekey.go b/api/sys_rekey.go index 55f1a703d41c..153e486c6d60 100644 --- a/api/sys_rekey.go +++ b/api/sys_rekey.go @@ -234,7 +234,7 @@ func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { } func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup") + r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-key-backup") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() @@ -275,7 +275,7 @@ func (c *Sys) RekeyDeleteBackup() error { } func (c *Sys) RekeyDeleteRecoveryBackup() error { - r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup") + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-key-backup") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() diff --git a/audit/audit.go b/audit/audit.go index beaa90918584..6f1e7208e823 100644 --- a/audit/audit.go +++ b/audit/audit.go @@ -16,13 +16,13 @@ type Backend interface { // request is authorized but before the request is executed. 
The arguments // MUST not be modified in anyway. They should be deep copied if this is // a possibility. - LogRequest(context.Context, *LogInput) error + LogRequest(context.Context, *logical.LogInput) error // LogResponse is used to synchronously log a response. This is done after // the request is processed but before the response is sent. The arguments // MUST not be modified in anyway. They should be deep copied if this is // a possibility. - LogResponse(context.Context, *LogInput) error + LogResponse(context.Context, *logical.LogInput) error // GetHash is used to return the given data with the backend's hash, // so that a caller can determine if a value in the audit log matches @@ -36,16 +36,6 @@ type Backend interface { Invalidate(context.Context) } -// LogInput contains the input parameters passed into LogRequest and LogResponse -type LogInput struct { - Auth *logical.Auth - Request *logical.Request - Response *logical.Response - OuterErr error - NonHMACReqDataKeys []string - NonHMACRespDataKeys []string -} - // BackendConfig contains configuration parameters used in the factory func to // instantiate audit backends type BackendConfig struct { diff --git a/audit/format.go b/audit/format.go index 7cbf771a70a9..329e7ba7125a 100644 --- a/audit/format.go +++ b/audit/format.go @@ -2,6 +2,7 @@ package audit import ( "context" + "crypto/tls" "fmt" "io" "strings" @@ -13,12 +14,14 @@ import ( "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/copystructure" ) type AuditFormatWriter interface { + // WriteRequest writes the request entry to the writer or returns an error. WriteRequest(io.Writer, *AuditRequestEntry) error + // WriteResponse writes the response entry to the writer or returns an error. WriteResponse(io.Writer, *AuditResponseEntry) error + // Salt returns a non-nil salt or an error. 
Salt(context.Context) (*salt.Salt, error) } @@ -30,7 +33,7 @@ type AuditFormatter struct { var _ Formatter = (*AuditFormatter)(nil) -func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config FormatterConfig, in *LogInput) error { +func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { if in == nil || in.Request == nil { return fmt.Errorf("request to request-audit a nil request") } @@ -51,65 +54,27 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config // Set these to the input values at first auth := in.Auth req := in.Request + var connState *tls.ConnectionState + if auth == nil { + auth = new(logical.Auth) + } - if !config.Raw { - // Before we copy the structure we must nil out some data - // otherwise we will cause reflection to panic and die - if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { - origState := in.Request.Connection.ConnState - in.Request.Connection.ConnState = nil - defer func() { - in.Request.Connection.ConnState = origState - }() - } - - // Copy the auth structure - if in.Auth != nil { - cp, err := copystructure.Copy(in.Auth) - if err != nil { - return err - } - auth = cp.(*logical.Auth) - } + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } - cp, err := copystructure.Copy(in.Request) + if !config.Raw { + auth, err = HashAuth(salt, auth, config.HMACAccessor) if err != nil { return err } - req = cp.(*logical.Request) - - // Hash any sensitive information - if auth != nil { - // Cache and restore accessor in the auth - var authAccessor string - if !config.HMACAccessor && auth.Accessor != "" { - authAccessor = auth.Accessor - } - if err := Hash(salt, auth, nil); err != nil { - return err - } - if authAccessor != "" { - auth.Accessor = authAccessor - } - } - // Cache and restore accessor in the request - var clientTokenAccessor 
string - if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" { - clientTokenAccessor = req.ClientTokenAccessor - } - if err := Hash(salt, req, in.NonHMACReqDataKeys); err != nil { + req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { return err } - if clientTokenAccessor != "" { - req.ClientTokenAccessor = clientTokenAccessor - } } - // If auth is nil, make an empty one - if auth == nil { - auth = new(logical.Auth) - } var errString string if in.OuterErr != nil { errString = in.OuterErr.Error() @@ -120,11 +85,15 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config return err } + reqType := in.Type + if reqType == "" { + reqType = "request" + } reqEntry := &AuditRequestEntry{ - Type: "request", + Type: reqType, Error: errString, - Auth: AuditAuth{ + Auth: &AuditAuth{ ClientToken: auth.ClientToken, Accessor: auth.Accessor, DisplayName: auth.DisplayName, @@ -132,27 +101,29 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config TokenPolicies: auth.TokenPolicies, IdentityPolicies: auth.IdentityPolicies, ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, Metadata: auth.Metadata, EntityID: auth.EntityID, RemainingUses: req.ClientTokenRemainingUses, TokenType: auth.TokenType.String(), }, - Request: AuditRequest{ + Request: &AuditRequest{ ID: req.ID, ClientToken: req.ClientToken, ClientTokenAccessor: req.ClientTokenAccessor, Operation: req.Operation, - Namespace: AuditNamespace{ + Namespace: &AuditNamespace{ ID: ns.ID, Path: ns.Path, }, - Path: req.Path, - Data: req.Data, - PolicyOverride: req.PolicyOverride, - RemoteAddr: getRemoteAddr(req), - ReplicationCluster: req.ReplicationCluster, - Headers: req.Headers, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, + 
ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), }, } @@ -167,7 +138,7 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config return f.AuditFormatWriter.WriteRequest(w, reqEntry) } -func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config FormatterConfig, in *LogInput) error { +func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { if in == nil || in.Request == nil { return fmt.Errorf("request to response-audit a nil request") } @@ -186,104 +157,36 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config } // Set these to the input values at first - auth := in.Auth - req := in.Request - resp := in.Response - - if !config.Raw { - // Before we copy the structure we must nil out some data - // otherwise we will cause reflection to panic and die - if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { - origState := in.Request.Connection.ConnState - in.Request.Connection.ConnState = nil - defer func() { - in.Request.Connection.ConnState = origState - }() - } + auth, req, resp := in.Auth, in.Request, in.Response + if auth == nil { + auth = new(logical.Auth) + } + if resp == nil { + resp = new(logical.Response) + } + var connState *tls.ConnectionState - // Copy the auth structure - if in.Auth != nil { - cp, err := copystructure.Copy(in.Auth) - if err != nil { - return err - } - auth = cp.(*logical.Auth) - } + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } - cp, err := copystructure.Copy(in.Request) + if !config.Raw { + auth, err = HashAuth(salt, auth, config.HMACAccessor) if err != nil { return err } - req = cp.(*logical.Request) - - if in.Response != nil { - cp, err := copystructure.Copy(in.Response) - if err != nil { - return err - } - resp = cp.(*logical.Response) - } - // Hash any sensitive 
information - - // Cache and restore accessor in the auth - if auth != nil { - var accessor string - if !config.HMACAccessor && auth.Accessor != "" { - accessor = auth.Accessor - } - if err := Hash(salt, auth, nil); err != nil { - return err - } - if accessor != "" { - auth.Accessor = accessor - } - } - - // Cache and restore accessor in the request - var clientTokenAccessor string - if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" { - clientTokenAccessor = req.ClientTokenAccessor - } - if err := Hash(salt, req, in.NonHMACReqDataKeys); err != nil { + req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { return err } - if clientTokenAccessor != "" { - req.ClientTokenAccessor = clientTokenAccessor - } - // Cache and restore accessor in the response - if resp != nil { - var accessor, wrappedAccessor, wrappingAccessor string - if !config.HMACAccessor && resp != nil && resp.Auth != nil && resp.Auth.Accessor != "" { - accessor = resp.Auth.Accessor - } - if !config.HMACAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" { - wrappedAccessor = resp.WrapInfo.WrappedAccessor - wrappingAccessor = resp.WrapInfo.Accessor - } - if err := Hash(salt, resp, in.NonHMACRespDataKeys); err != nil { - return err - } - if accessor != "" { - resp.Auth.Accessor = accessor - } - if wrappedAccessor != "" { - resp.WrapInfo.WrappedAccessor = wrappedAccessor - } - if wrappingAccessor != "" { - resp.WrapInfo.Accessor = wrappingAccessor - } + resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys) + if err != nil { + return err } } - // If things are nil, make empty to avoid panics - if auth == nil { - auth = new(logical.Auth) - } - if resp == nil { - resp = new(logical.Response) - } var errString string if in.OuterErr != nil { errString = in.OuterErr.Error() @@ -304,6 +207,7 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config TokenPolicies: 
resp.Auth.TokenPolicies, IdentityPolicies: resp.Auth.IdentityPolicies, ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies, + NoDefaultPolicy: resp.Auth.NoDefaultPolicy, Metadata: resp.Auth.Metadata, NumUses: resp.Auth.NumUses, EntityID: resp.Auth.EntityID, @@ -334,10 +238,14 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config } } + respType := in.Type + if respType == "" { + respType = "response" + } respEntry := &AuditResponseEntry{ - Type: "response", + Type: respType, Error: errString, - Auth: AuditAuth{ + Auth: &AuditAuth{ ClientToken: auth.ClientToken, Accessor: auth.Accessor, DisplayName: auth.DisplayName, @@ -345,30 +253,32 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config TokenPolicies: auth.TokenPolicies, IdentityPolicies: auth.IdentityPolicies, ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, Metadata: auth.Metadata, RemainingUses: req.ClientTokenRemainingUses, EntityID: auth.EntityID, TokenType: auth.TokenType.String(), }, - Request: AuditRequest{ + Request: &AuditRequest{ ID: req.ID, ClientToken: req.ClientToken, ClientTokenAccessor: req.ClientTokenAccessor, Operation: req.Operation, - Namespace: AuditNamespace{ + Namespace: &AuditNamespace{ ID: ns.ID, Path: ns.Path, }, - Path: req.Path, - Data: req.Data, - PolicyOverride: req.PolicyOverride, - RemoteAddr: getRemoteAddr(req), - ReplicationCluster: req.ReplicationCluster, - Headers: req.Headers, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, }, - Response: AuditResponse{ + Response: &AuditResponse{ Auth: respAuth, Secret: respSecret, Data: resp.Data, @@ -392,36 +302,37 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config // AuditRequestEntry 
is the structure of a request audit log entry in Audit. type AuditRequestEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type"` - Auth AuditAuth `json:"auth"` - Request AuditRequest `json:"request"` - Error string `json:"error"` + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Error string `json:"error,omitempty"` } // AuditResponseEntry is the structure of a response audit log entry in Audit. type AuditResponseEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type"` - Auth AuditAuth `json:"auth"` - Request AuditRequest `json:"request"` - Response AuditResponse `json:"response"` - Error string `json:"error"` + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Response *AuditResponse `json:"response,omitempty"` + Error string `json:"error,omitempty"` } type AuditRequest struct { - ID string `json:"id"` - ReplicationCluster string `json:"replication_cluster,omitempty"` - Operation logical.Operation `json:"operation"` - ClientToken string `json:"client_token"` - ClientTokenAccessor string `json:"client_token_accessor"` - Namespace AuditNamespace `json:"namespace"` - Path string `json:"path"` - Data map[string]interface{} `json:"data"` - PolicyOverride bool `json:"policy_override"` - RemoteAddr string `json:"remote_address"` - WrapTTL int `json:"wrap_ttl"` - Headers map[string][]string `json:"headers"` + ID string `json:"id,omitempty"` + ReplicationCluster string `json:"replication_cluster,omitempty"` + Operation logical.Operation `json:"operation,omitempty"` + ClientToken string `json:"client_token,omitempty"` + ClientTokenAccessor string `json:"client_token_accessor,omitempty"` + Namespace *AuditNamespace `json:"namespace,omitempty"` + Path string `json:"path,omitempty"` + Data 
map[string]interface{} `json:"data,omitempty"` + PolicyOverride bool `json:"policy_override,omitempty"` + RemoteAddr string `json:"remote_address,omitempty"` + WrapTTL int `json:"wrap_ttl,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` } type AuditResponse struct { @@ -431,40 +342,41 @@ type AuditResponse struct { Warnings []string `json:"warnings,omitempty"` Redirect string `json:"redirect,omitempty"` WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` - Headers map[string][]string `json:"headers"` + Headers map[string][]string `json:"headers,omitempty"` } type AuditAuth struct { - ClientToken string `json:"client_token"` - Accessor string `json:"accessor"` - DisplayName string `json:"display_name"` - Policies []string `json:"policies"` + ClientToken string `json:"client_token,omitempty"` + Accessor string `json:"accessor,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Policies []string `json:"policies,omitempty"` TokenPolicies []string `json:"token_policies,omitempty"` IdentityPolicies []string `json:"identity_policies,omitempty"` ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` - Metadata map[string]string `json:"metadata"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` NumUses int `json:"num_uses,omitempty"` RemainingUses int `json:"remaining_uses,omitempty"` - EntityID string `json:"entity_id"` - TokenType string `json:"token_type"` + EntityID string `json:"entity_id,omitempty"` + TokenType string `json:"token_type,omitempty"` } type AuditSecret struct { - LeaseID string `json:"lease_id"` + LeaseID string `json:"lease_id,omitempty"` } type AuditResponseWrapInfo struct { - TTL int `json:"ttl"` - Token string `json:"token"` - Accessor string `json:"accessor"` - CreationTime string `json:"creation_time"` - 
CreationPath string `json:"creation_path"` + TTL int `json:"ttl,omitempty"` + Token string `json:"token,omitempty"` + Accessor string `json:"accessor,omitempty"` + CreationTime string `json:"creation_time,omitempty"` + CreationPath string `json:"creation_path,omitempty"` WrappedAccessor string `json:"wrapped_accessor,omitempty"` } type AuditNamespace struct { - ID string `json:"id"` - Path string `json:"path"` + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` } // getRemoteAddr safely gets the remote address avoiding a nil pointer @@ -475,6 +387,14 @@ func getRemoteAddr(req *logical.Request) string { return "" } +func getClientCertificateSerialNumber(connState *tls.ConnectionState) string { + if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { + return "" + } + + return connState.VerifiedChains[0][0].SerialNumber.String() +} + // parseVaultTokenFromJWT returns a string iff the token was a JWT and we could // extract the original token ID from inside func parseVaultTokenFromJWT(token string) *string { diff --git a/audit/format_json_test.go b/audit/format_json_test.go index f11934bf53df..a1e32a11035a 100644 --- a/audit/format_json_test.go +++ b/audit/format_json_test.go @@ -38,11 +38,13 @@ func TestFormatJSON_formatRequest(t *testing.T) { }{ "auth, request": { &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, }, &logical.Request{ Operation: logical.UpdateOperation, @@ -63,11 +65,13 @@ func TestFormatJSON_formatRequest(t *testing.T) { }, "auth, request with prefix": { &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - Policies: []string{"root"}, - TokenType: 
logical.TokenTypeService, + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, }, &logical.Request{ Operation: logical.UpdateOperation, @@ -99,7 +103,7 @@ func TestFormatJSON_formatRequest(t *testing.T) { config := FormatterConfig{ HMACAccessor: false, } - in := &LogInput{ + in := &logical.LogInput{ Auth: tc.Auth, Request: tc.Req, OuterErr: tc.Err, @@ -117,7 +121,7 @@ func TestFormatJSON_formatRequest(t *testing.T) { if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil { t.Fatalf("bad json: %s", err) } - expectedjson.Request.Namespace = AuditNamespace{ID: "root"} + expectedjson.Request.Namespace = &AuditNamespace{ID: "root"} var actualjson = new(AuditRequestEntry) if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil { @@ -139,5 +143,5 @@ func TestFormatJSON_formatRequest(t *testing.T) { } } -const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"metadata":null,"entity_id":"","token_type":"service"},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} +const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"no_default_policy":true,"metadata":null,"entity_id":"foobarentity","token_type":"service"},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} ` diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go index ce4b85a7c7b6..6c46d3d9db38 100644 --- a/audit/format_jsonx_test.go +++ 
b/audit/format_jsonx_test.go @@ -37,15 +37,20 @@ func TestFormatJSONx_formatRequest(t *testing.T) { }{ "auth, request": { &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, }, &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", Connection: &logical.Connection{ RemoteAddr: "127.0.0.1", }, @@ -55,24 +60,30 @@ func TestFormatJSONx_formatRequest(t *testing.T) { Headers: map[string][]string{ "foo": []string{"bar"}, }, + PolicyOverride: true, }, errors.New("this is an error"), "", "", - fmt.Sprintf(`bar%stesttokenrootservicethis is an errorbarrootupdate/foofalse127.0.0.160request`, - fooSalted), + fmt.Sprintf(`bar%stesttokenfoobarentitytruerootservicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), }, "auth, request with prefix": { &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + NoDefaultPolicy: true, + EntityID: "foobarentity", + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, }, &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", Connection: &logical.Connection{ RemoteAddr: "127.0.0.1", }, @@ -82,12 +93,13 @@ func TestFormatJSONx_formatRequest(t *testing.T) { Headers: map[string][]string{ "foo": []string{"bar"}, }, + PolicyOverride: true, }, errors.New("this is an 
error"), "", "@cee: ", - fmt.Sprintf(`bar%stesttokenrootservicethis is an errorbarrootupdate/foofalse127.0.0.160request`, - fooSalted), + fmt.Sprintf(`bar%stesttokenfoobarentitytruerootservicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), }, } @@ -103,7 +115,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) { OmitTime: true, HMACAccessor: false, } - in := &LogInput{ + in := &logical.LogInput{ Auth: tc.Auth, Request: tc.Req, OuterErr: tc.Err, diff --git a/audit/format_test.go b/audit/format_test.go index 264fb334c1fd..4f3cc5cbb2e5 100644 --- a/audit/format_test.go +++ b/audit/format_test.go @@ -41,11 +41,11 @@ func TestFormatRequestErrors(t *testing.T) { AuditFormatWriter: &noopFormatWriter{}, } - if err := formatter.FormatRequest(context.Background(), ioutil.Discard, config, &LogInput{}); err == nil { + if err := formatter.FormatRequest(context.Background(), ioutil.Discard, config, &logical.LogInput{}); err == nil { t.Fatal("expected error due to nil request") } - in := &LogInput{ + in := &logical.LogInput{ Request: &logical.Request{}, } if err := formatter.FormatRequest(context.Background(), nil, config, in); err == nil { @@ -59,11 +59,11 @@ func TestFormatResponseErrors(t *testing.T) { AuditFormatWriter: &noopFormatWriter{}, } - if err := formatter.FormatResponse(context.Background(), ioutil.Discard, config, &LogInput{}); err == nil { + if err := formatter.FormatResponse(context.Background(), ioutil.Discard, config, &logical.LogInput{}); err == nil { t.Fatal("expected error due to nil request") } - in := &LogInput{ + in := &logical.LogInput{ Request: &logical.Request{}, } if err := formatter.FormatResponse(context.Background(), nil, config, in); err == nil { diff --git a/audit/formatter.go b/audit/formatter.go index 7702a1ee5d64..c27035768d35 100644 --- a/audit/formatter.go +++ b/audit/formatter.go @@ -3,6 +3,8 @@ package audit import ( "context" "io" + + "github.com/hashicorp/vault/sdk/logical" ) // Formatter 
is an interface that is responsible for formating a @@ -11,8 +13,8 @@ import ( // // It is recommended that you pass data through Hash prior to formatting it. type Formatter interface { - FormatRequest(context.Context, io.Writer, FormatterConfig, *LogInput) error - FormatResponse(context.Context, io.Writer, FormatterConfig, *LogInput) error + FormatRequest(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error + FormatResponse(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error } type FormatterConfig struct { diff --git a/audit/hashstructure.go b/audit/hashstructure.go index e5035581cf43..70db3f26ed10 100644 --- a/audit/hashstructure.go +++ b/audit/hashstructure.go @@ -1,9 +1,9 @@ package audit import ( + "encoding/json" "errors" "reflect" - "strings" "time" "github.com/hashicorp/vault/sdk/helper/salt" @@ -19,107 +19,157 @@ func HashString(salter *salt.Salt, data string) string { return salter.GetIdentifiedHMAC(data) } -// Hash will hash the given type. This has built-in support for auth, -// requests, and responses. If it is a type that isn't recognized, then -// it will be passed through. -// -// The structure is modified in-place. -func Hash(salter *salt.Salt, raw interface{}, nonHMACDataKeys []string) error { +// HashAuth returns a hashed copy of the logical.Auth input. 
+func HashAuth(salter *salt.Salt, in *logical.Auth, HMACAccessor bool) (*logical.Auth, error) { + if in == nil { + return nil, nil + } + fn := salter.GetIdentifiedHMAC + auth := *in - switch s := raw.(type) { - case *logical.Auth: - if s == nil { - return nil - } - if s.ClientToken != "" { - s.ClientToken = fn(s.ClientToken) - } - if s.Accessor != "" { - s.Accessor = fn(s.Accessor) - } + if auth.ClientToken != "" { + auth.ClientToken = fn(auth.ClientToken) + } + if HMACAccessor && auth.Accessor != "" { + auth.Accessor = fn(auth.Accessor) + } + return &auth, nil +} - case *logical.Request: - if s == nil { - return nil - } - if s.Auth != nil { - if err := Hash(salter, s.Auth, nil); err != nil { - return err - } - } +// HashRequest returns a hashed copy of the logical.Request input. +func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Request, error) { + if in == nil { + return nil, nil + } - if s.ClientToken != "" { - s.ClientToken = fn(s.ClientToken) - } + fn := salter.GetIdentifiedHMAC + req := *in - if s.ClientTokenAccessor != "" { - s.ClientTokenAccessor = fn(s.ClientTokenAccessor) + if req.Auth != nil { + cp, err := copystructure.Copy(req.Auth) + if err != nil { + return nil, err } - data, err := HashStructure(s.Data, fn, nonHMACDataKeys) + req.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) if err != nil { - return err + return nil, err } + } - s.Data = data.(map[string]interface{}) + if req.ClientToken != "" { + req.ClientToken = fn(req.ClientToken) + } + if HMACAccessor && req.ClientTokenAccessor != "" { + req.ClientTokenAccessor = fn(req.ClientTokenAccessor) + } - case *logical.Response: - if s == nil { - return nil - } + data, err := hashMap(fn, req.Data, nonHMACDataKeys) + if err != nil { + return nil, err + } + + req.Data = data + return &req, nil +} + +func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKeys []string) (map[string]interface{}, error) { + if 
data == nil { + return nil, nil + } - if s.Auth != nil { - if err := Hash(salter, s.Auth, nil); err != nil { - return err + copy, err := copystructure.Copy(data) + if err != nil { + return nil, err + } + newData := copy.(map[string]interface{}) + for k, v := range newData { + if o, ok := v.(logical.OptMarshaler); ok { + marshaled, err := o.MarshalJSONWithOptions(&logical.MarshalOptions{ + ValueHasher: fn, + }) + if err != nil { + return nil, err } + newData[k] = json.RawMessage(marshaled) } + } - if s.WrapInfo != nil { - if err := Hash(salter, s.WrapInfo, nil); err != nil { - return err - } + if err := HashStructure(newData, fn, nonHMACDataKeys); err != nil { + return nil, err + } + + return newData, nil +} + +// HashResponse returns a hashed copy of the logical.Request input. +func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Response, error) { + if in == nil { + return nil, nil + } + + fn := salter.GetIdentifiedHMAC + resp := *in + + if resp.Auth != nil { + cp, err := copystructure.Copy(resp.Auth) + if err != nil { + return nil, err } - data, err := HashStructure(s.Data, fn, nonHMACDataKeys) + resp.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) if err != nil { - return err + return nil, err } + } - s.Data = data.(map[string]interface{}) + data, err := hashMap(fn, resp.Data, nonHMACDataKeys) + if err != nil { + return nil, err + } + resp.Data = data - case *wrapping.ResponseWrapInfo: - if s == nil { - return nil + if resp.WrapInfo != nil { + var err error + resp.WrapInfo, err = HashWrapInfo(salter, resp.WrapInfo, HMACAccessor) + if err != nil { + return nil, err } + } - s.Token = fn(s.Token) - s.Accessor = fn(s.Accessor) + return &resp, nil +} - if s.WrappedAccessor != "" { - s.WrappedAccessor = fn(s.WrappedAccessor) +// HashWrapInfo returns a hashed copy of the wrapping.ResponseWrapInfo input. 
+func HashWrapInfo(salter *salt.Salt, in *wrapping.ResponseWrapInfo, HMACAccessor bool) (*wrapping.ResponseWrapInfo, error) { + if in == nil { + return nil, nil + } + + fn := salter.GetIdentifiedHMAC + wrapinfo := *in + + wrapinfo.Token = fn(wrapinfo.Token) + + if HMACAccessor { + wrapinfo.Accessor = fn(wrapinfo.Accessor) + + if wrapinfo.WrappedAccessor != "" { + wrapinfo.WrappedAccessor = fn(wrapinfo.WrappedAccessor) } } - return nil + return &wrapinfo, nil } // HashStructure takes an interface and hashes all the values within // the structure. Only _values_ are hashed: keys of objects are not. // // For the HashCallback, see the built-in HashCallbacks below. -func HashStructure(s interface{}, cb HashCallback, ignoredKeys []string) (interface{}, error) { - s, err := copystructure.Copy(s) - if err != nil { - return nil, err - } - +func HashStructure(s interface{}, cb HashCallback, ignoredKeys []string) error { walker := &hashWalker{Callback: cb, IgnoredKeys: ignoredKeys} - if err := reflectwalk.Walk(s, walker); err != nil { - return nil, err - } - - return s, nil + return reflectwalk.Walk(s, walker) } // HashCallback is the callback called for HashStructure to hash @@ -134,18 +184,25 @@ type hashWalker struct { // to be hashed. If there is an error, walking will be halted // immediately and the error returned. Callback HashCallback - // IgnoreKeys are the keys that wont have the HashCallback applied IgnoredKeys []string - - key []string - lastValue reflect.Value - loc reflectwalk.Location - cs []reflect.Value - csKey []reflect.Value - csData interface{} - sliceIndex int - unknownKeys []string + // MapElem appends the key itself (not the reflect.Value) to key. + // The last element in key is the most recently entered map key. + // Since Exit pops the last element of key, only nesting to another + // structure increases the size of this slice. + key []string + lastValue reflect.Value + // Enter appends to loc and exit pops loc. 
The last element of loc is thus + // the current location. + loc []reflectwalk.Location + // Map and Slice append to cs, Exit pops the last element off cs. + // The last element in cs is the most recently entered map or slice. + cs []reflect.Value + // MapElem and SliceElem append to csKey. The last element in csKey is the + // most recently entered map key or slice index. Since Exit pops the last + // element of csKey, only nesting to another structure increases the size of + // this slice. + csKey []reflect.Value } // hashTimeType stores a pre-computed reflect.Type for a time.Time so @@ -155,12 +212,12 @@ type hashWalker struct { var hashTimeType = reflect.TypeOf(time.Time{}) func (w *hashWalker) Enter(loc reflectwalk.Location) error { - w.loc = loc + w.loc = append(w.loc, loc) return nil } func (w *hashWalker) Exit(loc reflectwalk.Location) error { - w.loc = reflectwalk.None + w.loc = w.loc[:len(w.loc)-1] switch loc { case reflectwalk.Map: @@ -183,7 +240,6 @@ func (w *hashWalker) Map(m reflect.Value) error { } func (w *hashWalker) MapElem(m, k, v reflect.Value) error { - w.csData = k w.csKey = append(w.csKey, k) w.key = append(w.key, k.String()) w.lastValue = v @@ -197,7 +253,6 @@ func (w *hashWalker) Slice(s reflect.Value) error { func (w *hashWalker) SliceElem(i int, elem reflect.Value) error { w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = i return nil } @@ -207,20 +262,37 @@ func (w *hashWalker) Struct(v reflect.Value) error { return nil } - // If we aren't in a map value, return an error to prevent a panic - if v.Interface() != w.lastValue.Interface() { - return errors.New("time.Time value in a non map key cannot be hashed for audits") + if len(w.loc) < 3 { + // The last element of w.loc is reflectwalk.Struct, by definition. 
+ // If len(w.loc) < 3 that means hashWalker.Walk was given a struct + // value and this is the very first step in the walk, and we don't + // currently support structs as inputs, + return errors.New("structs as direct inputs not supported") } - // Create a string value of the time. IMPORTANT: this must never change - // across Vault versions or the hash value of equivalent time.Time will - // change. - strVal := v.Interface().(time.Time).Format(time.RFC3339Nano) + // Second to last element of w.loc is location that contains this struct. + switch w.loc[len(w.loc)-2] { + case reflectwalk.MapValue: + // Create a string value of the time. IMPORTANT: this must never change + // across Vault versions or the hash value of equivalent time.Time will + // change. + strVal := v.Interface().(time.Time).Format(time.RFC3339Nano) - // Set the map value to the string instead of the time.Time object - m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, reflect.ValueOf(strVal)) + // Set the map value to the string instead of the time.Time object + m := w.cs[len(w.cs)-1] + mk := w.csKey[len(w.cs)-1] + m.SetMapIndex(mk, reflect.ValueOf(strVal)) + case reflectwalk.SliceElem: + // Create a string value of the time. IMPORTANT: this must never change + // across Vault versions or the hash value of equivalent time.Time will + // change. + strVal := v.Interface().(time.Time).Format(time.RFC3339Nano) + + // Set the map value to the string instead of the time.Time object + s := w.cs[len(w.cs)-1] + si := int(w.csKey[len(w.cs)-1].Int()) + s.Slice(si, si+1).Index(0).Set(reflect.ValueOf(strVal)) + } // Skip this entry so that we don't walk the struct. return reflectwalk.SkipEntry @@ -230,13 +302,15 @@ func (w *hashWalker) StructField(reflect.StructField, reflect.Value) error { return nil } +// Primitive calls Callback to transform strings in-place, except for map keys. +// Strings hiding within interfaces are also transformed. 
func (w *hashWalker) Primitive(v reflect.Value) error { if w.Callback == nil { return nil } // We don't touch map keys - if w.loc == reflectwalk.MapKey { + if w.loc[len(w.loc)-1] == reflectwalk.MapKey { return nil } @@ -244,7 +318,6 @@ func (w *hashWalker) Primitive(v reflect.Value) error { // We only care about strings if v.Kind() == reflect.Interface { - setV = v v = v.Elem() } if v.Kind() != reflect.String { @@ -260,25 +333,17 @@ func (w *hashWalker) Primitive(v reflect.Value) error { replaceVal := w.Callback(v.String()) resultVal := reflect.ValueOf(replaceVal) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal + switch w.loc[len(w.loc)-1] { case reflectwalk.MapValue: // If we're in a map, then the only way to set a map value is // to set it directly. 
m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) + mk := w.csKey[len(w.cs)-1] m.SetMapIndex(mk, resultVal) + case reflectwalk.SliceElem: + s := w.cs[len(w.cs)-1] + si := int(w.csKey[len(w.cs)-1].Int()) + s.Slice(si, si+1).Index(0).Set(resultVal) default: // Otherwise, we should be addressable setV.Set(resultVal) @@ -286,34 +351,3 @@ func (w *hashWalker) Primitive(v reflect.Value) error { return nil } - -func (w *hashWalker) removeCurrent() { - // Append the key to the unknown keys - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - - for i := 1; i <= len(w.cs); i++ { - c := w.cs[len(w.cs)-i] - switch c.Kind() { - case reflect.Map: - // Zero value so that we delete the map key - var val reflect.Value - - // Get the key and delete it - k := w.csData.(reflect.Value) - c.SetMapIndex(k, val) - return - } - } - - panic("No container found for removeCurrent") -} - -func (w *hashWalker) replaceCurrent(v reflect.Value) { - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} diff --git a/audit/hashstructure_test.go b/audit/hashstructure_test.go index 2f50ef613688..0a361c373b0b 100644 --- a/audit/hashstructure_test.go +++ b/audit/hashstructure_test.go @@ -3,7 +3,9 @@ package audit import ( "context" "crypto/sha256" + "encoding/json" "fmt" + "github.com/go-test/deep" "reflect" "testing" "time" @@ -111,25 +113,85 @@ func TestHashString(t *testing.T) { } } -func TestHash(t *testing.T) { - now := time.Now() - +func TestHashAuth(t *testing.T) { cases := []struct { - Input interface{} - Output interface{} - NonHMACDataKeys []string + Input *logical.Auth + Output *logical.Auth + HMACAccessor bool }{ { &logical.Auth{ClientToken: "foo"}, &logical.Auth{ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a"}, - nil, + false, }, + { + &logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: 1 * time.Hour, + }, + + 
ClientToken: "foo", + }, + &logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: 1 * time.Hour, + }, + + ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a", + }, + false, + }, + } + + inmemStorage := &logical.InmemStorage{} + inmemStorage.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }) + if err != nil { + t.Fatalf("Error instantiating salt: %s", err) + } + for _, tc := range cases { + input := fmt.Sprintf("%#v", tc.Input) + out, err := HashAuth(localSalt, tc.Input, tc.HMACAccessor) + if err != nil { + t.Fatalf("err: %s\n\n%s", err, input) + } + if !reflect.DeepEqual(out, tc.Output) { + t.Fatalf("bad:\nInput:\n%s\nOutput:\n%#v\nExpected output:\n%#v", input, out, tc.Output) + } + } +} + +type testOptMarshaler struct { + S string + I int +} + +func (o *testOptMarshaler) MarshalJSONWithOptions(options *logical.MarshalOptions) ([]byte, error) { + return json.Marshal(&testOptMarshaler{S: options.ValueHasher(o.S), I: o.I}) +} + +var _ logical.OptMarshaler = &testOptMarshaler{} + +func TestHashRequest(t *testing.T) { + cases := []struct { + Input *logical.Request + Output *logical.Request + NonHMACDataKeys []string + HMACAccessor bool + }{ { &logical.Request{ Data: map[string]interface{}{ "foo": "bar", "baz": "foobar", "private_key_type": certutil.PrivateKeyType("rsa"), + "om": &testOptMarshaler{S: "bar", I: 1}, }, }, &logical.Request{ @@ -137,10 +199,47 @@ func TestHash(t *testing.T) { "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", "baz": "foobar", "private_key_type": "hmac-sha256:995230dca56fffd310ff591aa404aab52b2abb41703c787cfa829eceb4595bf1", + "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), }, }, []string{"baz"}, + false, }, + } + + 
inmemStorage := &logical.InmemStorage{} + inmemStorage.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }) + if err != nil { + t.Fatalf("Error instantiating salt: %s", err) + } + for _, tc := range cases { + input := fmt.Sprintf("%#v", tc.Input) + out, err := HashRequest(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) + if err != nil { + t.Fatalf("err: %s\n\n%s", err, input) + } + if diff := deep.Equal(out, tc.Output); len(diff) > 0 { + t.Fatalf("bad:\nInput:\n%s\nDiff:\n%#v", input, diff) + } + } +} + +func TestHashResponse(t *testing.T) { + now := time.Now() + + cases := []struct { + Input *logical.Response + Output *logical.Response + NonHMACDataKeys []string + HMACAccessor bool + }{ { &logical.Response{ Data: map[string]interface{}{ @@ -149,6 +248,7 @@ func TestHash(t *testing.T) { // Responses can contain time values, so test that with // a known fixed value. 
"bar": now, + "om": &testOptMarshaler{S: "bar", I: 1}, }, WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, @@ -163,6 +263,7 @@ func TestHash(t *testing.T) { "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", "baz": "foobar", "bar": now.Format(time.RFC3339Nano), + "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), }, WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, @@ -173,28 +274,7 @@ func TestHash(t *testing.T) { }, }, []string{"baz"}, - }, - { - "foo", - "foo", - nil, - }, - { - &logical.Auth{ - LeaseOptions: logical.LeaseOptions{ - TTL: 1 * time.Hour, - }, - - ClientToken: "foo", - }, - &logical.Auth{ - LeaseOptions: logical.LeaseOptions{ - TTL: 1 * time.Hour, - }, - - ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a", - }, - nil, + true, }, } @@ -212,16 +292,12 @@ func TestHash(t *testing.T) { } for _, tc := range cases { input := fmt.Sprintf("%#v", tc.Input) - if err := Hash(localSalt, tc.Input, tc.NonHMACDataKeys); err != nil { + out, err := HashResponse(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) + if err != nil { t.Fatalf("err: %s\n\n%s", err, input) } - if _, ok := tc.Input.(*logical.Response); ok { - if !reflect.DeepEqual(tc.Input.(*logical.Response).WrapInfo, tc.Output.(*logical.Response).WrapInfo) { - t.Fatalf("bad:\nInput:\n%s\nTest case input:\n%#v\nTest case output:\n%#v", input, tc.Input.(*logical.Response).WrapInfo, tc.Output.(*logical.Response).WrapInfo) - } - } - if !reflect.DeepEqual(tc.Input, tc.Output) { - t.Fatalf("bad:\nInput:\n%s\nTest case input:\n%#v\nTest case output:\n%#v", input, tc.Input, tc.Output) + if diff := deep.Equal(out, tc.Output); len(diff) > 0 { + t.Fatalf("bad:\nInput:\n%s\nDiff:\n%#v", input, diff) } } } @@ -230,8 +306,8 @@ func TestHashWalker(t *testing.T) { replaceText := "foo" cases := []struct { - Input interface{} - Output interface{} + Input map[string]interface{} + 
Output map[string]interface{} }{ { map[string]interface{}{ @@ -253,14 +329,68 @@ func TestHashWalker(t *testing.T) { } for _, tc := range cases { - output, err := HashStructure(tc.Input, func(string) string { + err := HashStructure(tc.Input, func(string) string { return replaceText - }, []string{}) + }, nil) if err != nil { t.Fatalf("err: %s\n\n%#v", err, tc.Input) } - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, output) + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, tc.Output) + } + } +} + +func TestHashWalker_TimeStructs(t *testing.T) { + replaceText := "bar" + + now := time.Now() + cases := []struct { + Input map[string]interface{} + Output map[string]interface{} + }{ + // Should not touch map keys of type time.Time. + { + map[string]interface{}{ + "hello": map[time.Time]struct{}{ + now: {}, + }, + }, + map[string]interface{}{ + "hello": map[time.Time]struct{}{ + now: {}, + }, + }, + }, + // Should handle map values of type time.Time. + { + map[string]interface{}{ + "hello": now, + }, + map[string]interface{}{ + "hello": now.Format(time.RFC3339Nano), + }, + }, + // Should handle slice values of type time.Time. 
+ { + map[string]interface{}{ + "hello": []interface{}{"foo", now, "foo2"}, + }, + map[string]interface{}{ + "hello": []interface{}{"foobar", now.Format(time.RFC3339Nano), "foo2bar"}, + }, + }, + } + + for _, tc := range cases { + err := HashStructure(tc.Input, func(s string) string { + return s + replaceText + }, nil) + if err != nil { + t.Fatalf("err: %v\n\n%#v", err, tc.Input) + } + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, tc.Output) } } } diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go index 4023f12d45b2..ebe38a00c5c7 100644 --- a/builtin/audit/file/backend.go +++ b/builtin/audit/file/backend.go @@ -1,14 +1,16 @@ package file import ( + "bytes" "context" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "strconv" "strings" "sync" + "sync/atomic" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/audit" @@ -87,12 +89,17 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err mode: mode, saltConfig: conf.SaltConfig, saltView: conf.SaltView, + salt: new(atomic.Value), formatConfig: audit.FormatterConfig{ Raw: logRaw, HMACAccessor: hmacAccessor, }, } + // Ensure we are working with the right type by explicitly storing a nil of + // the right type + b.salt.Store((*salt.Salt)(nil)) + switch format { case "json": b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ @@ -137,7 +144,7 @@ type Backend struct { mode os.FileMode saltMutex sync.RWMutex - salt *salt.Salt + salt *atomic.Value saltConfig *salt.Config saltView logical.Storage } @@ -145,23 +152,27 @@ type Backend struct { var _ audit.Backend = (*Backend)(nil) func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { - b.saltMutex.RLock() - if b.salt != nil { - defer b.saltMutex.RUnlock() - return b.salt, nil + s := b.salt.Load().(*salt.Salt) + if s != nil { + return s, nil } - b.saltMutex.RUnlock() + b.saltMutex.Lock() defer b.saltMutex.Unlock() - if b.salt != nil { - return b.salt, nil + + s = 
b.salt.Load().(*salt.Salt) + if s != nil { + return s, nil } - salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) + + newSalt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) if err != nil { + b.salt.Store((*salt.Salt)(nil)) return nil, err } - b.salt = salt - return salt, nil + + b.salt.Store(newSalt) + return newSalt, nil } func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { @@ -169,68 +180,82 @@ func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { if err != nil { return "", err } + return audit.HashString(salt, data), nil } -func (b *Backend) LogRequest(ctx context.Context, in *audit.LogInput) error { - b.fileLock.Lock() - defer b.fileLock.Unlock() - +func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { + var writer io.Writer switch b.path { case "stdout": - return b.formatter.FormatRequest(ctx, os.Stdout, b.formatConfig, in) + writer = os.Stdout case "discard": - return b.formatter.FormatRequest(ctx, ioutil.Discard, b.formatConfig, in) + return nil } - if err := b.open(); err != nil { + buf := bytes.NewBuffer(make([]byte, 0, 2000)) + err := b.formatter.FormatRequest(ctx, buf, b.formatConfig, in) + if err != nil { return err } - if err := b.formatter.FormatRequest(ctx, b.f, b.formatConfig, in); err == nil { + return b.log(ctx, buf, writer) +} + +func (b *Backend) log(ctx context.Context, buf *bytes.Buffer, writer io.Writer) error { + reader := bytes.NewReader(buf.Bytes()) + + b.fileLock.Lock() + + if writer == nil { + if err := b.open(); err != nil { + b.fileLock.Unlock() + return err + } + writer = b.f + } + + if _, err := reader.WriteTo(writer); err == nil { + b.fileLock.Unlock() return nil + } else if b.path == "stdout" { + b.fileLock.Unlock() + return err } - // Opportunistically try to re-open the FD, once per call + // If writing to stdout there's no real reason to think anything would have + // changed so return above. 
Otherwise, opportunistically try to re-open the + // FD, once per call. b.f.Close() b.f = nil if err := b.open(); err != nil { + b.fileLock.Unlock() return err } - return b.formatter.FormatRequest(ctx, b.f, b.formatConfig, in) + reader.Seek(0, io.SeekStart) + _, err := reader.WriteTo(writer) + b.fileLock.Unlock() + return err } -func (b *Backend) LogResponse(ctx context.Context, in *audit.LogInput) error { - - b.fileLock.Lock() - defer b.fileLock.Unlock() - +func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { + var writer io.Writer switch b.path { case "stdout": - return b.formatter.FormatResponse(ctx, os.Stdout, b.formatConfig, in) + writer = os.Stdout case "discard": - return b.formatter.FormatResponse(ctx, ioutil.Discard, b.formatConfig, in) - } - - if err := b.open(); err != nil { - return err - } - - if err := b.formatter.FormatResponse(ctx, b.f, b.formatConfig, in); err == nil { return nil } - // Opportunistically try to re-open the FD, once per call - b.f.Close() - b.f = nil - - if err := b.open(); err != nil { + buf := bytes.NewBuffer(make([]byte, 0, 6000)) + err := b.formatter.FormatResponse(ctx, buf, b.formatConfig, in) + if err != nil { return err } - return b.formatter.FormatResponse(ctx, b.f, b.formatConfig, in) + return b.log(ctx, buf, writer) } // The file lock must be held before calling this @@ -291,5 +316,5 @@ func (b *Backend) Reload(_ context.Context) error { func (b *Backend) Invalidate(_ context.Context) { b.saltMutex.Lock() defer b.saltMutex.Unlock() - b.salt = nil + b.salt.Store((*salt.Salt)(nil)) } diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go index ed335f790a63..0410f9e0cdc5 100644 --- a/builtin/audit/file/backend_test.go +++ b/builtin/audit/file/backend_test.go @@ -7,8 +7,10 @@ import ( "path/filepath" "strconv" "testing" + "time" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/salt" 
"github.com/hashicorp/vault/sdk/logical" ) @@ -90,3 +92,52 @@ func TestAuditFile_fileModeExisting(t *testing.T) { t.Fatalf("File mode does not match.") } } + +func BenchmarkAuditFile_request(b *testing.B) { + config := map[string]string{ + "path": "/dev/null", + } + sink, err := Factory(context.Background(), &audit.BackendConfig{ + Config: config, + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + }) + if err != nil { + b.Fatal(err) + } + + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": []string{"bar"}, + }, + }, + } + + ctx := namespace.RootContext(nil) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := sink.LogRequest(ctx, in); err != nil { + panic(err) + } + } + }) +} diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go index 3eb9fdde61b3..ddec20cc43f1 100644 --- a/builtin/audit/socket/backend.go +++ b/builtin/audit/socket/backend.go @@ -131,7 +131,7 @@ func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { return audit.HashString(salt, data), nil } -func (b *Backend) LogRequest(ctx context.Context, in *audit.LogInput) error { +func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { var buf bytes.Buffer if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { return err @@ -154,7 +154,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *audit.LogInput) error { return err } -func (b *Backend) LogResponse(ctx context.Context, in *audit.LogInput) error { +func (b 
*Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { var buf bytes.Buffer if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { return err diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go index b72c06a91f9f..ee3eb78f98dc 100644 --- a/builtin/audit/syslog/backend.go +++ b/builtin/audit/syslog/backend.go @@ -118,7 +118,7 @@ func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { return audit.HashString(salt, data), nil } -func (b *Backend) LogRequest(ctx context.Context, in *audit.LogInput) error { +func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { var buf bytes.Buffer if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { return err @@ -129,7 +129,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *audit.LogInput) error { return err } -func (b *Backend) LogResponse(ctx context.Context, in *audit.LogInput) error { +func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { var buf bytes.Buffer if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { return err diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go index 671789f9a442..be1347973415 100644 --- a/builtin/credential/approle/path_login.go +++ b/builtin/credential/approle/path_login.go @@ -264,16 +264,12 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat } // Parse the CIDRs we should be binding the token to. 
- var tokenBoundCIDRStrings []string - if entry != nil { - tokenBoundCIDRStrings = entry.TokenBoundCIDRs - } - if len(tokenBoundCIDRStrings) == 0 { - tokenBoundCIDRStrings = role.TokenBoundCIDRs - } - tokenBoundCIDRs, err := parseutil.ParseAddrs(tokenBoundCIDRStrings) - if err != nil { - return logical.ErrorResponse(err.Error()), nil + tokenBoundCIDRs := role.TokenBoundCIDRs + if entry != nil && len(entry.TokenBoundCIDRs) > 0 { + tokenBoundCIDRs, err = parseutil.ParseAddrs(entry.TokenBoundCIDRs) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } } // For some reason, if metadata was set to nil while processing secret ID @@ -286,32 +282,18 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat metadata["role_name"] = role.name auth := &logical.Auth{ - NumUses: role.TokenNumUses, - Period: role.Period, InternalData: map[string]interface{}{ "role_name": role.name, }, Metadata: metadata, - Policies: role.Policies, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - TTL: role.TokenTTL, - MaxTTL: role.TokenMaxTTL, - }, Alias: &logical.Alias{ Name: role.RoleID, }, - BoundCIDRs: tokenBoundCIDRs, } + role.PopulateTokenAuth(auth) - switch role.TokenType { - case "default": - auth.TokenType = logical.TokenTypeDefault - case "batch": - auth.TokenType = logical.TokenTypeBatch - case "service": - auth.TokenType = logical.TokenTypeService - } + // Allow for overridden token bound CIDRs + auth.BoundCIDRs = tokenBoundCIDRs return &logical.Response{ Auth: auth, @@ -341,7 +323,7 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, data resp := &logical.Response{Auth: req.Auth} resp.Auth.TTL = role.TokenTTL resp.Auth.MaxTTL = role.TokenMaxTTL - resp.Auth.Period = role.Period + resp.Auth.Period = role.TokenPeriod return resp, nil } diff --git a/builtin/credential/approle/path_login_test.go b/builtin/credential/approle/path_login_test.go index da6d4efaba79..7fde6f451cba 100644 --- 
a/builtin/credential/approle/path_login_test.go +++ b/builtin/credential/approle/path_login_test.go @@ -89,6 +89,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { if err == nil { t.Fatal("expected error due to mismatching subnet relationship") } + roleSecretIDReq.Data["token_bound_cidrs"] = "10.0.0.0/24" resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) if err != nil || (resp != nil && resp.IsError()) { diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index d17bcc26c244..e53a0fe8ccef 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -13,13 +13,17 @@ import ( "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) // roleStorageEntry stores all the options that are set on an role type roleStorageEntry struct { + tokenutil.TokenParams + // Name of the role. This field is not persisted on disk. After the role is // read out of disk, the sanitized version of name is set in this field for // subsequent use of role name elsewhere. @@ -33,7 +37,7 @@ type roleStorageEntry struct { // of the role HMACKey string `json:"hmac_key" mapstructure:"hmac_key"` - // Policies that are to be required by the token to access this role + // Policies that are to be required by the token to access this role. Deprecated. 
Policies []string `json:"policies" mapstructure:"policies"` // Number of times the SecretID generated against this role can be @@ -44,15 +48,6 @@ type roleStorageEntry struct { // SecretID generated against the role will expire SecretIDTTL time.Duration `json:"secret_id_ttl" mapstructure:"secret_id_ttl"` - // TokenNumUses defines the number of allowed uses of the token issued - TokenNumUses int `json:"token_num_uses" mapstructure:"token_num_uses"` - - // Duration before which an issued token must be renewed - TokenTTL time.Duration `json:"token_ttl" mapstructure:"token_ttl"` - - // Duration after which an issued token should not be allowed to be renewed - TokenMaxTTL time.Duration `json:"token_max_ttl" mapstructure:"token_max_ttl"` - // A constraint, if set, requires 'secret_id' credential to be presented during login BindSecretID bool `json:"bind_secret_id" mapstructure:"bind_secret_id"` @@ -67,14 +62,11 @@ type roleStorageEntry struct { // A constraint, if set, specifies the CIDR blocks from which logins should be allowed SecretIDBoundCIDRs []string `json:"secret_id_bound_cidrs" mapstructure:"secret_id_bound_cidrs"` - // A constraint, if set, specifies the CIDR blocks from which token use should be allowed - TokenBoundCIDRs []string `json:"token_bound_cidrs" mapstructure:"token_bound_cidrs"` - // Period, if set, indicates that the token generated using this role // should never expire. The token should be renewed within the duration - // specified by this value. The renewal duration will be fixed if the - // value is not modified on the role. If the `Period` in the role is modified, - // a token will pick up the new value during its next renewal. + // specified by this value. The renewal duration will be fixed if the value + // is not modified on the role. If the `Period` in the role is modified, a + // token will pick up the new value during its next renewal. Deprecated. 
Period time.Duration `json:"period" mapstructure:"period"` // LowerCaseRoleName enforces the lower casing of role names for all the @@ -84,9 +76,6 @@ type roleStorageEntry struct { // SecretIDPrefix is the storage prefix for persisting secret IDs. This // differs based on whether the secret IDs are cluster local or not. SecretIDPrefix string `json:"secret_id_prefix" mapstructure:"secret_id_prefix"` - - // TokenType is the type of token to generate - TokenType string `json:"token_type" mapstructure:"token_type"` } // roleIDStorageEntry represents the reverse mapping from RoleID to Role @@ -116,104 +105,90 @@ type roleIDStorageEntry struct { // role//secret-id-accessor/lookup - For reading secret_id using accessor // role//secret-id-accessor/destroy - For deleting secret_id using accessor func rolePaths(b *backend) []*framework.Path { - return []*framework.Path{ - &framework.Path{ - Pattern: "role/?", - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ListOperation: b.pathRoleList, + defTokenFields := tokenutil.TokenFields() + + p := &framework.Path{ + Pattern: "role/" + framework.GenericNameRegex("role_name"), + Fields: map[string]*framework.FieldSchema{ + "role_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role.", }, - HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), - HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]), - }, - &framework.Path{ - Pattern: "role/" + framework.GenericNameRegex("role_name"), - Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "Name of the role.", - }, - "bind_secret_id": &framework.FieldSchema{ - Type: framework.TypeBool, - Default: true, - Description: "Impose secret_id to be presented when logging in using this role. 
Defaults to 'true'.", - }, - // Deprecated - "bound_cidr_list": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Deprecated: Please use "secret_id_bound_cidrs" instead. Comma separated string or list -of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, - }, - "secret_id_bound_cidrs": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of + "bind_secret_id": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: true, + Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.", + }, + + "bound_cidr_list": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Use "secret_id_bound_cidrs" instead.`, + Deprecated: true, + }, + + "secret_id_bound_cidrs": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, - }, - "token_bound_cidrs": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of -IP addresses which can use the returned token.`, - }, - "policies": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: "default", - Description: "Comma separated list of policies on the role.", - }, - "secret_id_num_uses": &framework.FieldSchema{ - Type: framework.TypeInt, - Description: `Number of times a SecretID can access the role, after which the SecretID + }, + + "policies": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + + "secret_id_num_uses": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `Number of times a SecretID can access the role, after which the SecretID will expire. Defaults to 0 meaning that the the secret_id is of unlimited use.`, - }, - "secret_id_ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which the issued SecretID should expire. Defaults + }, + + "secret_id_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `Duration in seconds after which the issued SecretID should expire. Defaults to 0, meaning no expiration.`, - }, - "token_num_uses": &framework.FieldSchema{ - Type: framework.TypeInt, - Description: `Number of times issued tokens can be used`, - }, - "token_ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which the issued token should expire. Defaults -to 0, in which case the value will fall back to the system/mount defaults.`, - }, - "token_max_ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which the issued token should not be allowed to -be renewed. 
Defaults to 0, in which case the value will fall back to the system/mount defaults.`, - }, - "period": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Default: 0, - Description: `If set, indicates that the token generated using this role -should never expire. The token should be renewed within the -duration specified by this value. At each renewal, the token's -TTL will be set to the value of this parameter.`, - }, - "role_id": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "Identifier of the role. Defaults to a UUID.", - }, - "local_secret_ids": &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `If set, the secret IDs generated using this role will be cluster local. This + }, + + "period": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, + }, + + "role_id": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Identifier of the role. Defaults to a UUID.", + }, + + "local_secret_ids": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `If set, the secret IDs generated using this role will be cluster local. 
This can only be set during role creation and once set, it can't be reset later.`, - }, - "token_type": &framework.FieldSchema{ - Type: framework.TypeString, - Default: "default", - Description: `The type of token to generate ("service" or "batch"), or "default" to use the default`, - }, }, - ExistenceCheck: b.pathRoleExistenceCheck, + }, + ExistenceCheck: b.pathRoleExistenceCheck, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.CreateOperation: b.pathRoleCreateUpdate, + logical.UpdateOperation: b.pathRoleCreateUpdate, + logical.ReadOperation: b.pathRoleRead, + logical.DeleteOperation: b.pathRoleDelete, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role"][1]), + } + + tokenutil.AddTokenFields(p.Fields) + + return []*framework.Path{ + p, + &framework.Path{ + Pattern: "role/?", Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.CreateOperation: b.pathRoleCreateUpdate, - logical.UpdateOperation: b.pathRoleCreateUpdate, - logical.ReadOperation: b.pathRoleRead, - logical.DeleteOperation: b.pathRoleDelete, + logical.ListOperation: b.pathRoleList, }, - HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]), - HelpDescription: strings.TrimSpace(roleHelp["role"][1]), + HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]), }, &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role_name") + "/local-secret-ids$", @@ -238,8 +213,12 @@ can only be set during role creation and once set, it can't be reset later.`, }, "policies": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, - Default: "default", - Description: "Comma separated list of policies on the role.", + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + "token_policies": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: defTokenFields["token_policies"].Description, }, 
}, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -285,7 +264,7 @@ IP addresses which can perform the login operation.`, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathRoleBoundCIDRUpdate, + logical.UpdateOperation: b.pathRoleSecretIDBoundCIDRUpdate, logical.ReadOperation: b.pathRoleSecretIDBoundCIDRRead, logical.DeleteOperation: b.pathRoleSecretIDBoundCIDRDelete, }, @@ -300,13 +279,12 @@ IP addresses which can perform the login operation.`, Description: "Name of the role.", }, "token_bound_cidrs": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of -IP addresses which can use the returned token.`, + Type: framework.TypeCommaStringSlice, + Description: defTokenFields["token_bound_cidrs"].Description, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathRoleBoundCIDRUpdate, + logical.UpdateOperation: b.pathRoleTokenBoundCIDRUpdate, logical.ReadOperation: b.pathRoleTokenBoundCIDRRead, logical.DeleteOperation: b.pathRoleTokenBoundCIDRDelete, }, @@ -383,12 +361,13 @@ to 0, meaning no expiration.`, Description: "Name of the role.", }, "period": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Default: 0, - Description: `If set, indicates that the token generated using this role -should never expire. The token should be renewed within the -duration specified by this value. 
At each renewal, the token's -TTL will be set to the value of this parameter.`, + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, + }, + "token_period": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: defTokenFields["token_period"].Description, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -408,7 +387,7 @@ TTL will be set to the value of this parameter.`, }, "token_num_uses": &framework.FieldSchema{ Type: framework.TypeInt, - Description: `Number of times issued tokens can be used`, + Description: defTokenFields["token_num_uses"].Description, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -427,9 +406,8 @@ TTL will be set to the value of this parameter.`, Description: "Name of the role.", }, "token_ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which the issued token should expire. Defaults -to 0, in which case the value will fall back to the system/mount defaults.`, + Type: framework.TypeDurationSecond, + Description: defTokenFields["token_ttl"].Description, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -448,9 +426,8 @@ to 0, in which case the value will fall back to the system/mount defaults.`, Description: "Name of the role.", }, "token_max_ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which the issued token should not be allowed to -be renewed. 
Defaults to 0, in which case the value will fall back to the system/mount defaults.`, + Type: framework.TypeDurationSecond, + Description: defTokenFields["token_max_ttl"].Description, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -500,9 +477,8 @@ list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.`, }, "token_bound_cidrs": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of -IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.`, + Type: framework.TypeCommaStringSlice, + Description: defTokenFields["token_bound_cidrs"].Description, }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -847,6 +823,14 @@ func (b *backend) roleEntry(ctx context.Context, s logical.Storage, roleName str needsUpgrade = true } + if role.TokenPeriod == 0 && role.Period > 0 { + role.TokenPeriod = role.Period + } + + if len(role.TokenPolicies) == 0 && len(role.Policies) > 0 { + role.TokenPolicies = role.Policies + } + if needsUpgrade && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby)) { entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), &role) if err != nil { @@ -902,6 +886,24 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request return logical.ErrorResponse(fmt.Sprintf("role name %q doesn't exist", roleName)), logical.ErrUnsupportedPath } + var resp *logical.Response + + // Handle a backwards compat case + if tokenTypeRaw, ok := data.Raw["token_type"]; ok { + switch tokenTypeRaw.(string) { + case "default-service": + data.Raw["token_type"] = "service" + resp.AddWarning("default-service has no useful meaning; adjusting to service") + case "default-batch": + data.Raw["token_type"] = "batch" + 
resp.AddWarning("default-batch has no useful meaning; adjusting to batch") + } + } + + if err := role.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + localSecretIDsRaw, ok := data.GetOk("local_secret_ids") if ok { switch { @@ -949,36 +951,6 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request } } - if boundCIDRListRaw, ok := data.GetOk("token_bound_cidrs"); ok { - role.TokenBoundCIDRs = boundCIDRListRaw.([]string) - } - - if len(role.TokenBoundCIDRs) != 0 { - valid, err := cidrutil.ValidateCIDRListSlice(role.TokenBoundCIDRs) - if err != nil { - return nil, errwrap.Wrapf("failed to validate CIDR blocks: {{err}}", err) - } - if !valid { - return logical.ErrorResponse("invalid CIDR blocks"), nil - } - } - - if policiesRaw, ok := data.GetOk("policies"); ok { - role.Policies = policyutil.ParsePolicies(policiesRaw) - } else if req.Operation == logical.CreateOperation { - role.Policies = policyutil.ParsePolicies(data.Get("policies")) - } - - periodRaw, ok := data.GetOk("period") - if ok { - role.Period = time.Second * time.Duration(periodRaw.(int)) - } else if req.Operation == logical.CreateOperation { - role.Period = time.Second * time.Duration(data.Get("period").(int)) - } - if role.Period > b.System().MaxLeaseTTL() { - return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", role.Period.String(), b.System().MaxLeaseTTL().String())), nil - } - if secretIDNumUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok { role.SecretIDNumUses = secretIDNumUsesRaw.(int) } else if req.Operation == logical.CreateOperation { @@ -994,59 +966,21 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request role.SecretIDTTL = time.Second * time.Duration(data.Get("secret_id_ttl").(int)) } - if tokenNumUsesRaw, ok := data.GetOk("token_num_uses"); ok { - role.TokenNumUses = tokenNumUsesRaw.(int) - } else if 
req.Operation == logical.CreateOperation { - role.TokenNumUses = data.Get("token_num_uses").(int) - } - if role.TokenNumUses < 0 { - return logical.ErrorResponse("token_num_uses cannot be negative"), nil - } - - if tokenTTLRaw, ok := data.GetOk("token_ttl"); ok { - role.TokenTTL = time.Second * time.Duration(tokenTTLRaw.(int)) - } else if req.Operation == logical.CreateOperation { - role.TokenTTL = time.Second * time.Duration(data.Get("token_ttl").(int)) - } - - if tokenMaxTTLRaw, ok := data.GetOk("token_max_ttl"); ok { - role.TokenMaxTTL = time.Second * time.Duration(tokenMaxTTLRaw.(int)) - } else if req.Operation == logical.CreateOperation { - role.TokenMaxTTL = time.Second * time.Duration(data.Get("token_max_ttl").(int)) - } - - tokenType := role.TokenType - if tokenTypeRaw, ok := data.GetOk("token_type"); ok { - tokenType = tokenTypeRaw.(string) - switch tokenType { - case "": - tokenType = "default" - case "service", "batch", "default": - default: - return logical.ErrorResponse(fmt.Sprintf("invalid 'token_type' value %q", tokenType)), nil + // handle upgrade cases + { + if err := tokenutil.UpgradeValue(data, "policies", "token_policies", &role.Policies, &role.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil } - } else if req.Operation == logical.CreateOperation { - tokenType = data.Get("token_type").(string) - } - role.TokenType = tokenType - if role.TokenType == "batch" { - if role.Period != 0 { - return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate periodic tokens"), nil - } - if role.TokenNumUses != 0 { - return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate tokens with limited use count"), nil + if err := tokenutil.UpgradeValue(data, "period", "token_period", &role.Period, &role.TokenPeriod); err != nil { + return logical.ErrorResponse(err.Error()), nil } } - // Check that the TokenTTL value provided is less than the TokenMaxTTL. 
- // Sanitizing the TTL and MaxTTL is not required now and can be performed - // at credential issue time. - if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL { - return logical.ErrorResponse("token_ttl should not be greater than token_max_ttl"), nil + if role.Period > b.System().MaxLeaseTTL() { + return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", role.Period.String(), b.System().MaxLeaseTTL().String())), nil } - var resp *logical.Response if role.TokenMaxTTL > b.System().MaxLeaseTTL() { resp = &logical.Response{} resp.AddWarning("token_max_ttl is greater than the backend mount's maximum TTL value; issued tokens' max TTL value will be truncated") @@ -1079,27 +1013,26 @@ func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data * } respData := map[string]interface{}{ - "bind_secret_id": role.BindSecretID, - // TODO - remove this deprecated field in future versions, - // and its associated warning below. 
- "bound_cidr_list": role.SecretIDBoundCIDRs, + "bind_secret_id": role.BindSecretID, "secret_id_bound_cidrs": role.SecretIDBoundCIDRs, - "token_bound_cidrs": role.TokenBoundCIDRs, - "period": role.Period / time.Second, - "policies": role.Policies, "secret_id_num_uses": role.SecretIDNumUses, "secret_id_ttl": role.SecretIDTTL / time.Second, - "token_max_ttl": role.TokenMaxTTL / time.Second, - "token_num_uses": role.TokenNumUses, - "token_ttl": role.TokenTTL / time.Second, "local_secret_ids": false, - "token_type": role.TokenType, } + role.PopulateTokenData(respData) if role.SecretIDPrefix == secretIDLocalPrefix { respData["local_secret_ids"] = true } + // Backwards compat data + if role.Period != 0 { + respData["period"] = role.Period / time.Second + } + if len(role.Policies) > 0 { + respData["policies"] = role.Policies + } + resp := &logical.Response{ Data: respData, } @@ -1107,7 +1040,6 @@ func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data * if err := validateRoleConstraints(role); err != nil { resp.AddWarning("Role does not have any constraints set on it. Updates to this role will require a constraint to be set") } - resp.AddWarning(`The "bound_cidr_list" parameter is deprecated and will be removed in favor of "secret_id_bound_cidrs".`) // For sanity, verify that the index still exists. If the index is missing, // add one and return a warning so it can be reported. 
@@ -1256,7 +1188,7 @@ func (b *backend) pathRoleSecretIDLookupUpdate(ctx context.Context, req *logical } func (entry *secretIDStorageEntry) ToResponseData() map[string]interface{} { - return map[string]interface{}{ + ret := map[string]interface{}{ "secret_id_accessor": entry.SecretIDAccessor, "secret_id_num_uses": entry.SecretIDNumUses, "secret_id_ttl": entry.SecretIDTTL / time.Second, @@ -1267,6 +1199,10 @@ func (entry *secretIDStorageEntry) ToResponseData() map[string]interface{} { "cidr_list": entry.CIDRList, "token_bound_cidrs": entry.TokenBoundCIDRs, } + if len(entry.TokenBoundCIDRs) == 0 { + ret["token_bound_cidrs"] = []string{} + } + return ret } func (b *backend) pathRoleSecretIDDestroyUpdateDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -1444,6 +1380,24 @@ func (b *backend) pathRoleSecretIDAccessorDestroyUpdateDelete(ctx context.Contex } func (b *backend) pathRoleBoundCIDRUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + delete(data.Raw, "token_bound_cidrs") + delete(data.Raw, "secret_id_bound_cidrs") + return b.pathRoleBoundCIDRUpdateCommon(ctx, req, data) +} + +func (b *backend) pathRoleSecretIDBoundCIDRUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + delete(data.Raw, "bound_cidr_list") + delete(data.Raw, "token_bound_cidrs") + return b.pathRoleBoundCIDRUpdateCommon(ctx, req, data) +} + +func (b *backend) pathRoleTokenBoundCIDRUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + delete(data.Raw, "bound_cidr_list") + delete(data.Raw, "secret_id_bound_cidrs") + return b.pathRoleBoundCIDRUpdateCommon(ctx, req, data) +} + +func (b *backend) pathRoleBoundCIDRUpdateCommon(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { roleName := data.Get("role_name").(string) if roleName == "" { 
return logical.ErrorResponse("missing role_name"), nil @@ -1462,24 +1416,27 @@ func (b *backend) pathRoleBoundCIDRUpdate(ctx context.Context, req *logical.Requ return nil, logical.ErrUnsupportedPath } - var cidrs []string if cidrsIfc, ok := data.GetFirst("secret_id_bound_cidrs", "bound_cidr_list"); ok { - cidrs = cidrsIfc.([]string) + cidrs := cidrsIfc.([]string) + if len(cidrs) == 0 { + return logical.ErrorResponse("missing bound_cidr_list"), nil + } + valid, err := cidrutil.ValidateCIDRListSlice(cidrs) + if err != nil { + return logical.ErrorResponse(errwrap.Wrapf("failed to validate CIDR blocks: {{err}}", err).Error()), nil + } + if !valid { + return logical.ErrorResponse("failed to validate CIDR blocks"), nil + } role.SecretIDBoundCIDRs = cidrs + } else if cidrsIfc, ok := data.GetOk("token_bound_cidrs"); ok { - cidrs = cidrsIfc.([]string) + cidrs, err := parseutil.ParseAddrs(cidrsIfc.([]string)) + if err != nil { + return logical.ErrorResponse(errwrap.Wrapf("failed to parse token_bound_cidrs: {{err}}", err).Error()), nil + } role.TokenBoundCIDRs = cidrs } - if len(cidrs) == 0 { - return logical.ErrorResponse("missing bound_cidr_list"), nil - } - valid, err := cidrutil.ValidateCIDRListSlice(cidrs) - if err != nil { - return nil, errwrap.Wrapf("failed to validate CIDR blocks: {{err}}", err) - } - if !valid { - return logical.ErrorResponse("failed to validate CIDR blocks"), nil - } return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") } @@ -1559,15 +1516,15 @@ func (b *backend) pathRoleBoundCIDRDelete(ctx context.Context, req *logical.Requ return nil, nil } - // Deleting a field implies setting the value to it's default value. 
switch fieldName { case "bound_cidr_list": - role.BoundCIDRList = data.GetDefaultOrZero("bound_cidr_list").([]string) + role.BoundCIDRList = nil case "secret_id_bound_cidrs": - role.SecretIDBoundCIDRs = data.GetDefaultOrZero("secret_id_bound_cidrs").([]string) + role.SecretIDBoundCIDRs = nil case "token_bound_cidrs": - role.TokenBoundCIDRs = data.GetDefaultOrZero("token_bound_cidrs").([]string) + role.TokenBoundCIDRs = nil } + return nil, b.setRoleEntry(ctx, req.Storage, roleName, role, "") } @@ -1706,13 +1663,25 @@ func (b *backend) pathRolePoliciesUpdate(ctx context.Context, req *logical.Reque return nil, logical.ErrUnsupportedPath } - policiesRaw, ok := data.GetOk("policies") + policiesRaw, ok := data.GetOk("token_policies") if !ok { - return logical.ErrorResponse("missing policies"), nil + policiesRaw, ok = data.GetOk("policies") + if ok { + role.Policies = policyutil.ParsePolicies(policiesRaw) + role.TokenPolicies = role.Policies + } else { + return logical.ErrorResponse("missing token_policies"), nil + } + } else { + role.TokenPolicies = policyutil.ParsePolicies(policiesRaw) + _, ok = data.GetOk("policies") + if ok { + role.Policies = role.TokenPolicies + } else { + role.Policies = nil + } } - role.Policies = policyutil.ParsePolicies(policiesRaw) - return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") } @@ -1734,10 +1703,20 @@ func (b *backend) pathRolePoliciesRead(ctx context.Context, req *logical.Request return nil, nil } + p := role.TokenPolicies + if p == nil { + p = []string{} + } + d := map[string]interface{}{ + "token_policies": p, + } + + if len(role.Policies) > 0 { + d["policies"] = role.Policies + } + return &logical.Response{ - Data: map[string]interface{}{ - "policies": role.Policies, - }, + Data: d, }, nil } @@ -1759,7 +1738,8 @@ func (b *backend) pathRolePoliciesDelete(ctx context.Context, req *logical.Reque return nil, nil } - role.Policies = []string{} + role.TokenPolicies = nil + role.Policies = nil return nil, b.setRoleEntry(ctx, 
req.Storage, role.name, role, "") } @@ -1985,15 +1965,29 @@ func (b *backend) pathRolePeriodUpdate(ctx context.Context, req *logical.Request return nil, logical.ErrUnsupportedPath } - if periodRaw, ok := data.GetOk("period"); ok { - role.Period = time.Second * time.Duration(periodRaw.(int)) - if role.Period > b.System().MaxLeaseTTL() { - return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", role.Period.String(), b.System().MaxLeaseTTL().String())), nil + periodRaw, ok := data.GetOk("token_period") + if !ok { + periodRaw, ok = data.GetOk("period") + if ok { + role.Period = time.Second * time.Duration(periodRaw.(int)) + role.TokenPeriod = role.Period + } else { + return logical.ErrorResponse("missing period"), nil } - return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") } else { - return logical.ErrorResponse("missing period"), nil + role.TokenPeriod = time.Second * time.Duration(periodRaw.(int)) + _, ok = data.GetOk("period") + if ok { + role.Period = role.TokenPeriod + } else { + role.Period = 0 + } } + + if role.TokenPeriod > b.System().MaxLeaseTTL() { + return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", role.Period.String(), b.System().MaxLeaseTTL().String())), nil + } + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") } func (b *backend) pathRolePeriodRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -2014,10 +2008,16 @@ func (b *backend) pathRolePeriodRead(ctx context.Context, req *logical.Request, return nil, nil } + d := map[string]interface{}{ + "token_period": role.TokenPeriod / time.Second, + } + + if role.Period > 0 { + d["period"] = role.Period / time.Second + } + return &logical.Response{ - Data: map[string]interface{}{ - "period": role.Period / time.Second, - }, + Data: d, }, nil } @@ -2039,7 +2039,8 @@ func (b *backend) pathRolePeriodDelete(ctx 
context.Context, req *logical.Request return nil, nil } - role.Period = time.Second * time.Duration(data.GetDefaultOrZero("period").(int)) + role.TokenPeriod = 0 + role.Period = 0 return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") } @@ -2338,7 +2339,11 @@ func (b *backend) handleRoleSecretIDCommon(ctx context.Context, req *logical.Req } } // Ensure that the token CIDRs on the secret ID are a subset of that of role's - if err := verifyCIDRRoleSecretIDSubset(secretIDTokenCIDRs, role.TokenBoundCIDRs); err != nil { + var roleCIDRs []string + for _, v := range role.TokenBoundCIDRs { + roleCIDRs = append(roleCIDRs, v.String()) + } + if err := verifyCIDRRoleSecretIDSubset(secretIDTokenCIDRs, roleCIDRs); err != nil { return nil, err } diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index cb3000fe9f16..24ce69211e51 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -2,12 +2,16 @@ package approle import ( "context" + "encoding/json" "reflect" "strings" "testing" "time" + "github.com/go-test/deep" + "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -519,7 +523,7 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { } // Check if the warning is being returned - if !strings.Contains(resp.Warnings[1], "Role identifier was missing an index back to role name.") { + if !strings.Contains(resp.Warnings[0], "Role identifier was missing an index back to role name.") { t.Fatalf("bad: expected a warning in the response") } @@ -1157,7 +1161,6 @@ func TestAppRole_RoleCRUD(t *testing.T) { "token_max_ttl": 500, "token_num_uses": 600, "secret_id_bound_cidrs": []string{"127.0.0.1/32", "127.0.0.1/16"}, - "bound_cidr_list": []string{"127.0.0.1/32", "127.0.0.1/16"}, // returned for backwards 
compatibility "token_bound_cidrs": []string{}, "token_type": "default", } @@ -1175,8 +1178,8 @@ func TestAppRole_RoleCRUD(t *testing.T) { } expectedStruct.RoleID = actualStruct.RoleID - if !reflect.DeepEqual(expectedStruct, actualStruct) { - t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct) + if diff := deep.Equal(expectedStruct, actualStruct); diff != nil { + t.Fatal(diff) } roleData = map[string]interface{}{ @@ -1313,6 +1316,9 @@ func TestAppRole_RoleCRUD(t *testing.T) { if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) } + if !reflect.DeepEqual(resp.Data["token_policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { + t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) + } roleReq.Operation = logical.DeleteOperation resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { @@ -1325,10 +1331,10 @@ func TestAppRole_RoleCRUD(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - expectedPolicies := []string{"default"} - actualPolicies := resp.Data["policies"].([]string) + expectedPolicies := []string{} + actualPolicies := resp.Data["token_policies"].([]string) if !policyutil.EquivalentPolicies(expectedPolicies, actualPolicies) { - t.Fatalf("bad: policies: expected:%s actual:%s", expectedPolicies, actualPolicies) + t.Fatalf("bad: token_policies: expected:%s actual:%s", expectedPolicies, actualPolicies) } // RUD for secret-id-num-uses field @@ -1491,7 +1497,7 @@ func TestAppRole_RoleCRUD(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - if resp.Data["period"].(time.Duration) != 0 { + if resp.Data["token_period"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") } @@ -1637,7 +1643,6 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { "token_num_uses": 600, "token_bound_cidrs": []string{"127.0.0.1/32", "127.0.0.1/16"}, 
"secret_id_bound_cidrs": []string{"127.0.0.1/32", "127.0.0.1/16"}, - "bound_cidr_list": []string{"127.0.0.1/32", "127.0.0.1/16"}, // provided for backwards compatibility "token_type": "default", } @@ -1753,9 +1758,13 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) } - if resp.Data["token_bound_cidrs"].([]string)[0] != "127.0.0.1/32" || - resp.Data["token_bound_cidrs"].([]string)[1] != "127.0.0.1/16" { - t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/32,127.0.0.1/16 actual:%d\n", resp.Data["token_bound_cidrs"].(int)) + if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1" || + resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[1].String() != "127.0.0.1/16" { + m, err := json.Marshal(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) + if err != nil { + t.Fatal(err) + } + t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/32,127.0.0.1/16 actual:%s\n", string(m)) } roleReq.Data = map[string]interface{}{"token_bound_cidrs": []string{"127.0.0.1/20"}} @@ -1771,8 +1780,8 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - if resp.Data["token_bound_cidrs"].([]string)[0] != "127.0.0.1/20" { - t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["token_bound_cidrs"].([]string)[0]) + if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1/20" { + t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0]) } roleReq.Operation = logical.DeleteOperation @@ -1787,7 +1796,7 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - if len(resp.Data["token_bound_cidrs"].([]string)) != 0 { + if len(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) != 0 { 
t.Fatalf("expected value to be reset") } @@ -1830,3 +1839,52 @@ func createRole(t *testing.T, b *backend, s logical.Storage, roleName, policies t.Fatalf("err:%v resp:%#v", err, resp) } } + +// TestAppRole_TokenutilUpgrade ensures that when we read values out that are +// values with upgrade logic we see the correct struct entries populated +func TestAppRole_TokenutilUpgrade(t *testing.T) { + s := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = s + + ctx := context.Background() + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + if b == nil { + t.Fatalf("failed to create backend") + } + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + + // Hand craft JSON because there is overlap between fields + if err := s.Put(ctx, &logical.StorageEntry{ + Key: "role/foo", + Value: []byte(`{"policies": ["foo"], "period": 300000000000, "token_bound_cidrs": ["127.0.0.1", "10.10.10.10/24"]}`), + }); err != nil { + t.Fatal(err) + } + + fooEntry, err := b.roleEntry(ctx, s, "foo") + if err != nil { + t.Fatal(err) + } + + exp := &roleStorageEntry{ + SecretIDPrefix: "secret_id/", + Policies: []string{"foo"}, + Period: 300 * time.Second, + TokenParams: tokenutil.TokenParams{ + TokenPolicies: []string{"foo"}, + TokenPeriod: 300 * time.Second, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}, &sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("10.10.10.10/24")}}, + }, + } + if diff := deep.Equal(fooEntry, exp); diff != nil { + t.Fatal(diff) + } +} diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index b9c124da81ae..0209e6773c05 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -3,6 +3,7 @@ package awsauth import ( "context" "fmt" + "strings" "sync" "time" @@ -76,6 +77,9 @@ type backend struct { // accounts using their IAM instance profile to get their credentials. 
defaultAWSAccountID string + // roleCache caches role entries to avoid locking headaches + roleCache *cache.Cache + resolveArnToUniqueIDFunc func(context.Context, logical.Storage, string) (string, error) } @@ -89,6 +93,7 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour), tidyBlacklistCASGuard: new(uint32), tidyWhitelistCASGuard: new(uint32), + roleCache: cache.New(cache.NoExpiration, cache.NoExpiration), } b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId @@ -201,13 +206,16 @@ func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error } func (b *backend) invalidate(ctx context.Context, key string) { - switch key { - case "config/client": + switch { + case key == "config/client": b.configMutex.Lock() defer b.configMutex.Unlock() b.flushCachedEC2Clients() b.flushCachedIAMClients() b.defaultAWSAccountID = "" + case strings.HasPrefix(key, "role"): + // TODO: We could make this better + b.roleCache.Flush() } } diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index 030bed7578b6..b8b947350b31 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -60,7 +60,7 @@ func TestBackend_CreateParseVerifyRoleTag(t *testing.T) { } // read the created role entry - roleEntry, err := b.lockedAWSRole(context.Background(), storage, "abcd-123") + roleEntry, err := b.role(context.Background(), storage, "abcd-123") if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestBackend_CreateParseVerifyRoleTag(t *testing.T) { } // get the entry of the newly created role entry - roleEntry2, err := b.lockedAWSRole(context.Background(), storage, "ami-6789") + roleEntry2, err := b.role(context.Background(), storage, "ami-6789") if err != nil { t.Fatal(err) } diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index 2c6b4f69dee2..8a83685e8a2e 100644 --- 
a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -24,6 +24,7 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" @@ -597,7 +598,7 @@ func (b *backend) pathLoginUpdateEc2(ctx context.Context, req *logical.Request, } // Get the entry for the role used by the instance - roleEntry, err := b.lockedAWSRole(ctx, req.Storage, roleName) + roleEntry, err := b.role(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -605,6 +606,11 @@ func (b *backend) pathLoginUpdateEc2(ctx context.Context, req *logical.Request, return logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil } + // Check for a CIDR match. + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, roleEntry.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + if roleEntry.AuthType != ec2AuthType { return logical.ErrorResponse(fmt.Sprintf("auth method ec2 not allowed for role %s", roleName)), nil } @@ -729,14 +735,14 @@ func (b *backend) pathLoginUpdateEc2(ctx context.Context, req *logical.Request, // attacks. 
shortestMaxTTL := b.System().MaxLeaseTTL() longestMaxTTL := b.System().MaxLeaseTTL() - if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL { - shortestMaxTTL = roleEntry.MaxTTL + if roleEntry.TokenMaxTTL > time.Duration(0) && roleEntry.TokenMaxTTL < shortestMaxTTL { + shortestMaxTTL = roleEntry.TokenMaxTTL } - if roleEntry.MaxTTL > longestMaxTTL { - longestMaxTTL = roleEntry.MaxTTL + if roleEntry.TokenMaxTTL > longestMaxTTL { + longestMaxTTL = roleEntry.TokenMaxTTL } - policies := roleEntry.Policies + policies := roleEntry.TokenPolicies rTagMaxTTL := time.Duration(0) var roleTagResp *roleTagLoginResponse if roleEntry.RoleTag != "" { @@ -812,28 +818,26 @@ func (b *backend) pathLoginUpdateEc2(ctx context.Context, req *logical.Request, return nil, err } - resp := &logical.Response{ - Auth: &logical.Auth{ - Period: roleEntry.Period, - Policies: policies, - Metadata: map[string]string{ - "instance_id": identityDocParsed.InstanceID, - "region": identityDocParsed.Region, - "account_id": identityDocParsed.AccountID, - "role_tag_max_ttl": rTagMaxTTL.String(), - "role": roleName, - "ami_id": identityDocParsed.AmiID, - }, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - TTL: roleEntry.TTL, - MaxTTL: shortestMaxTTL, - }, - Alias: &logical.Alias{ - Name: identityAlias, - }, + auth := &logical.Auth{ + Metadata: map[string]string{ + "instance_id": identityDocParsed.InstanceID, + "region": identityDocParsed.Region, + "account_id": identityDocParsed.AccountID, + "role_tag_max_ttl": rTagMaxTTL.String(), + "role": roleName, + "ami_id": identityDocParsed.AmiID, }, + Alias: &logical.Alias{ + Name: identityAlias, + }, + } + roleEntry.PopulateTokenAuth(auth) + + resp := &logical.Response{ + Auth: auth, } + resp.Auth.Policies = policies + resp.Auth.LeaseOptions.MaxTTL = shortestMaxTTL // Return the nonce only if reauthentication is allowed and if the nonce // was not supplied by the user. 
@@ -909,7 +913,7 @@ func (b *backend) handleRoleTagLogin(ctx context.Context, s logical.Storage, rol } // Ensure that the policies on the RoleTag is a subset of policies on the role - if !strutil.StrListSubset(roleEntry.Policies, rTag.Policies) { + if !strutil.StrListSubset(roleEntry.TokenPolicies, rTag.Policies) { return nil, fmt.Errorf("policies on the role tag must be subset of policies on the role") } @@ -951,7 +955,7 @@ func (b *backend) pathLoginRenewIam(ctx context.Context, req *logical.Request, d if roleName == "" { return nil, fmt.Errorf("error retrieving role_name during renewal") } - roleEntry, err := b.lockedAWSRole(ctx, req.Storage, roleName) + roleEntry, err := b.role(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -1039,9 +1043,9 @@ func (b *backend) pathLoginRenewIam(ctx context.Context, req *logical.Request, d } resp := &logical.Response{Auth: req.Auth} - resp.Auth.TTL = roleEntry.TTL - resp.Auth.MaxTTL = roleEntry.MaxTTL - resp.Auth.Period = roleEntry.Period + resp.Auth.TTL = roleEntry.TokenTTL + resp.Auth.MaxTTL = roleEntry.TokenMaxTTL + resp.Auth.Period = roleEntry.TokenPeriod return resp, nil } @@ -1079,7 +1083,7 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, d } // Ensure that role entry is not deleted - roleEntry, err := b.lockedAWSRole(ctx, req.Storage, storedIdentity.Role) + roleEntry, err := b.role(ctx, req.Storage, storedIdentity.Role) if err != nil { return nil, err } @@ -1098,11 +1102,11 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, d // Re-evaluate the maxTTL bounds shortestMaxTTL := b.System().MaxLeaseTTL() longestMaxTTL := b.System().MaxLeaseTTL() - if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL { - shortestMaxTTL = roleEntry.MaxTTL + if roleEntry.TokenMaxTTL > time.Duration(0) && roleEntry.TokenMaxTTL < shortestMaxTTL { + shortestMaxTTL = roleEntry.TokenMaxTTL } - if roleEntry.MaxTTL > longestMaxTTL { - longestMaxTTL 
= roleEntry.MaxTTL + if roleEntry.TokenMaxTTL > longestMaxTTL { + longestMaxTTL = roleEntry.TokenMaxTTL } if rTagMaxTTL > time.Duration(0) && rTagMaxTTL < shortestMaxTTL { shortestMaxTTL = rTagMaxTTL @@ -1123,9 +1127,9 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, d } resp := &logical.Response{Auth: req.Auth} - resp.Auth.TTL = roleEntry.TTL + resp.Auth.TTL = roleEntry.TokenTTL resp.Auth.MaxTTL = shortestMaxTTL - resp.Auth.Period = roleEntry.Period + resp.Auth.Period = roleEntry.TokenPeriod return resp, nil } @@ -1205,7 +1209,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, roleName = entity.FriendlyName } - roleEntry, err := b.lockedAWSRole(ctx, req.Storage, roleName) + roleEntry, err := b.role(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -1213,6 +1217,11 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, return logical.ErrorResponse(fmt.Sprintf("entry for role %s not found", roleName)), nil } + // Check for a CIDR match. 
+ if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, roleEntry.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + if roleEntry.AuthType != iamAuthType { return logical.ErrorResponse(fmt.Sprintf("auth method iam not allowed for role %s", roleName)), nil } @@ -1285,8 +1294,6 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, } } - policies := roleEntry.Policies - inferredEntityType := "" inferredEntityID := "" if roleEntry.InferredEntityType == ec2EntityType { @@ -1317,38 +1324,32 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, inferredEntityID = entity.SessionInfo } - resp := &logical.Response{ - Auth: &logical.Auth{ - Period: roleEntry.Period, - Policies: policies, - Metadata: map[string]string{ - "client_arn": callerID.Arn, - "canonical_arn": entity.canonicalArn(), - "client_user_id": callerUniqueId, - "auth_type": iamAuthType, - "inferred_entity_type": inferredEntityType, - "inferred_entity_id": inferredEntityID, - "inferred_aws_region": roleEntry.InferredAWSRegion, - "account_id": entity.AccountNumber, - "role_id": roleEntry.RoleID, - }, - InternalData: map[string]interface{}{ - "role_name": roleName, - "role_id": roleEntry.RoleID, - }, - DisplayName: entity.FriendlyName, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - TTL: roleEntry.TTL, - MaxTTL: roleEntry.MaxTTL, - }, - Alias: &logical.Alias{ - Name: identityAlias, - }, + auth := &logical.Auth{ + Metadata: map[string]string{ + "client_arn": callerID.Arn, + "canonical_arn": entity.canonicalArn(), + "client_user_id": callerUniqueId, + "auth_type": iamAuthType, + "inferred_entity_type": inferredEntityType, + "inferred_entity_id": inferredEntityID, + "inferred_aws_region": roleEntry.InferredAWSRegion, + "account_id": entity.AccountNumber, + "role_id": roleEntry.RoleID, + }, + InternalData: map[string]interface{}{ + "role_name": roleName, + "role_id": roleEntry.RoleID, + }, + DisplayName: entity.FriendlyName, + Alias: 
&logical.Alias{ + Name: identityAlias, }, } + roleEntry.PopulateTokenAuth(auth) - return resp, nil + return &logical.Response{ + Auth: auth, + }, nil } // These two methods (hasValuesFor*) return two bools @@ -1404,6 +1405,10 @@ func parseIamArn(iamArn string) (*iamEntity, error) { // now, entity.FriendlyName should either be or switch entity.Type { case "assumed-role": + // Check for three parts for assumed role ARNs + if len(parts) < 3 { + return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 3 slash-separated parts", fullParts[5]) + } // Assumed roles don't have paths and have a slightly different format // parts[2] is entity.Path = "" diff --git a/builtin/credential/aws/path_login_test.go b/builtin/credential/aws/path_login_test.go index d0c69cfce041..effca6efdbc4 100644 --- a/builtin/credential/aws/path_login_test.go +++ b/builtin/credential/aws/path_login_test.go @@ -114,6 +114,10 @@ func TestBackend_pathLogin_parseIamArn(t *testing.T) { if err == nil { t.Error("expected error from empty principal type and no principal name (arn:aws:iam::1234556789012:/)") } + _, err = parseIamArn("arn:aws:sts::1234556789012:assumed-role/role") + if err == nil { + t.Error("expected error from malformed assumed role ARN") + } } func TestBackend_validateVaultHeaderValue(t *testing.T) { @@ -215,7 +219,7 @@ func TestBackend_pathLogin_IAMHeaders(t *testing.T) { AuthType: iamAuthType, } - if err := b.nonLockedSetAWSRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil { + if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil { t.Fatalf("failed to set entry: %s", err) } @@ -307,10 +311,11 @@ func TestBackend_pathLogin_IAMHeaders(t *testing.T) { } loginRequest := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "login", - Storage: storage, - Data: loginData, + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{}, } resp, 
err := b.HandleRequest(context.Background(), loginRequest) diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go index d1b5545b6548..dd1a75b383f4 100644 --- a/builtin/credential/aws/path_role.go +++ b/builtin/credential/aws/path_role.go @@ -2,6 +2,7 @@ package awsauth import ( "context" + "errors" "fmt" "strings" "time" @@ -10,8 +11,9 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" ) var ( @@ -19,7 +21,7 @@ var ( ) func pathRole(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role"), Fields: map[string]*framework.FieldSchema{ "role": { @@ -131,28 +133,24 @@ Defaults to an empty string, meaning that role tags are disabled. This is only allowed if auth_type is ec2.`, }, "period": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Default: 0, - Description: ` -If set, indicates that the token generated using this role should never expire. -The token should be renewed within the duration specified by this value. At -each renewal, the token's TTL will be set to the value of this parameter.`, + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, }, "ttl": { - Type: framework.TypeDurationSecond, - Default: 0, - Description: `Duration in seconds after which the issued token should expire. 
Defaults -to 0, in which case the value will fallback to the system/mount defaults.`, + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, }, "max_ttl": { Type: framework.TypeDurationSecond, - Default: 0, - Description: "The maximum allowed lifetime of tokens issued using this role.", + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, }, "policies": { Type: framework.TypeCommaStringSlice, - Default: "default", - Description: "Policies to be set on tokens issued using this role.", + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, }, "allow_instance_migration": { Type: framework.TypeBool, @@ -187,6 +185,9 @@ auth_type is ec2.`, HelpSynopsis: pathRoleSyn, HelpDescription: pathRoleDesc, } + + tokenutil.AddTokenFields(p.Fields) + return p } func pathListRole(b *backend) *framework.Path { @@ -218,82 +219,83 @@ func pathListRoles(b *backend) *framework.Path { // Establishes dichotomy of request operation between CreateOperation and UpdateOperation. // Returning 'true' forces an UpdateOperation, CreateOperation otherwise. func (b *backend) pathRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { - entry, err := b.lockedAWSRole(ctx, req.Storage, strings.ToLower(data.Get("role").(string))) + entry, err := b.role(ctx, req.Storage, strings.ToLower(data.Get("role").(string))) if err != nil { return false, err } return entry != nil, nil } -// lockedAWSRole returns the properties set on the given role. This method -// acquires the read lock before reading the role from the storage. 
-func (b *backend) lockedAWSRole(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) { +// role fetches the role entry from cache, or loads from disk if necessary +func (b *backend) role(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) { if roleName == "" { return nil, fmt.Errorf("missing role name") } - b.roleMutex.RLock() - roleEntry, err := b.nonLockedAWSRole(ctx, s, roleName) - // we manually unlock rather than defer the unlock because we might need to grab - // a read/write lock in the upgrade path - b.roleMutex.RUnlock() + roleEntryRaw, found := b.roleCache.Get(roleName) + if found && roleEntryRaw != nil { + roleEntry, ok := roleEntryRaw.(*awsRoleEntry) + if !ok { + return nil, errors.New("could not convert role entry internally") + } + if roleEntry == nil { + return nil, errors.New("converted role entry is nil") + } + } + + // Not found, or was nil + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + + return b.roleInternal(ctx, s, roleName) +} + +// roleInternal does not perform locking, and rechecks the cache, going to disk if necessary +func (b *backend) roleInternal(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) { + // Check cache again now that we have the lock + roleEntryRaw, found := b.roleCache.Get(roleName) + if found && roleEntryRaw != nil { + roleEntry, ok := roleEntryRaw.(*awsRoleEntry) + if !ok { + return nil, errors.New("could not convert role entry internally") + } + if roleEntry == nil { + return nil, errors.New("converted role entry is nil") + } + } + + // Fetch from storage + entry, err := s.Get(ctx, "role/"+strings.ToLower(roleName)) if err != nil { return nil, err } - if roleEntry == nil { + if entry == nil { return nil, nil } - needUpgrade, err := b.upgradeRoleEntry(ctx, s, roleEntry) + + result := new(awsRoleEntry) + if err := entry.DecodeJSON(result); err != nil { + return nil, err + } + + needUpgrade, err := b.upgradeRole(ctx, s, result) if err 
!= nil { return nil, errwrap.Wrapf("error upgrading roleEntry: {{err}}", err) } if needUpgrade && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby)) { - b.roleMutex.Lock() - defer b.roleMutex.Unlock() - // Now that we have a R/W lock, we need to re-read the role entry in case it was - // written to between releasing the read lock and acquiring the write lock - roleEntry, err = b.nonLockedAWSRole(ctx, s, roleName) - if err != nil { - return nil, err - } - // somebody deleted the role, so no use in putting it back - if roleEntry == nil { - return nil, nil - } - // now re-check to see if we need to upgrade - if needUpgrade, err = b.upgradeRoleEntry(ctx, s, roleEntry); err != nil { - return nil, errwrap.Wrapf("error upgrading roleEntry: {{err}}", err) - } - if needUpgrade { - if err = b.nonLockedSetAWSRole(ctx, s, roleName, roleEntry); err != nil { - return nil, errwrap.Wrapf("error saving upgraded roleEntry: {{err}}", err) - } + if err = b.setRole(ctx, s, roleName, result); err != nil { + return nil, errwrap.Wrapf("error saving upgraded roleEntry: {{err}}", err) } } - return roleEntry, nil -} - -// lockedSetAWSRole creates or updates a role in the storage. This method -// acquires the write lock before creating or updating the role at the storage. -func (b *backend) lockedSetAWSRole(ctx context.Context, s logical.Storage, roleName string, roleEntry *awsRoleEntry) error { - if roleName == "" { - return fmt.Errorf("missing role name") - } - - if roleEntry == nil { - return fmt.Errorf("nil role entry") - } - b.roleMutex.Lock() - defer b.roleMutex.Unlock() + b.roleCache.SetDefault(roleName, result) - return b.nonLockedSetAWSRole(ctx, s, roleName, roleEntry) + return result, nil } -// nonLockedSetAWSRole creates or updates a role in the storage. This method -// does not acquire the write lock before reading the role from the storage. 
If -// locking is desired, use lockedSetAWSRole instead. -func (b *backend) nonLockedSetAWSRole(ctx context.Context, s logical.Storage, roleName string, +// setRole creates or updates a role in the storage. The caller must hold +// the write lock. +func (b *backend) setRole(ctx context.Context, s logical.Storage, roleName string, roleEntry *awsRoleEntry) error { if roleName == "" { return fmt.Errorf("missing role name") @@ -312,12 +314,14 @@ func (b *backend) nonLockedSetAWSRole(ctx context.Context, s logical.Storage, ro return err } + b.roleCache.SetDefault(roleName, roleEntry) + return nil } // If needed, updates the role entry and returns a bool indicating if it was updated // (and thus needs to be persisted) -func (b *backend) upgradeRoleEntry(ctx context.Context, s logical.Storage, roleEntry *awsRoleEntry) (bool, error) { +func (b *backend) upgradeRole(ctx context.Context, s logical.Storage, roleEntry *awsRoleEntry) (bool, error) { if roleEntry == nil { return false, fmt.Errorf("received nil roleEntry") } @@ -418,34 +422,22 @@ func (b *backend) upgradeRoleEntry(ctx context.Context, s logical.Storage, roleE return false, fmt.Errorf("unrecognized role version: %q", roleEntry.Version) } - return upgraded, nil -} - -// nonLockedAWSRole returns the properties set on the given role. This method -// does not acquire the read lock before reading the role from the storage. If -// locking is desired, use lockedAWSRole instead. -// This method also does NOT check to see if a role upgrade is required. It is -// the responsibility of the caller to check if a role upgrade is required and, -// if so, to upgrade the role -func (b *backend) nonLockedAWSRole(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) { - if roleName == "" { - return nil, fmt.Errorf("missing role name") + // Add tokenutil upgrades. These don't need to be persisted, they're fine + // being upgraded each time until changed. 
+ if roleEntry.TokenTTL == 0 && roleEntry.TTL > 0 { + roleEntry.TokenTTL = roleEntry.TTL } - - entry, err := s.Get(ctx, "role/"+strings.ToLower(roleName)) - if err != nil { - return nil, err + if roleEntry.TokenMaxTTL == 0 && roleEntry.MaxTTL > 0 { + roleEntry.TokenMaxTTL = roleEntry.MaxTTL } - if entry == nil { - return nil, nil + if roleEntry.TokenPeriod == 0 && roleEntry.Period > 0 { + roleEntry.TokenPeriod = roleEntry.Period } - - var result awsRoleEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err + if len(roleEntry.TokenPolicies) == 0 && len(roleEntry.Policies) > 0 { + roleEntry.TokenPolicies = roleEntry.Policies } - return &result, nil + return upgraded, nil } // pathRoleDelete is used to delete the information registered for a given AMI ID. @@ -458,24 +450,29 @@ func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data b.roleMutex.Lock() defer b.roleMutex.Unlock() - return nil, req.Storage.Delete(ctx, "role/"+strings.ToLower(roleName)) + err := req.Storage.Delete(ctx, "role/"+strings.ToLower(roleName)) + if err != nil { + return nil, errwrap.Wrapf("error deleting role: {{err}}", err) + } + + b.roleCache.Delete(roleName) + + return nil, nil } // pathRoleList is used to list all the AMI IDs registered with Vault. func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.roleMutex.RLock() - defer b.roleMutex.RUnlock() - roles, err := req.Storage.List(ctx, "role/") if err != nil { return nil, err } + return logical.ListResponse(roles), nil } // pathRoleRead is used to view the information registered for a given AMI ID. 
func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleEntry, err := b.lockedAWSRole(ctx, req.Storage, strings.ToLower(data.Get("role").(string))) + roleEntry, err := b.role(ctx, req.Storage, strings.ToLower(data.Get("role").(string))) if err != nil { return nil, err } @@ -498,7 +495,10 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request b.roleMutex.Lock() defer b.roleMutex.Unlock() - roleEntry, err := b.nonLockedAWSRole(ctx, req.Storage, roleName) + // We use the internal one here to ensure that we have fresh data and + // nobody else is concurrently modifying. This will also call the upgrade + // path on existing role entries. + roleEntry, err := b.roleInternal(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -512,16 +512,14 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request Version: currentRoleStorageVersion, } } else { - needUpdate, err := b.upgradeRoleEntry(ctx, req.Storage, roleEntry) + // We want to always use a copy so we aren't modifying items in the + // version in the cache while other users may be looking it up (or if + // we fail somewhere) + cp, err := copystructure.Copy(roleEntry) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to update roleEntry: %v", err)), nil - } - if needUpdate { - err = b.nonLockedSetAWSRole(ctx, req.Storage, roleName, roleEntry) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to save upgraded roleEntry: %v", err)), nil - } + return nil, err } + roleEntry = cp.(*awsRoleEntry) } // Fetch and set the bound parameters. 
There can't be default values @@ -709,13 +707,6 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request return logical.ErrorResponse("at least one bound parameter should be specified on the role"), nil } - policiesRaw, ok := data.GetOk("policies") - if ok { - roleEntry.Policies = policyutil.ParsePolicies(policiesRaw) - } else if req.Operation == logical.CreateOperation { - roleEntry.Policies = []string{} - } - disallowReauthenticationBool, ok := data.GetOk("disallow_reauthentication") if ok { if roleEntry.AuthType != ec2AuthType { @@ -742,48 +733,54 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request var resp logical.Response - ttlRaw, ok := data.GetOk("ttl") - if ok { - ttl := time.Duration(ttlRaw.(int)) * time.Second - defaultLeaseTTL := b.System().DefaultLeaseTTL() - if ttl > defaultLeaseTTL { - resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds greater than current mount/system default of %d seconds; ttl will be capped at login time", ttl/time.Second, defaultLeaseTTL/time.Second)) - } - roleEntry.TTL = ttl - } else if req.Operation == logical.CreateOperation { - roleEntry.TTL = time.Duration(data.Get("ttl").(int)) * time.Second + if err := roleEntry.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - maxTTLInt, ok := data.GetOk("max_ttl") - if ok { - maxTTL := time.Duration(maxTTLInt.(int)) * time.Second - systemMaxTTL := b.System().MaxLeaseTTL() - if maxTTL > systemMaxTTL { - resp.AddWarning(fmt.Sprintf("Given max_ttl of %d seconds greater than current mount/system default of %d seconds; max_ttl will be capped at login time", maxTTL/time.Second, systemMaxTTL/time.Second)) + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(data, "policies", "token_policies", &roleEntry.Policies, &roleEntry.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil } - if maxTTL < time.Duration(0) { - return 
logical.ErrorResponse("max_ttl cannot be negative"), nil + if err := tokenutil.UpgradeValue(data, "ttl", "token_ttl", &roleEntry.TTL, &roleEntry.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + // Special case here for old lease value + _, ok := data.GetOk("token_ttl") + if !ok { + _, ok = data.GetOk("ttl") + if !ok { + ttlRaw, ok := data.GetOk("lease") + if ok { + roleEntry.TTL = time.Duration(ttlRaw.(int)) * time.Second + roleEntry.TokenTTL = roleEntry.TTL + } + } } - roleEntry.MaxTTL = maxTTL - } else if req.Operation == logical.CreateOperation { - roleEntry.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second - } + if err := tokenutil.UpgradeValue(data, "max_ttl", "token_max_ttl", &roleEntry.MaxTTL, &roleEntry.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } - if roleEntry.MaxTTL != 0 && roleEntry.MaxTTL < roleEntry.TTL { - return logical.ErrorResponse("ttl should be shorter than max_ttl"), nil + if err := tokenutil.UpgradeValue(data, "period", "token_period", &roleEntry.Period, &roleEntry.TokenPeriod); err != nil { + return logical.ErrorResponse(err.Error()), nil + } } - periodRaw, ok := data.GetOk("period") - if ok { - roleEntry.Period = time.Second * time.Duration(periodRaw.(int)) - } else if req.Operation == logical.CreateOperation { - roleEntry.Period = time.Second * time.Duration(data.Get("period").(int)) + defaultLeaseTTL := b.System().DefaultLeaseTTL() + systemMaxTTL := b.System().MaxLeaseTTL() + if roleEntry.TokenTTL > defaultLeaseTTL { + resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds greater than current mount/system default of %d seconds; ttl will be capped at login time", roleEntry.TokenTTL/time.Second, defaultLeaseTTL/time.Second)) } - - if roleEntry.Period > b.System().MaxLeaseTTL() { - return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", roleEntry.Period.String(), b.System().MaxLeaseTTL().String())), nil + 
if roleEntry.TokenMaxTTL > systemMaxTTL { + resp.AddWarning(fmt.Sprintf("Given max ttl of %d seconds greater than current mount/system default of %d seconds; max ttl will be capped at login time", roleEntry.TokenMaxTTL/time.Second, systemMaxTTL/time.Second)) + } + if roleEntry.TokenMaxTTL != 0 && roleEntry.TokenMaxTTL < roleEntry.TokenTTL { + return logical.ErrorResponse("ttl should be shorter than max ttl"), nil + } + if roleEntry.TokenPeriod > b.System().MaxLeaseTTL() { + return logical.ErrorResponse(fmt.Sprintf("period of '%s' is greater than the backend's maximum lease TTL of '%s'", roleEntry.TokenPeriod.String(), b.System().MaxLeaseTTL().String())), nil } roleTagStr, ok := data.GetOk("role_tag") @@ -808,7 +805,7 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request } } - if err := b.nonLockedSetAWSRole(ctx, req.Storage, roleName, roleEntry); err != nil { + if err := b.setRole(ctx, req.Storage, roleName, roleEntry); err != nil { return nil, err } @@ -821,30 +818,35 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request // Struct to hold the information associated with a Vault role type awsRoleEntry struct { - RoleID string `json:"role_id"` - AuthType string `json:"auth_type"` - BoundAmiIDs []string `json:"bound_ami_id_list"` - BoundAccountIDs []string `json:"bound_account_id_list"` - BoundEc2InstanceIDs []string `json:"bound_ec2_instance_id_list"` - BoundIamPrincipalARNs []string `json:"bound_iam_principal_arn_list"` - BoundIamPrincipalIDs []string `json:"bound_iam_principal_id_list"` - BoundIamRoleARNs []string `json:"bound_iam_role_arn_list"` - BoundIamInstanceProfileARNs []string `json:"bound_iam_instance_profile_arn_list"` - BoundRegions []string `json:"bound_region_list"` - BoundSubnetIDs []string `json:"bound_subnet_id_list"` - BoundVpcIDs []string `json:"bound_vpc_id_list"` - InferredEntityType string `json:"inferred_entity_type"` - InferredAWSRegion string `json:"inferred_aws_region"` - 
ResolveAWSUniqueIDs bool `json:"resolve_aws_unique_ids"` - RoleTag string `json:"role_tag"` - AllowInstanceMigration bool `json:"allow_instance_migration"` - TTL time.Duration `json:"ttl"` - MaxTTL time.Duration `json:"max_ttl"` - Policies []string `json:"policies"` - DisallowReauthentication bool `json:"disallow_reauthentication"` - HMACKey string `json:"hmac_key"` - Period time.Duration `json:"period"` - Version int `json:"version"` + tokenutil.TokenParams + + RoleID string `json:"role_id"` + AuthType string `json:"auth_type"` + BoundAmiIDs []string `json:"bound_ami_id_list"` + BoundAccountIDs []string `json:"bound_account_id_list"` + BoundEc2InstanceIDs []string `json:"bound_ec2_instance_id_list"` + BoundIamPrincipalARNs []string `json:"bound_iam_principal_arn_list"` + BoundIamPrincipalIDs []string `json:"bound_iam_principal_id_list"` + BoundIamRoleARNs []string `json:"bound_iam_role_arn_list"` + BoundIamInstanceProfileARNs []string `json:"bound_iam_instance_profile_arn_list"` + BoundRegions []string `json:"bound_region_list"` + BoundSubnetIDs []string `json:"bound_subnet_id_list"` + BoundVpcIDs []string `json:"bound_vpc_id_list"` + InferredEntityType string `json:"inferred_entity_type"` + InferredAWSRegion string `json:"inferred_aws_region"` + ResolveAWSUniqueIDs bool `json:"resolve_aws_unique_ids"` + RoleTag string `json:"role_tag"` + AllowInstanceMigration bool `json:"allow_instance_migration"` + DisallowReauthentication bool `json:"disallow_reauthentication"` + HMACKey string `json:"hmac_key"` + Version int `json:"version"` + + // Deprecated: These are superseded by TokenUtil + TTL time.Duration `json:"ttl"` + MaxTTL time.Duration `json:"max_ttl"` + Period time.Duration `json:"period"` + Policies []string `json:"policies"` + // DEPRECATED -- these are the old fields before we supported lists and exist for backwards compatibility BoundAmiID string `json:"bound_ami_id,omitempty" ` BoundAccountID string `json:"bound_account_id,omitempty"` @@ -876,11 +878,21 @@ 
func (r *awsRoleEntry) ToResponseData() map[string]interface{} { "role_id": r.RoleID, "role_tag": r.RoleTag, "allow_instance_migration": r.AllowInstanceMigration, - "ttl": r.TTL / time.Second, - "max_ttl": r.MaxTTL / time.Second, - "policies": r.Policies, "disallow_reauthentication": r.DisallowReauthentication, - "period": r.Period / time.Second, + } + + r.PopulateTokenData(responseData) + if r.TTL > 0 { + responseData["ttl"] = int64(r.TTL.Seconds()) + } + if r.MaxTTL > 0 { + responseData["max_ttl"] = int64(r.MaxTTL.Seconds()) + } + if r.Period > 0 { + responseData["period"] = int64(r.Period.Seconds()) + } + if len(r.Policies) > 0 { + responseData["policies"] = responseData["token_policies"] } convertNilToEmptySlice := func(data map[string]interface{}, field string) { diff --git a/builtin/credential/aws/path_role_tag.go b/builtin/credential/aws/path_role_tag.go index 1b011edc5856..8d57b11b8906 100644 --- a/builtin/credential/aws/path_role_tag.go +++ b/builtin/credential/aws/path_role_tag.go @@ -77,7 +77,7 @@ func (b *backend) pathRoleTagUpdate(ctx context.Context, req *logical.Request, d } // Fetch the role entry - roleEntry, err := b.lockedAWSRole(ctx, req.Storage, roleName) + roleEntry, err := b.role(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -110,7 +110,7 @@ func (b *backend) pathRoleTagUpdate(ctx context.Context, req *logical.Request, d if ok { policies = policyutil.ParsePolicies(policiesRaw) } - if !strutil.StrListSubset(roleEntry.Policies, policies) { + if !strutil.StrListSubset(roleEntry.TokenPolicies, policies) { resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. 
Login will not be allowed with this tag unless the role policies are updated.") } @@ -135,8 +135,8 @@ func (b *backend) pathRoleTagUpdate(ctx context.Context, req *logical.Request, d resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the mount maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, b.System().MaxLeaseTTL()/time.Second)) } // If max_ttl is set for the role, check the bounds for tag's max_ttl value using that. - if roleEntry.MaxTTL != time.Duration(0) && maxTTL > roleEntry.MaxTTL { - resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the role maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, roleEntry.MaxTTL/time.Second)) + if roleEntry.TokenMaxTTL != time.Duration(0) && maxTTL > roleEntry.TokenMaxTTL { + resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the role maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, roleEntry.TokenMaxTTL/time.Second)) } if maxTTL < time.Duration(0) { @@ -349,7 +349,7 @@ func (b *backend) parseAndVerifyRoleTagValue(ctx context.Context, s logical.Stor return nil, fmt.Errorf("missing role name") } - roleEntry, err := b.lockedAWSRole(ctx, s, rTag.Role) + roleEntry, err := b.role(ctx, s, rTag.Role) if err != nil { return nil, err } diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go index 150f32316997..b7a572c73671 100644 --- a/builtin/credential/aws/path_role_test.go +++ b/builtin/credential/aws/path_role_test.go @@ -5,7 +5,6 @@ import ( "reflect" "strings" "testing" - "time" "github.com/go-test/deep" "github.com/hashicorp/vault/sdk/helper/policyutil" @@ -591,7 +590,6 @@ func TestAwsEc2_RoleCrud(t *testing.T) { } roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("resp: %#v, err: %v", resp, err) @@ -614,53 +612,106 @@ func TestAwsEc2_RoleCrud(t 
*testing.T) { "resolve_aws_unique_ids": false, "role_tag": "testtag", "allow_instance_migration": true, - "ttl": time.Duration(600), - "max_ttl": time.Duration(1200), + "ttl": int64(600), + "token_ttl": int64(600), + "max_ttl": int64(1200), + "token_max_ttl": int64(1200), + "token_explicit_max_ttl": int64(0), "policies": []string{"testpolicy1", "testpolicy2"}, + "token_policies": []string{"testpolicy1", "testpolicy2"}, "disallow_reauthentication": false, - "period": time.Duration(60), + "period": int64(60), + "token_period": int64(60), + "token_bound_cidrs": []string{}, + "token_no_default_policy": false, + "token_num_uses": 0, + "token_type": "default", } if resp.Data["role_id"] == nil { t.Fatal("role_id not found in repsonse") } expected["role_id"] = resp.Data["role_id"] - if diff := deep.Equal(expected, resp.Data); diff != nil { t.Fatal(diff) } roleData["bound_vpc_id"] = "newvpcid" roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("resp: %#v, err: %v", resp, err) } roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("resp: %#v, err: %v", resp, err) } - expected["bound_vpc_id"] = []string{"newvpcid"} + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data) + } + + // Create a new backend so we have a new cache (thus populating from disk). + // Then test reading (reading from disk + lock), writing, reading, + // deleting, reading. 
+ b, err = Backend(config) + if err != nil { + t.Fatal(err) + } + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Read again, make sure things are what we expect + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } if !reflect.DeepEqual(expected, resp.Data) { t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data) } - roleReq.Operation = logical.DeleteOperation + roleReq.Operation = logical.UpdateOperation + roleData["bound_ami_id"] = "testamiid2" + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("resp: %#v, err: %v", resp, err) } + expected["bound_ami_id"] = []string{"testamiid2"} + if diff := deep.Equal(expected, resp.Data); diff != nil { + t.Fatal(diff) + } + + // Delete which should remove from disk and also cache + roleReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } if resp != nil { t.Fatalf("failed to delete role entry") } + + // Verify it was deleted, e.g. 
it isn't found in the role cache + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + if resp != nil { + t.Fatal("expected nil") + } } func TestAwsEc2_RoleDurationSeconds(t *testing.T) { @@ -706,13 +757,13 @@ func TestAwsEc2_RoleDurationSeconds(t *testing.T) { t.Fatalf("resp: %#v, err: %v", resp, err) } - if int64(resp.Data["ttl"].(time.Duration)) != 10 { + if int64(resp.Data["ttl"].(int64)) != 10 { t.Fatalf("bad: period; expected: 10, actual: %d", resp.Data["ttl"]) } - if int64(resp.Data["max_ttl"].(time.Duration)) != 20 { + if int64(resp.Data["max_ttl"].(int64)) != 20 { t.Fatalf("bad: period; expected: 20, actual: %d", resp.Data["max_ttl"]) } - if int64(resp.Data["period"].(time.Duration)) != 30 { + if int64(resp.Data["period"].(int64)) != 30 { t.Fatalf("bad: period; expected: 30, actual: %d", resp.Data["period"]) } } @@ -742,7 +793,7 @@ func TestRoleEntryUpgradeV(t *testing.T) { Version: currentRoleStorageVersion, } - upgraded, err := b.upgradeRoleEntry(context.Background(), storage, roleEntryToUpgrade) + upgraded, err := b.upgradeRole(context.Background(), storage, roleEntryToUpgrade) if err != nil { t.Fatalf("error upgrading role entry: %#v", err) } diff --git a/builtin/credential/aws/path_roletag_blacklist.go b/builtin/credential/aws/path_roletag_blacklist.go index 88b79c388a8f..2ec0ea08a8f9 100644 --- a/builtin/credential/aws/path_roletag_blacklist.go +++ b/builtin/credential/aws/path_roletag_blacklist.go @@ -163,7 +163,7 @@ func (b *backend) pathRoletagBlacklistUpdate(ctx context.Context, req *logical.R } // Get the entry for the role mentioned in the role tag. 
- roleEntry, err := b.lockedAWSRole(ctx, req.Storage, rTag.Role) + roleEntry, err := b.role(ctx, req.Storage, rTag.Role) if err != nil { return nil, err } @@ -196,8 +196,8 @@ func (b *backend) pathRoletagBlacklistUpdate(ctx context.Context, req *logical.R // Decide the expiration time based on the max_ttl values. Since this is // restricting access, use the greatest duration, not the least. maxDur := rTag.MaxTTL - if roleEntry.MaxTTL > maxDur { - maxDur = roleEntry.MaxTTL + if roleEntry.TokenMaxTTL > maxDur { + maxDur = roleEntry.TokenMaxTTL } if b.System().MaxLeaseTTL() > maxDur { maxDur = b.System().MaxLeaseTTL() diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index 7facab0413f2..8e2db8dfc312 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -11,6 +11,7 @@ import ( "net/url" "path/filepath" + "github.com/go-test/deep" "github.com/hashicorp/go-sockaddr" "golang.org/x/net/http2" @@ -39,6 +40,7 @@ import ( logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" @@ -1163,8 +1165,8 @@ func TestBackend_email_singleCert(t *testing.T) { Subject: pkix.Name{ CommonName: "example.com", }, - EmailAddresses: []string{"valid@example.com"}, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + EmailAddresses: []string{"valid@example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth, @@ -1949,3 +1951,60 @@ func Test_Renew(t *testing.T) { t.Fatal("expected error") } } + +func TestBackend_CertUpgrade(t *testing.T) { + s := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = s + + ctx := 
context.Background() + + b := Backend() + if b == nil { + t.Fatalf("failed to create backend") + } + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + + foo := &CertEntry{ + Policies: []string{"foo"}, + Period: time.Second, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + } + + entry, err := logical.StorageEntryJSON("cert/foo", foo) + if err != nil { + t.Fatal(err) + } + err = s.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + certEntry, err := b.Cert(ctx, s, "foo") + if err != nil { + t.Fatal(err) + } + + exp := &CertEntry{ + Policies: []string{"foo"}, + Period: time.Second, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + TokenParams: tokenutil.TokenParams{ + TokenPolicies: []string{"foo"}, + TokenPeriod: time.Second, + TokenTTL: time.Second, + TokenMaxTTL: time.Second, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + }, + } + if diff := deep.Equal(certEntry, exp); diff != nil { + t.Fatal(diff) + } +} diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go index dfbee218c6a5..0a57f347be8e 100644 --- a/builtin/credential/cert/path_certs.go +++ b/builtin/credential/cert/path_certs.go @@ -9,8 +9,7 @@ import ( sockaddr "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/parseutil" - "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -28,7 +27,7 @@ func pathListCerts(b *backend) *framework.Path { } func pathCerts(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "certs/" + framework.GenericNameRegex("name"), 
Fields: map[string]*framework.FieldSchema{ "name": &framework.FieldSchema{ @@ -95,39 +94,38 @@ certificate.`, "policies": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, - Description: "Comma-separated list of policies.", + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, }, "lease": &framework.FieldSchema{ - Type: framework.TypeInt, - Description: `Deprecated: use "ttl" instead. TTL time in -seconds. Defaults to system/backend default TTL.`, + Type: framework.TypeInt, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, }, "ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `TTL for tokens issued by this backend. -Defaults to system/backend default TTL time.`, + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, }, "max_ttl": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `Duration in either an integer number of seconds (3600) or -an integer time unit (60m) after which the -issued token can no longer be renewed.`, + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, }, "period": &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `If set, indicates that the token generated using this role -should never expire. The token should be renewed within the -duration specified by this value. At each renewal, the token's -TTL will be set to the value of this parameter.`, + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, }, + "bound_cidrs": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of -IP addresses which can perform the login operation.`, + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_bound_cidrs"), + Deprecated: true, }, }, @@ -140,6 +138,9 @@ IP addresses which can perform the login operation.`, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, } + + tokenutil.AddTokenFields(p.Fields) + return p } func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertEntry, error) { @@ -155,6 +156,23 @@ func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertE if err := entry.DecodeJSON(&result); err != nil { return nil, err } + + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + if result.TokenPeriod == 0 && result.Period > 0 { + result.TokenPeriod = result.Period + } + if len(result.TokenPolicies) == 0 && len(result.Policies) > 0 { + result.TokenPolicies = result.Policies + } + if len(result.TokenBoundCIDRs) == 0 && len(result.BoundCIDRs) > 0 { + result.TokenBoundCIDRs = result.BoundCIDRs + } + return &result, nil } @@ -183,86 +201,146 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra return nil, nil } + data := map[string]interface{}{ + "certificate": cert.Certificate, + "display_name": cert.DisplayName, + "allowed_names": cert.AllowedNames, + "allowed_common_names": cert.AllowedCommonNames, + "allowed_dns_sans": cert.AllowedDNSSANs, + "allowed_email_sans": cert.AllowedEmailSANs, + "allowed_uri_sans": cert.AllowedURISANs, + "allowed_organizational_units": cert.AllowedOrganizationalUnits, + "required_extensions": cert.RequiredExtensions, + } + cert.PopulateTokenData(data) + + if cert.TTL > 0 { + data["ttl"] = int64(cert.TTL.Seconds()) + } + if cert.MaxTTL > 0 { + data["max_ttl"] = int64(cert.MaxTTL.Seconds()) + } + if cert.Period > 0 { + data["period"] = 
int64(cert.Period.Seconds()) + } + if len(cert.Policies) > 0 { + data["policies"] = data["token_policies"] + } + if len(cert.BoundCIDRs) > 0 { + data["bound_cidrs"] = data["token_bound_cidrs"] + } + return &logical.Response{ - Data: map[string]interface{}{ - "certificate": cert.Certificate, - "display_name": cert.DisplayName, - "policies": cert.Policies, - "ttl": cert.TTL / time.Second, - "max_ttl": cert.MaxTTL / time.Second, - "period": cert.Period / time.Second, - "allowed_names": cert.AllowedNames, - "allowed_common_names": cert.AllowedCommonNames, - "allowed_dns_sans": cert.AllowedDNSSANs, - "allowed_email_sans": cert.AllowedEmailSANs, - "allowed_uri_sans": cert.AllowedURISANs, - "allowed_organizational_units": cert.AllowedOrganizationalUnits, - "required_extensions": cert.RequiredExtensions, - "bound_cidrs": cert.BoundCIDRs, - }, + Data: data, }, nil } func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := strings.ToLower(d.Get("name").(string)) - certificate := d.Get("certificate").(string) - displayName := d.Get("display_name").(string) - policies := policyutil.ParsePolicies(d.Get("policies")) - allowedNames := d.Get("allowed_names").([]string) - allowedCommonNames := d.Get("allowed_common_names").([]string) - allowedDNSSANs := d.Get("allowed_dns_sans").([]string) - allowedEmailSANs := d.Get("allowed_email_sans").([]string) - allowedURISANs := d.Get("allowed_uri_sans").([]string) - allowedOrganizationalUnits := d.Get("allowed_organizational_units").([]string) - requiredExtensions := d.Get("required_extensions").([]string) - var resp logical.Response - - // Parse the ttl (or lease duration) - systemDefaultTTL := b.System().DefaultLeaseTTL() - ttl := time.Duration(d.Get("ttl").(int)) * time.Second - if ttl == 0 { - ttl = time.Duration(d.Get("lease").(int)) * time.Second - } - if ttl > systemDefaultTTL { - resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds is greater than current 
mount/system default of %d seconds", ttl/time.Second, systemDefaultTTL/time.Second)) + cert, err := b.Cert(ctx, req.Storage, name) + if err != nil { + return nil, err } - if ttl < time.Duration(0) { - return logical.ErrorResponse("ttl cannot be negative"), nil + if cert == nil { + cert = &CertEntry{ + Name: name, + } } - // Parse max_ttl - systemMaxTTL := b.System().MaxLeaseTTL() - maxTTL := time.Duration(d.Get("max_ttl").(int)) * time.Second - if maxTTL > systemMaxTTL { - resp.AddWarning(fmt.Sprintf("Given max_ttl of %d seconds is greater than current mount/system default of %d seconds", maxTTL/time.Second, systemMaxTTL/time.Second)) + // Get non tokenutil fields + if certificateRaw, ok := d.GetOk("certificate"); ok { + cert.Certificate = certificateRaw.(string) } - - if maxTTL < time.Duration(0) { - return logical.ErrorResponse("max_ttl cannot be negative"), nil + if displayNameRaw, ok := d.GetOk("display_name"); ok { + cert.DisplayName = displayNameRaw.(string) + } + if allowedNamesRaw, ok := d.GetOk("allowed_names"); ok { + cert.AllowedNames = allowedNamesRaw.([]string) + } + if allowedCommonNamesRaw, ok := d.GetOk("allowed_common_names"); ok { + cert.AllowedCommonNames = allowedCommonNamesRaw.([]string) + } + if allowedDNSSANsRaw, ok := d.GetOk("allowed_dns_sans"); ok { + cert.AllowedDNSSANs = allowedDNSSANsRaw.([]string) + } + if allowedEmailSANsRaw, ok := d.GetOk("allowed_email_sans"); ok { + cert.AllowedEmailSANs = allowedEmailSANsRaw.([]string) + } + if allowedURISANsRaw, ok := d.GetOk("allowed_uri_sans"); ok { + cert.AllowedURISANs = allowedURISANsRaw.([]string) + } + if allowedOrganizationalUnitsRaw, ok := d.GetOk("allowed_organizational_units"); ok { + cert.AllowedOrganizationalUnits = allowedOrganizationalUnitsRaw.([]string) + } + if requiredExtensionsRaw, ok := d.GetOk("required_extensions"); ok { + cert.RequiredExtensions = requiredExtensionsRaw.([]string) } - if maxTTL != 0 && ttl > maxTTL { - return logical.ErrorResponse("ttl should be shorter than 
max_ttl"), nil + // Get tokenutil fields + if err := cert.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - // Parse period - period := time.Duration(d.Get("period").(int)) * time.Second - if period > systemMaxTTL { - resp.AddWarning(fmt.Sprintf("Given period of %d seconds is greater than the backend's maximum TTL of %d seconds", period/time.Second, systemMaxTTL/time.Second)) + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(d, "policies", "token_policies", &cert.Policies, &cert.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "ttl", "token_ttl", &cert.TTL, &cert.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + // Special case here for old lease value + _, ok := d.GetOk("token_ttl") + if !ok { + _, ok = d.GetOk("ttl") + if !ok { + ttlRaw, ok := d.GetOk("lease") + if ok { + cert.TTL = time.Duration(ttlRaw.(int)) * time.Second + cert.TokenTTL = cert.TTL + } + } + } + + if err := tokenutil.UpgradeValue(d, "max_ttl", "token_max_ttl", &cert.MaxTTL, &cert.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "period", "token_period", &cert.Period, &cert.TokenPeriod); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "bound_cidrs", "token_bound_cidrs", &cert.BoundCIDRs, &cert.TokenBoundCIDRs); err != nil { + return logical.ErrorResponse(err.Error()), nil + } } - if period < time.Duration(0) { - return logical.ErrorResponse("period cannot be negative"), nil + var resp logical.Response + + systemDefaultTTL := b.System().DefaultLeaseTTL() + if cert.TokenTTL > systemDefaultTTL { + resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds is greater than current mount/system default of %d seconds", cert.TokenTTL/time.Second, systemDefaultTTL/time.Second)) + } + systemMaxTTL := 
b.System().MaxLeaseTTL() + if cert.TokenMaxTTL > systemMaxTTL { + resp.AddWarning(fmt.Sprintf("Given max_ttl of %d seconds is greater than current mount/system default of %d seconds", cert.TokenMaxTTL/time.Second, systemMaxTTL/time.Second)) + } + if cert.TokenMaxTTL != 0 && cert.TokenTTL > cert.TokenMaxTTL { + return logical.ErrorResponse("ttl should be shorter than max_ttl"), nil + } + if cert.TokenPeriod > systemMaxTTL { + resp.AddWarning(fmt.Sprintf("Given period of %d seconds is greater than the backend's maximum TTL of %d seconds", cert.TokenPeriod/time.Second, systemMaxTTL/time.Second)) } // Default the display name to the certificate name if not given - if displayName == "" { - displayName = name + if cert.DisplayName == "" { + cert.DisplayName = name } - parsed := parsePEM([]byte(certificate)) + parsed := parsePEM([]byte(cert.Certificate)) if len(parsed) == 0 { return logical.ErrorResponse("failed to parse certificate"), nil } @@ -281,31 +359,8 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr } } - parsedCIDRs, err := parseutil.ParseAddrs(d.Get("bound_cidrs")) - if err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest - } - - certEntry := &CertEntry{ - Name: name, - Certificate: certificate, - DisplayName: displayName, - Policies: policies, - AllowedNames: allowedNames, - AllowedCommonNames: allowedCommonNames, - AllowedDNSSANs: allowedDNSSANs, - AllowedEmailSANs: allowedEmailSANs, - AllowedURISANs: allowedURISANs, - AllowedOrganizationalUnits: allowedOrganizationalUnits, - RequiredExtensions: requiredExtensions, - TTL: ttl, - MaxTTL: maxTTL, - Period: period, - BoundCIDRs: parsedCIDRs, - } - // Store it - entry, err := logical.StorageEntryJSON("cert/"+name, certEntry) + entry, err := logical.StorageEntryJSON("cert/"+name, cert) if err != nil { return nil, err } @@ -321,6 +376,8 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr } type CertEntry struct { + 
tokenutil.TokenParams + Name string Certificate string DisplayName string diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index c5cf0eaef388..de5db78d66c3 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -83,36 +83,28 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId) akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId) - resp := &logical.Response{ - Auth: &logical.Auth{ - Period: matched.Entry.Period, - InternalData: map[string]interface{}{ - "subject_key_id": skid, - "authority_key_id": akid, - }, - Policies: matched.Entry.Policies, - DisplayName: matched.Entry.DisplayName, - Metadata: map[string]string{ - "cert_name": matched.Entry.Name, - "common_name": clientCerts[0].Subject.CommonName, - "serial_number": clientCerts[0].SerialNumber.String(), - "subject_key_id": certutil.GetHexFormatted(clientCerts[0].SubjectKeyId, ":"), - "authority_key_id": certutil.GetHexFormatted(clientCerts[0].AuthorityKeyId, ":"), - }, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - TTL: matched.Entry.TTL, - MaxTTL: matched.Entry.MaxTTL, - }, - Alias: &logical.Alias{ - Name: clientCerts[0].Subject.CommonName, - }, - BoundCIDRs: matched.Entry.BoundCIDRs, + auth := &logical.Auth{ + InternalData: map[string]interface{}{ + "subject_key_id": skid, + "authority_key_id": akid, + }, + DisplayName: matched.Entry.DisplayName, + Metadata: map[string]string{ + "cert_name": matched.Entry.Name, + "common_name": clientCerts[0].Subject.CommonName, + "serial_number": clientCerts[0].SerialNumber.String(), + "subject_key_id": certutil.GetHexFormatted(clientCerts[0].SubjectKeyId, ":"), + "authority_key_id": certutil.GetHexFormatted(clientCerts[0].AuthorityKeyId, ":"), + }, + Alias: &logical.Alias{ + Name: clientCerts[0].Subject.CommonName, }, } + matched.Entry.PopulateTokenAuth(auth) 
- // Generate a response - return resp, nil + return &logical.Response{ + Auth: auth, + }, nil } func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -159,14 +151,14 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f return nil, nil } - if !policyutil.EquivalentPolicies(cert.Policies, req.Auth.TokenPolicies) { + if !policyutil.EquivalentPolicies(cert.TokenPolicies, req.Auth.TokenPolicies) { return nil, fmt.Errorf("policies have changed, not renewing") } resp := &logical.Response{Auth: req.Auth} - resp.Auth.TTL = cert.TTL - resp.Auth.MaxTTL = cert.MaxTTL - resp.Auth.Period = cert.Period + resp.Auth.TTL = cert.TokenTTL + resp.Auth.MaxTTL = cert.TokenMaxTTL + resp.Auth.Period = cert.TokenPeriod return resp, nil } @@ -478,7 +470,7 @@ func (b *backend) checkForValidChain(chains [][]*x509.Certificate) bool { } func (b *backend) checkCIDR(cert *CertEntry, req *logical.Request) error { - if cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cert.BoundCIDRs) { + if cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cert.TokenBoundCIDRs) { return nil } return logical.ErrPermissionDenied diff --git a/builtin/credential/github/backend_test.go b/builtin/credential/github/backend_test.go index 35036fd75aff..4e51eedba855 100644 --- a/builtin/credential/github/backend_test.go +++ b/builtin/credential/github/backend_test.go @@ -35,34 +35,35 @@ func TestBackend_Config(t *testing.T) { "ttl": "", "max_ttl": "", } - expectedTTL1, _ := time.ParseDuration("24h0m0s") + expectedTTL1 := 24 * time.Hour config_data2 := map[string]interface{}{ "organization": os.Getenv("GITHUB_ORG"), "ttl": "1h", "max_ttl": "2h", } - expectedTTL2, _ := time.ParseDuration("1h0m0s") + expectedTTL2 := time.Hour config_data3 := map[string]interface{}{ "organization": os.Getenv("GITHUB_ORG"), "ttl": "50h", "max_ttl": "50h", } + expectedTTL3 := 48 * time.Hour logicaltest.Test(t, logicaltest.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, - LogicalBackend: b, + PreCheck: func() { testAccPreCheck(t) }, + CredentialBackend: b, Steps: []logicaltest.TestStep{ testConfigWrite(t, config_data1), - testLoginWrite(t, login_data, expectedTTL1.Nanoseconds(), false), + testLoginWrite(t, login_data, expectedTTL1, false), testConfigWrite(t, config_data2), - testLoginWrite(t, login_data, expectedTTL2.Nanoseconds(), false), + testLoginWrite(t, login_data, expectedTTL2, false), testConfigWrite(t, config_data3), - testLoginWrite(t, login_data, 0, true), + testLoginWrite(t, login_data, expectedTTL3, true), }, }) } -func testLoginWrite(t *testing.T, d map[string]interface{}, expectedTTL int64, expectFail bool) logicaltest.TestStep { +func testLoginWrite(t *testing.T, d map[string]interface{}, expectedTTL time.Duration, expectFail bool) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "login", @@ -72,10 +73,9 @@ func testLoginWrite(t *testing.T, d map[string]interface{}, expectedTTL int64, e if resp.IsError() && expectFail { return nil } - var actualTTL int64 - actualTTL = resp.Auth.LeaseOptions.TTL.Nanoseconds() + actualTTL := resp.Auth.LeaseOptions.TTL if actualTTL != expectedTTL { - return fmt.Errorf("TTL mismatched. Expected: %d Actual: %d", expectedTTL, resp.Auth.LeaseOptions.TTL.Nanoseconds()) + return fmt.Errorf("TTL mismatched. 
Expected: %d Actual: %d", expectedTTL, resp.Auth.LeaseOptions.TTL) } return nil }, @@ -105,25 +105,25 @@ func TestBackend_basic(t *testing.T) { } logicaltest.Test(t, logicaltest.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - LogicalBackend: b, + PreCheck: func() { testAccPreCheck(t) }, + CredentialBackend: b, Steps: []logicaltest.TestStep{ testAccStepConfig(t, false), testAccMap(t, "default", "fakepol"), testAccMap(t, "oWnErs", "fakepol"), - testAccLogin(t, []string{"default", "fakepol"}), + testAccLogin(t, []string{"default", "abc", "fakepol"}), testAccStepConfig(t, true), testAccMap(t, "default", "fakepol"), testAccMap(t, "oWnErs", "fakepol"), - testAccLogin(t, []string{"default", "fakepol"}), + testAccLogin(t, []string{"default", "abc", "fakepol"}), testAccStepConfigWithBaseURL(t), testAccMap(t, "default", "fakepol"), testAccMap(t, "oWnErs", "fakepol"), - testAccLogin(t, []string{"default", "fakepol"}), + testAccLogin(t, []string{"default", "abc", "fakepol"}), testAccMap(t, "default", "fakepol"), testAccStepConfig(t, true), mapUserToPolicy(t, os.Getenv("GITHUB_USER"), "userpolicy"), - testAccLogin(t, []string{"default", "fakepol", "userpolicy"}), + testAccLogin(t, []string{"default", "abc", "fakepol", "userpolicy"}), }, }) } @@ -133,6 +133,10 @@ func testAccPreCheck(t *testing.T) { t.Skip("GITHUB_TOKEN must be set for acceptance tests") } + if v := os.Getenv("GITHUB_USER"); v == "" { + t.Skip("GITHUB_USER must be set for acceptance tests") + } + if v := os.Getenv("GITHUB_ORG"); v == "" { t.Skip("GITHUB_ORG must be set for acceptance tests") } @@ -147,7 +151,8 @@ func testAccStepConfig(t *testing.T, upper bool) logicaltest.TestStep { Operation: logical.UpdateOperation, Path: "config", Data: map[string]interface{}{ - "organization": os.Getenv("GITHUB_ORG"), + "organization": os.Getenv("GITHUB_ORG"), + "token_policies": []string{"abc"}, }, } if upper { diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go index 
18e72f4f5885..3822936ac7ae 100644 --- a/builtin/credential/github/path_config.go +++ b/builtin/credential/github/path_config.go @@ -4,15 +4,17 @@ import ( "context" "fmt" "net/url" + "strings" "time" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) func pathConfig(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "config", Fields: map[string]*framework.FieldSchema{ "organization": &framework.FieldSchema{ @@ -25,17 +27,20 @@ func pathConfig(b *backend) *framework.Path { Description: `The API endpoint to use. Useful if you are running GitHub Enterprise or an API-compatible authentication server.`, - DisplayName: "Base URL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Base URL", + Group: "GitHub Options", + }, }, "ttl": &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Duration after which authentication will be expired`, - DisplayName: "TTL", + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, }, "max_ttl": &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Maximum duration after which authentication will be expired`, - DisplayName: "Max TTL", + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, }, }, @@ -44,48 +49,53 @@ API-compatible authentication server.`, logical.ReadOperation: b.pathConfigRead, }, } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any policies configured for specific users/groups." 
+ return p } func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - organization := data.Get("organization").(string) - baseURL := data.Get("base_url").(string) - if len(baseURL) != 0 { + c, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if c == nil { + c = &config{} + } + + if organizationRaw, ok := data.GetOk("organization"); ok { + c.Organization = organizationRaw.(string) + } + + if baseURLRaw, ok := data.GetOk("base_url"); ok { + baseURL := baseURLRaw.(string) _, err := url.Parse(baseURL) if err != nil { return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil } + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + c.BaseURL = baseURL } - var ttl time.Duration - var err error - ttlRaw, ok := data.GetOk("ttl") - if !ok || len(ttlRaw.(string)) == 0 { - ttl = 0 - } else { - ttl, err = time.ParseDuration(ttlRaw.(string)) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Invalid 'ttl':%s", err)), nil - } + if err := c.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - var maxTTL time.Duration - maxTTLRaw, ok := data.GetOk("max_ttl") - if !ok || len(maxTTLRaw.(string)) == 0 { - maxTTL = 0 - } else { - maxTTL, err = time.ParseDuration(maxTTLRaw.(string)) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Invalid 'max_ttl':%s", err)), nil + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(data, "ttl", "token_ttl", &c.TTL, &c.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil } - } - entry, err := logical.StorageEntryJSON("config", config{ - Organization: organization, - BaseURL: baseURL, - TTL: ttl, - MaxTTL: maxTTL, - }) + if err := tokenutil.UpgradeValue(data, "max_ttl", "token_max_ttl", &c.MaxTTL, &c.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + entry, err 
:= logical.StorageEntryJSON("config", c) if err != nil { return nil, err } @@ -102,23 +112,26 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, data if err != nil { return nil, err } - if config == nil { - return nil, fmt.Errorf("configuration object not found") + return nil, nil } - config.TTL /= time.Second - config.MaxTTL /= time.Second + d := map[string]interface{}{ + "organization": config.Organization, + "base_url": config.BaseURL, + } + config.PopulateTokenData(d) - resp := &logical.Response{ - Data: map[string]interface{}{ - "organization": config.Organization, - "base_url": config.BaseURL, - "ttl": config.TTL, - "max_ttl": config.MaxTTL, - }, + if config.TTL > 0 { + d["ttl"] = int64(config.TTL.Seconds()) + } + if config.MaxTTL > 0 { + d["max_ttl"] = int64(config.MaxTTL.Seconds()) } - return resp, nil + + return &logical.Response{ + Data: d, + }, nil } // Config returns the configuration for this backend. @@ -127,6 +140,9 @@ func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error if err != nil { return nil, err } + if entry == nil { + return nil, nil + } var result config if entry != nil { @@ -135,10 +151,19 @@ func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error } } + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + return &result, nil } type config struct { + tokenutil.TokenParams + Organization string `json:"organization" structs:"organization" mapstructure:"organization"` BaseURL string `json:"base_url" structs:"base_url" mapstructure:"base_url"` TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go index 4bf5a27e297b..3080bb6cfffa 100644 --- a/builtin/credential/github/path_login.go +++ b/builtin/credential/github/path_login.go @@ -9,6 +9,7 @@ 
import ( "github.com/google/go-github/github" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -63,31 +64,28 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra verifyResp = verifyResponse } - config, err := b.Config(ctx, req.Storage) - if err != nil { - return nil, err + auth := &logical.Auth{ + InternalData: map[string]interface{}{ + "token": token, + }, + Metadata: map[string]string{ + "username": *verifyResp.User.Login, + "org": *verifyResp.Org.Login, + }, + DisplayName: *verifyResp.User.Login, + Alias: &logical.Alias{ + Name: *verifyResp.User.Login, + }, + } + verifyResp.Config.PopulateTokenAuth(auth) + + // Add in configured policies from user/group mapping + if len(verifyResp.Policies) > 0 { + auth.Policies = append(auth.Policies, verifyResp.Policies...) } resp := &logical.Response{ - Auth: &logical.Auth{ - InternalData: map[string]interface{}{ - "token": token, - }, - Policies: verifyResp.Policies, - Metadata: map[string]string{ - "username": *verifyResp.User.Login, - "org": *verifyResp.Org.Login, - }, - DisplayName: *verifyResp.User.Login, - LeaseOptions: logical.LeaseOptions{ - TTL: config.TTL, - MaxTTL: config.MaxTTL, - Renewable: true, - }, - Alias: &logical.Alias{ - Name: *verifyResp.User.Login, - }, - }, + Auth: auth, } for _, teamName := range verifyResp.TeamNames { @@ -125,14 +123,10 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f return nil, fmt.Errorf("policies do not match") } - config, err := b.Config(ctx, req.Storage) - if err != nil { - return nil, err - } - resp := &logical.Response{Auth: req.Auth} - resp.Auth.TTL = config.TTL - resp.Auth.MaxTTL = config.MaxTTL + resp.Auth.Period = verifyResp.Config.TokenPeriod + resp.Auth.TTL = verifyResp.Config.TokenTTL + resp.Auth.MaxTTL = verifyResp.Config.TokenMaxTTL 
// Remove old aliases resp.Auth.GroupAliases = nil @@ -151,9 +145,18 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, t if err != nil { return nil, nil, err } + if config == nil { + return nil, logical.ErrorResponse("configuration has not been set"), nil + } + + // Check for a CIDR match. + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, config.TokenBoundCIDRs) { + return nil, nil, logical.ErrPermissionDenied + } + if config.Organization == "" { return nil, logical.ErrorResponse( - "configure the github credential backend first"), nil + "organization not found in configuration"), nil } client, err := b.Client(token) @@ -255,6 +258,7 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, t Org: org, Policies: append(groupPoliciesList, userPoliciesList...), TeamNames: teamNames, + Config: config, }, nil, nil } @@ -263,4 +267,7 @@ type verifyCredentialsResp struct { Org *github.Organization Policies []string TeamNames []string + + // This is just a cache to send back to the caller + Config *config } diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index 5aa168a94255..d8a6d9d72935 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -77,7 +77,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri LDAP: ldaputil.NewLDAP(), } - c, err := ldapClient.DialLDAP(cfg) + c, err := ldapClient.DialLDAP(cfg.ConfigEntry) if err != nil { return nil, logical.ErrorResponse(err.Error()), nil, nil } @@ -88,7 +88,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri // Clean connection defer c.Close() - userBindDN, err := ldapClient.GetUserBindDN(cfg, c, username) + userBindDN, err := ldapClient.GetUserBindDN(cfg.ConfigEntry, c, username) if err != nil { if b.Logger().IsDebug() { b.Logger().Debug("error getting user bind DN", "error", err) @@ -127,12 +127,12 @@ func (b *backend) 
Login(ctx context.Context, req *logical.Request, username stri } } - userDN, err := ldapClient.GetUserDN(cfg, c, userBindDN) + userDN, err := ldapClient.GetUserDN(cfg.ConfigEntry, c, userBindDN) if err != nil { return nil, logical.ErrorResponse(err.Error()), nil, nil } - ldapGroups, err := ldapClient.GetLdapGroups(cfg, c, userDN, username) + ldapGroups, err := ldapClient.GetLdapGroups(cfg.ConfigEntry, c, userDN, username) if err != nil { return nil, logical.ErrorResponse(err.Error()), nil, nil } diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index 73fd2c24bf87..fd5104546ad8 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -10,9 +10,12 @@ import ( "testing" "time" + "github.com/go-test/deep" "github.com/hashicorp/vault/helper/namespace" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/ldaputil" "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -230,8 +233,9 @@ func TestLdapAuthBackend_CaseSensitivity(t *testing.T) { "groups": "EngineerS", "policies": "userpolicy", }, - Path: "users/tesla", - Storage: storage, + Path: "users/tesla", + Storage: storage, + Connection: &logical.Connection{}, } resp, err = b.HandleRequest(ctx, userReq) if err != nil || (resp != nil && resp.IsError()) { @@ -245,7 +249,8 @@ func TestLdapAuthBackend_CaseSensitivity(t *testing.T) { Data: map[string]interface{}{ "password": "password", }, - Storage: storage, + Storage: storage, + Connection: &logical.Connection{}, } resp, err = b.HandleRequest(ctx, loginReq) if err != nil || (resp != nil && resp.IsError()) { @@ -325,8 +330,9 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) { Data: map[string]interface{}{ "policies": "grouppolicy", }, - Path: "groups/engineers", - Storage: storage, + Path: 
"groups/engineers", + Storage: storage, + Connection: &logical.Connection{}, } resp, err = b.HandleRequest(context.Background(), groupReq) if err != nil || (resp != nil && resp.IsError()) { @@ -339,8 +345,9 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) { "groups": "engineers", "policies": "userpolicy", }, - Path: "users/tesla", - Storage: storage, + Path: "users/tesla", + Storage: storage, + Connection: &logical.Connection{}, } resp, err = b.HandleRequest(context.Background(), userReq) @@ -354,7 +361,8 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) { Data: map[string]interface{}{ "password": "password", }, - Storage: storage, + Storage: storage, + Connection: &logical.Connection{}, } resp, err = b.HandleRequest(context.Background(), loginReq) @@ -581,6 +589,7 @@ func testAccStepConfigUrl(t *testing.T) logicaltest.TestStep { "userdn": "dc=example,dc=com", "groupdn": "dc=example,dc=com", "case_sensitive_names": true, + "token_policies": "abc,xyz", }, } } @@ -600,6 +609,7 @@ func testAccStepConfigUrlWithAuthBind(t *testing.T) logicaltest.TestStep { "binddn": "cn=read-only-admin,dc=example,dc=com", "bindpass": "password", "case_sensitive_names": true, + "token_policies": "abc,xyz", }, } } @@ -617,6 +627,7 @@ func testAccStepConfigUrlWithDiscover(t *testing.T) logicaltest.TestStep { "groupdn": "dc=example,dc=com", "discoverdn": true, "case_sensitive_names": true, + "token_policies": "abc,xyz", }, } } @@ -752,7 +763,7 @@ func testAccStepLogin(t *testing.T, user string, pass string) logicaltest.TestSt Unauthenticated: true, // Verifies user tesla maps to groups via local group (engineers) as well as remote group (Scientists) - Check: logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}), + Check: logicaltest.TestCheckAuth([]string{"abc", "bar", "default", "foo", "xyz"}), } } @@ -766,7 +777,7 @@ func testAccStepLoginNoAttachedPolicies(t *testing.T, user string, pass string) Unauthenticated: true, // Verifies user tesla maps to groups via local group 
(engineers) as well as remote group (Scientists) - Check: logicaltest.TestCheckAuth([]string{"default"}), + Check: logicaltest.TestCheckAuth([]string{"abc", "default", "xyz"}), } } @@ -839,3 +850,92 @@ func testAccStepUserList(t *testing.T, users []string) logicaltest.TestStep { }, } } + +func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + ctx := context.Background() + + // Write in some initial config + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": "ldap://ldap.forumsys.com", + "userattr": "uid", + "userdn": "dc=example,dc=com", + "groupdn": "dc=example,dc=com", + "binddn": "cn=read-only-admin,dc=example,dc=com", + "token_period": "5m", + "token_explicit_max_ttl": "24h", + }, + Storage: storage, + Connection: &logical.Connection{}, + } + resp, err = b.HandleRequest(ctx, configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + fd, err := b.getConfigFieldData() + if err != nil { + t.Fatal(err) + } + defParams, err := ldaputil.NewConfigEntry(nil, fd) + if err != nil { + t.Fatal(err) + } + falseBool := new(bool) + *falseBool = false + + exp := &ldapConfigEntry{ + TokenParams: tokenutil.TokenParams{ + TokenPeriod: 5 * time.Minute, + TokenExplicitMaxTTL: 24 * time.Hour, + }, + ConfigEntry: &ldaputil.ConfigEntry{ + Url: "ldap://ldap.forumsys.com", + UserAttr: "uid", + UserDN: "dc=example,dc=com", + GroupDN: "dc=example,dc=com", + BindDN: "cn=read-only-admin,dc=example,dc=com", + GroupFilter: defParams.GroupFilter, + DenyNullBind: defParams.DenyNullBind, + GroupAttr: defParams.GroupAttr, + TLSMinVersion: defParams.TLSMinVersion, + TLSMaxVersion: defParams.TLSMaxVersion, + CaseSensitiveNames: falseBool, + }, + } + + configEntry, err := b.Config(ctx, configReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(exp, configEntry); diff 
!= nil { + t.Fatal(diff) + } + + // Store just the config entry portion, for upgrade testing + entry, err := logical.StorageEntryJSON("config", configEntry.ConfigEntry) + if err != nil { + t.Fatal(err) + } + err = configReq.Storage.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + configEntry, err = b.Config(ctx, configReq) + if err != nil { + t.Fatal(err) + } + // We won't have token params anymore so nil those out + exp.TokenParams = tokenutil.TokenParams{} + if diff := deep.Equal(exp, configEntry); diff != nil { + t.Fatal(diff) + } + +} diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go index c7d1b76916e0..55e56cd1ab24 100644 --- a/builtin/credential/ldap/path_config.go +++ b/builtin/credential/ldap/path_config.go @@ -6,11 +6,12 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/ldaputil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) func pathConfig(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: `config`, Fields: ldaputil.ConfigFields(), @@ -21,41 +22,49 @@ func pathConfig(b *backend) *framework.Path { HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Configure", + }, } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." + return p } /* * Construct ConfigEntry struct using stored configuration. 
*/ -func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldaputil.ConfigEntry, error) { - // Schema for ConfigEntry - fd, err := b.getConfigFieldData() - if err != nil { - return nil, err - } - - // Create a new ConfigEntry, filling in defaults where appropriate - result, err := ldaputil.NewConfigEntry(fd) - if err != nil { - return nil, err - } - +func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldapConfigEntry, error) { storedConfig, err := req.Storage.Get(ctx, "config") if err != nil { return nil, err } if storedConfig == nil { + // Create a new ConfigEntry, filling in defaults where appropriate + fd, err := b.getConfigFieldData() + if err != nil { + return nil, err + } + + result, err := ldaputil.NewConfigEntry(nil, fd) + if err != nil { + return nil, err + } + // No user overrides, return default configuration result.CaseSensitiveNames = new(bool) *result.CaseSensitiveNames = false - return result, nil + return &ldapConfigEntry{ConfigEntry: result}, nil } // Deserialize stored configuration. // Fields not specified in storedConfig will retain their defaults. 
- if err := storedConfig.DecodeJSON(&result); err != nil { + result := new(ldapConfigEntry) + result.ConfigEntry = new(ldaputil.ConfigEntry) + if err := storedConfig.DecodeJSON(result); err != nil { return nil, err } @@ -89,15 +98,25 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f return nil, nil } - resp := &logical.Response{ - Data: cfg.PasswordlessMap(), - } - return resp, nil + data := cfg.PasswordlessMap() + cfg.PopulateTokenData(data) + + return &logical.Response{ + Data: data, + }, nil } func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return nil, nil + } + // Build a ConfigEntry struct out of the supplied FieldData - cfg, err := ldaputil.NewConfigEntry(d) + cfg.ConfigEntry, err = ldaputil.NewConfigEntry(cfg.ConfigEntry, d) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -109,6 +128,10 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d * *cfg.CaseSensitiveNames = false } + if err := cfg.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + entry, err := logical.StorageEntryJSON("config", cfg) if err != nil { return nil, err @@ -140,6 +163,11 @@ func (b *backend) getConfigFieldData() (*framework.FieldData, error) { return &fd, nil } +type ldapConfigEntry struct { + tokenutil.TokenParams + *ldaputil.ConfigEntry +} + const pathConfigHelpSyn = ` Configure the LDAP server to connect to, along with its options. 
` diff --git a/builtin/credential/ldap/path_groups.go b/builtin/credential/ldap/path_groups.go index c8a33d9d5748..76e64c4505a0 100644 --- a/builtin/credential/ldap/path_groups.go +++ b/builtin/credential/ldap/path_groups.go @@ -19,6 +19,9 @@ func pathGroupsList(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + }, } } @@ -45,6 +48,9 @@ func pathGroups(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + }, } } diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go index 690a94131fa8..9c4584879305 100644 --- a/builtin/credential/ldap/path_login.go +++ b/builtin/credential/ldap/path_login.go @@ -3,9 +3,9 @@ package ldap import ( "context" "fmt" - "sort" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -51,6 +51,19 @@ func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Requ } func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("auth method not configured"), nil + } + + // Check for a CIDR match. 
+ if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cfg.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + username := d.Get("username").(string) password := d.Get("password").(string) @@ -68,10 +81,7 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew resp = &logical.Response{} } - sort.Strings(policies) - - resp.Auth = &logical.Auth{ - Policies: policies, + auth := &logical.Auth{ Metadata: map[string]string{ "username": username, }, @@ -79,14 +89,20 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew "password": password, }, DisplayName: username, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - }, Alias: &logical.Alias{ Name: username, }, } + cfg.PopulateTokenAuth(auth) + + // Add in configured policies from mappings + if len(policies) > 0 { + auth.Policies = append(auth.Policies, policies...) + } + + resp.Auth = auth + for _, groupName := range groupNames { if groupName == "" { continue @@ -99,6 +115,14 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew } func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("auth method not configured"), nil + } + username := req.Auth.Metadata["username"] password := req.Auth.InternalData["password"].(string) @@ -106,12 +130,19 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f if len(loginPolicies) == 0 { return resp, err } + finalPolicies := cfg.TokenPolicies + if len(loginPolicies) > 0 { + finalPolicies = append(finalPolicies, loginPolicies...) 
+ } - if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.TokenPolicies) { + if !policyutil.EquivalentPolicies(finalPolicies, req.Auth.TokenPolicies) { return nil, fmt.Errorf("policies have changed, not renewing") } resp.Auth = req.Auth + resp.Auth.Period = cfg.TokenPeriod + resp.Auth.TTL = cfg.TokenTTL + resp.Auth.MaxTTL = cfg.TokenMaxTTL // Remove old aliases resp.Auth.GroupAliases = nil diff --git a/builtin/credential/ldap/path_users.go b/builtin/credential/ldap/path_users.go index 2eb566db325a..60276cf64e5e 100644 --- a/builtin/credential/ldap/path_users.go +++ b/builtin/credential/ldap/path_users.go @@ -20,6 +20,10 @@ func pathUsersList(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + Action: "Create", + }, } } @@ -27,17 +31,17 @@ func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the LDAP user.", }, - "groups": &framework.FieldSchema{ - Type: framework.TypeString, + "groups": { + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of additional groups associated with the user.", }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated with the user.", }, @@ -51,6 +55,9 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + }, } } @@ -126,7 +133,7 @@ func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *fr lowercaseGroups = true } - groups := strutil.RemoveDuplicates(strutil.ParseStringSlice(d.Get("groups").(string), ","), lowercaseGroups) + groups := 
strutil.RemoveDuplicates(d.Get("groups").([]string), lowercaseGroups) policies := policyutil.ParsePolicies(d.Get("policies")) for i, g := range groups { groups[i] = strings.TrimSpace(g) diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go index 0e384ae24b87..36e6eaea65f8 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -8,6 +8,7 @@ import ( "github.com/chrismalek/oktasdk-go/okta" "github.com/hashicorp/vault/helper/mfa" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -65,6 +66,11 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return nil, logical.ErrorResponse("Okta auth method not configured"), nil, nil } + // Check for a CIDR match. + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cfg.TokenBoundCIDRs) { + return nil, nil, nil, logical.ErrPermissionDenied + } + client := cfg.OktaClient() type mfaFactor struct { diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go index f2b400541547..3cbf1041e2ba 100644 --- a/builtin/credential/okta/path_config.go +++ b/builtin/credential/okta/path_config.go @@ -10,6 +10,7 @@ import ( "github.com/chrismalek/oktasdk-go/okta" cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -19,51 +20,61 @@ const ( ) func pathConfig(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: `config`, Fields: map[string]*framework.FieldSchema{ "organization": &framework.FieldSchema{ Type: framework.TypeString, - Description: "(DEPRECATED) Okta organization to authenticate against. 
Use org_name instead.", + Description: "Use org_name instead.", Deprecated: true, }, "org_name": &framework.FieldSchema{ Type: framework.TypeString, Description: "Name of the organization to be used in the Okta API.", - DisplayName: "Organization Name", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Organization Name", + }, }, "token": &framework.FieldSchema{ Type: framework.TypeString, - Description: "(DEPRECATED) Okta admin API token. Use api_token instead.", + Description: "Use api_token instead.", Deprecated: true, }, "api_token": &framework.FieldSchema{ Type: framework.TypeString, Description: "Okta API key.", - DisplayName: "API Token", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "API Token", + }, }, "base_url": &framework.FieldSchema{ Type: framework.TypeString, Description: `The base domain to use for the Okta API. When not specified in the configuration, "okta.com" is used.`, - DisplayName: "Base URL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Base URL", + }, }, "production": &framework.FieldSchema{ Type: framework.TypeBool, - Description: `(DEPRECATED) Use base_url.`, + Description: `Use base_url instead.`, Deprecated: true, }, "ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Description: `Duration after which authentication will be expired`, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, }, "max_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Description: `Maximum duration after which authentication will be expired`, + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, }, "bypass_okta_mfa": &framework.FieldSchema{ Type: framework.TypeBool, Description: `When set true, requests by Okta for a MFA check will be bypassed. 
This also disallows certain status checks on the account, such as whether the password is expired.`, - DisplayName: "Bypass Okta MFA", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Bypass Okta MFA", + }, }, }, @@ -77,6 +88,10 @@ func pathConfig(b *backend) *framework.Path { HelpSynopsis: pathConfigHelp, } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." + return p } // Config returns the configuration for this backend. @@ -96,6 +111,13 @@ func (b *backend) Config(ctx context.Context, s logical.Storage) (*ConfigEntry, } } + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + return &result, nil } @@ -108,20 +130,28 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f return nil, nil } - resp := &logical.Response{ - Data: map[string]interface{}{ - "organization": cfg.Org, - "org_name": cfg.Org, - "ttl": cfg.TTL.Seconds(), - "max_ttl": cfg.MaxTTL.Seconds(), - "bypass_okta_mfa": cfg.BypassOktaMFA, - }, + data := map[string]interface{}{ + "organization": cfg.Org, + "org_name": cfg.Org, + "bypass_okta_mfa": cfg.BypassOktaMFA, } + cfg.PopulateTokenData(data) + if cfg.BaseURL != "" { - resp.Data["base_url"] = cfg.BaseURL + data["base_url"] = cfg.BaseURL } if cfg.Production != nil { - resp.Data["production"] = *cfg.Production + data["production"] = *cfg.Production + } + if cfg.TTL > 0 { + data["ttl"] = int64(cfg.TTL.Seconds()) + } + if cfg.MaxTTL > 0 { + data["max_ttl"] = int64(cfg.MaxTTL.Seconds()) + } + + resp := &logical.Response{ + Data: data, } if cfg.BypassOktaMFA { @@ -192,18 +222,19 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d * cfg.BypassOktaMFA = bypass.(bool) } - ttl, ok := d.GetOk("ttl") - if ok { - cfg.TTL = 
time.Duration(ttl.(int)) * time.Second - } else if req.Operation == logical.CreateOperation { - cfg.TTL = time.Duration(d.Get("ttl").(int)) * time.Second + if err := cfg.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - maxTTL, ok := d.GetOk("max_ttl") - if ok { - cfg.MaxTTL = time.Duration(maxTTL.(int)) * time.Second - } else if req.Operation == logical.CreateOperation { - cfg.MaxTTL = time.Duration(d.Get("max_ttl").(int)) * time.Second + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(d, "ttl", "token_ttl", &cfg.TTL, &cfg.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "max_ttl", "token_max_ttl", &cfg.MaxTTL, &cfg.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } } jsonCfg, err := logical.StorageEntryJSON("config", cfg) @@ -251,6 +282,8 @@ func (c *ConfigEntry) OktaClient() *okta.Client { // ConfigEntry for Okta type ConfigEntry struct { + tokenutil.TokenParams + Org string `json:"organization"` Token string `json:"token"` BaseURL string `json:"base_url"` diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index f217e4b9e797..8538a7a3c957 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -3,7 +3,6 @@ package okta import ( "context" "fmt" - "sort" "strings" "github.com/go-errors/errors" @@ -70,15 +69,12 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew resp = &logical.Response{} } - sort.Strings(policies) - cfg, err := b.getConfig(ctx, req) if err != nil { return nil, err } - resp.Auth = &logical.Auth{ - Policies: policies, + auth := &logical.Auth{ Metadata: map[string]string{ "username": username, "policies": strings.Join(policies, ","), @@ -87,15 +83,18 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew "password": password, }, 
DisplayName: username, - LeaseOptions: logical.LeaseOptions{ - TTL: cfg.TTL, - MaxTTL: cfg.MaxTTL, - Renewable: true, - }, Alias: &logical.Alias{ Name: username, }, } + cfg.PopulateTokenAuth(auth) + + // Add in configured policies from mappings + if len(policies) > 0 { + auth.Policies = append(auth.Policies, policies...) + } + + resp.Auth = auth for _, groupName := range groupNames { if groupName == "" { @@ -113,23 +112,28 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f username := req.Auth.Metadata["username"] password := req.Auth.InternalData["password"].(string) + cfg, err := b.getConfig(ctx, req) + if err != nil { + return nil, err + } + loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password) if len(loginPolicies) == 0 { return resp, err } - if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.TokenPolicies) { - return nil, fmt.Errorf("policies have changed, not renewing") + finalPolicies := cfg.TokenPolicies + if len(loginPolicies) > 0 { + finalPolicies = append(finalPolicies, loginPolicies...) 
} - - cfg, err := b.getConfig(ctx, req) - if err != nil { - return nil, err + if !policyutil.EquivalentPolicies(finalPolicies, req.Auth.TokenPolicies) { + return nil, fmt.Errorf("policies have changed, not renewing") } resp.Auth = req.Auth - resp.Auth.TTL = cfg.TTL - resp.Auth.MaxTTL = cfg.MaxTTL + resp.Auth.Period = cfg.TokenPeriod + resp.Auth.TTL = cfg.TokenTTL + resp.Auth.MaxTTL = cfg.TokenMaxTTL // Remove old aliases resp.Auth.GroupAliases = nil diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go index a00f8f5b92f9..28476e337a2f 100644 --- a/builtin/credential/radius/path_config.go +++ b/builtin/credential/radius/path_config.go @@ -5,23 +5,28 @@ import ( "strings" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) func pathConfig(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "config", Fields: map[string]*framework.FieldSchema{ "host": &framework.FieldSchema{ Type: framework.TypeString, Description: "RADIUS server host", - DisplayName: "Host", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Host", + }, }, - "port": &framework.FieldSchema{ Type: framework.TypeInt, Default: 1812, Description: "RADIUS server port (default: 1812)", + DisplayAttrs: &framework.DisplayAttributes{ + Value: 1812, + }, }, "secret": &framework.FieldSchema{ Type: framework.TypeString, @@ -31,29 +36,42 @@ func pathConfig(b *backend) *framework.Path { Type: framework.TypeString, Default: "", Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: empty)", - DisplayName: "Policies for unregistered users", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Policies for unregistered users", + }, }, "dial_timeout": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Default: 10, Description: "Number of seconds before connect times out 
(default: 10)", + DisplayAttrs: &framework.DisplayAttributes{ + Value: 10, + }, }, "read_timeout": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Default: 10, Description: "Number of seconds before response times out (default: 10)", + DisplayAttrs: &framework.DisplayAttributes{ + Value: 10, + }, }, "nas_port": &framework.FieldSchema{ Type: framework.TypeInt, Default: 10, Description: "RADIUS NAS port field (default: 10)", - DisplayName: "NAS Port", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "NAS Port", + Value: 10, + }, }, "nas_identifier": &framework.FieldSchema{ Type: framework.TypeString, Default: "", Description: "RADIUS NAS Identifier field (optional)", - DisplayName: "NAS Identifier", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "NAS Identifier", + }, }, }, @@ -68,6 +86,10 @@ func pathConfig(b *backend) *framework.Path { HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users." + return p } // Establishes dichotomy of request operation between CreateOperation and UpdateOperation. 
@@ -111,18 +133,20 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f return nil, nil } - resp := &logical.Response{ - Data: map[string]interface{}{ - "host": cfg.Host, - "port": cfg.Port, - "unregistered_user_policies": cfg.UnregisteredUserPolicies, - "dial_timeout": cfg.DialTimeout, - "read_timeout": cfg.ReadTimeout, - "nas_port": cfg.NasPort, - "nas_identifier": cfg.NasIdentifier, - }, + data := map[string]interface{}{ + "host": cfg.Host, + "port": cfg.Port, + "unregistered_user_policies": cfg.UnregisteredUserPolicies, + "dial_timeout": cfg.DialTimeout, + "read_timeout": cfg.ReadTimeout, + "nas_port": cfg.NasPort, + "nas_identifier": cfg.NasIdentifier, } - return resp, nil + cfg.PopulateTokenData(data) + + return &logical.Response{ + Data: data, + }, nil } func (b *backend) pathConfigCreateUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -135,6 +159,10 @@ func (b *backend) pathConfigCreateUpdate(ctx context.Context, req *logical.Reque cfg = &ConfigEntry{} } + if err := cfg.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + host, ok := d.GetOk("host") if ok { cfg.Host = strings.ToLower(host.(string)) @@ -219,6 +247,8 @@ func (b *backend) pathConfigCreateUpdate(ctx context.Context, req *logical.Reque } type ConfigEntry struct { + tokenutil.TokenParams + Host string `json:"host" structs:"host" mapstructure:"host"` Port int `json:"port" structs:"port" mapstructure:"port"` Secret string `json:"secret" structs:"secret" mapstructure:"secret"` diff --git a/builtin/credential/radius/path_login.go b/builtin/credential/radius/path_login.go index 4351e555d557..d4462bd0b24b 100644 --- a/builtin/credential/radius/path_login.go +++ b/builtin/credential/radius/path_login.go @@ -12,6 +12,7 @@ import ( . 
"layeh.com/radius/rfc2865" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -62,6 +63,19 @@ func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Requ } func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("radius backend not configured"), nil + } + + // Check for a CIDR match. + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cfg.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + username := d.Get("username").(string) password := d.Get("password").(string) @@ -88,8 +102,7 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew } } - resp.Auth = &logical.Auth{ - Policies: policies, + auth := &logical.Auth{ Metadata: map[string]string{ "username": username, "policies": strings.Join(policies, ","), @@ -98,18 +111,28 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew "password": password, }, DisplayName: username, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - }, Alias: &logical.Alias{ Name: username, }, } + cfg.PopulateTokenAuth(auth) + + if policies != nil { + resp.Auth.Policies = append(resp.Auth.Policies, policies...) 
+ } + + resp.Auth = auth return resp, nil } func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - var err error + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("radius backend not configured"), nil + } username := req.Auth.Metadata["username"] password := req.Auth.InternalData["password"].(string) @@ -121,16 +144,22 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f if err != nil || (resp != nil && resp.IsError()) { return resp, err } + finalPolicies := cfg.TokenPolicies + if loginPolicies != nil { + finalPolicies = append(finalPolicies, loginPolicies...) + } - if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.TokenPolicies) { + if !policyutil.EquivalentPolicies(finalPolicies, req.Auth.TokenPolicies) { return nil, fmt.Errorf("policies have changed, not renewing") } + req.Auth.Period = cfg.TokenPeriod + req.Auth.TTL = cfg.TokenTTL + req.Auth.MaxTTL = cfg.TokenMaxTTL return &logical.Response{Auth: req.Auth}, nil } func (b *backend) RadiusLogin(ctx context.Context, req *logical.Request, username string, password string) ([]string, *logical.Response, error) { - cfg, err := b.Config(ctx, req) if err != nil { return nil, nil, err diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go index 57614f8d6f2a..e02031141139 100644 --- a/builtin/credential/userpass/backend_test.go +++ b/builtin/credential/userpass/backend_test.go @@ -9,8 +9,11 @@ import ( "crypto/tls" + "github.com/go-test/deep" + sockaddr "github.com/hashicorp/go-sockaddr" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -20,7 +23,7 @@ const ( testSysMaxTTL = time.Hour * 20 ) 
-func TestBackend_TTL(t *testing.T) { +func TestBackend_CRUD(t *testing.T) { var resp *logical.Response var err error @@ -39,12 +42,22 @@ func TestBackend_TTL(t *testing.T) { t.Fatalf("failed to create backend") } + localhostSockAddr, err := sockaddr.NewSockAddr("127.0.0.1") + if err != nil { + t.Fatal(err) + } + + // Use new token_ forms resp, err = b.HandleRequest(ctx, &logical.Request{ Path: "users/testuser", Operation: logical.CreateOperation, Storage: storage, Data: map[string]interface{}{ - "password": "testpassword", + "password": "testpassword", + "token_ttl": 5, + "token_max_ttl": 10, + "token_policies": []string{"foo"}, + "token_bound_cidrs": []string{"127.0.0.1"}, }, }) if err != nil || (resp != nil && resp.IsError()) { @@ -59,17 +72,32 @@ func TestBackend_TTL(t *testing.T) { if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) } - if resp.Data["ttl"].(float64) != 0 && resp.Data["max_ttl"].(float64) != 0 { - t.Fatalf("bad: ttl and max_ttl are not set correctly") + if resp.Data["token_ttl"].(int64) != 5 && resp.Data["token_max_ttl"].(int64) != 10 { + t.Fatalf("bad: token_ttl and token_max_ttl are not set correctly") + } + if diff := deep.Equal(resp.Data["token_policies"], []string{"foo"}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{localhostSockAddr}}); diff != nil { + t.Fatal(diff) + } + + localhostSockAddr, err = sockaddr.NewSockAddr("127.0.1.1") + if err != nil { + t.Fatal(err) } + // Use the old forms and verify that they zero out the new ones and then + // the new ones read with the expected value resp, err = b.HandleRequest(ctx, &logical.Request{ Path: "users/testuser", Operation: logical.UpdateOperation, Storage: storage, Data: map[string]interface{}{ - "ttl": "5m", - "max_ttl": "10m", + "ttl": "5m", + "max_ttl": "10m", + "policies": []string{"bar"}, + "bound_cidrs": []string{"127.0.1.1"}, }, }) if 
err != nil || (resp != nil && resp.IsError()) { @@ -84,9 +112,24 @@ func TestBackend_TTL(t *testing.T) { if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) } - if resp.Data["ttl"].(float64) != 300 && resp.Data["max_ttl"].(float64) != 600 { + if resp.Data["ttl"].(int64) != 300 && resp.Data["max_ttl"].(int64) != 600 { t.Fatalf("bad: ttl and max_ttl are not set correctly") } + if resp.Data["token_ttl"].(int64) != 300 && resp.Data["token_max_ttl"].(int64) != 600 { + t.Fatalf("bad: token_ttl and token_max_ttl are not set correctly") + } + if diff := deep.Equal(resp.Data["policies"], []string{"bar"}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["token_policies"], []string{"bar"}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["bound_cidrs"], []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{localhostSockAddr}}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{localhostSockAddr}}); diff != nil { + t.Fatal(diff) + } } func TestBackend_basic(t *testing.T) { @@ -318,3 +361,57 @@ func testAccStepReadUser(t *testing.T, name string, policies string) logicaltest }, } } + +func TestBackend_UserUpgrade(t *testing.T) { + s := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = s + + ctx := context.Background() + + b := Backend() + if b == nil { + t.Fatalf("failed to create backend") + } + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + + foo := &UserEntry{ + Policies: []string{"foo"}, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + } + + entry, err := logical.StorageEntryJSON("user/foo", foo) + if err != nil { + t.Fatal(err) + } + err = s.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + userEntry, err := 
b.user(ctx, s, "foo") + if err != nil { + t.Fatal(err) + } + + exp := &UserEntry{ + Policies: []string{"foo"}, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + TokenParams: tokenutil.TokenParams{ + TokenPolicies: []string{"foo"}, + TokenTTL: time.Second, + TokenMaxTTL: time.Second, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + }, + } + if diff := deep.Equal(userEntry, exp); diff != nil { + t.Fatal(diff) + } +} diff --git a/builtin/credential/userpass/path_login.go b/builtin/credential/userpass/path_login.go index 44e818f7d4f4..1a81a70f6ccf 100644 --- a/builtin/credential/userpass/path_login.go +++ b/builtin/credential/userpass/path_login.go @@ -64,6 +64,11 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew // Get the user and validate auth user, userError := b.user(ctx, req.Storage, username) + // Check for a CIDR match. + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, user.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + var userPassword []byte var legacyPassword bool // If there was an error or it's nil, we fake a password for the bcrypt @@ -103,28 +108,19 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew return logical.ErrorResponse("invalid username or password"), nil } - // Check for a CIDR match. 
- if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, user.BoundCIDRs) { - return logical.ErrorResponse("login request originated from invalid CIDR"), nil + auth := &logical.Auth{ + Metadata: map[string]string{ + "username": username, + }, + DisplayName: username, + Alias: &logical.Alias{ + Name: username, + }, } + user.PopulateTokenAuth(auth) return &logical.Response{ - Auth: &logical.Auth{ - Policies: user.Policies, - Metadata: map[string]string{ - "username": username, - }, - DisplayName: username, - LeaseOptions: logical.LeaseOptions{ - TTL: user.TTL, - MaxTTL: user.MaxTTL, - Renewable: true, - }, - Alias: &logical.Alias{ - Name: username, - }, - BoundCIDRs: user.BoundCIDRs, - }, + Auth: auth, }, nil } @@ -139,13 +135,14 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f return nil, nil } - if !policyutil.EquivalentPolicies(user.Policies, req.Auth.TokenPolicies) { + if !policyutil.EquivalentPolicies(user.TokenPolicies, req.Auth.TokenPolicies) { return nil, fmt.Errorf("policies have changed, not renewing") } resp := &logical.Response{Auth: req.Auth} - resp.Auth.TTL = user.TTL - resp.Auth.MaxTTL = user.MaxTTL + resp.Auth.Period = user.TokenPeriod + resp.Auth.TTL = user.TokenTTL + resp.Auth.MaxTTL = user.TokenMaxTTL return resp, nil } diff --git a/builtin/credential/userpass/path_user_policies.go b/builtin/credential/userpass/path_user_policies.go index 956ee097b571..dadc5480cce0 100644 --- a/builtin/credential/userpass/path_user_policies.go +++ b/builtin/credential/userpass/path_user_policies.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -18,6 +19,11 @@ func pathUserPolicies(b *backend) *framework.Path { Description: "Username for this user.", }, "policies": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: 
tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + "token_policies": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", }, @@ -43,7 +49,22 @@ func (b *backend) pathUserPoliciesUpdate(ctx context.Context, req *logical.Reque return nil, fmt.Errorf("username does not exist") } - userEntry.Policies = policyutil.ParsePolicies(d.Get("policies")) + policiesRaw, ok := d.GetOk("token_policies") + if !ok { + policiesRaw, ok = d.GetOk("policies") + if ok { + userEntry.Policies = policyutil.ParsePolicies(policiesRaw) + userEntry.TokenPolicies = userEntry.Policies + } + } else { + userEntry.TokenPolicies = policyutil.ParsePolicies(policiesRaw) + _, ok = d.GetOk("policies") + if ok { + userEntry.Policies = userEntry.TokenPolicies + } else { + userEntry.Policies = nil + } + } return nil, b.setUser(ctx, req.Storage, username, userEntry) } diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go index 4e1e5fc24a8c..ae9af65b80c4 100644 --- a/builtin/credential/userpass/path_users.go +++ b/builtin/credential/userpass/path_users.go @@ -8,8 +8,7 @@ import ( sockaddr "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/parseutil" - "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -27,7 +26,7 @@ func pathUsersList(b *backend) *framework.Path { } func pathUsers(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username"), Fields: map[string]*framework.FieldSchema{ "username": &framework.FieldSchema{ @@ -42,23 +41,26 @@ func pathUsers(b *backend) *framework.Path { "policies": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, - Description: "Comma-separated list of policies", + Description: 
tokenutil.DeprecationText("token_policies"), + Deprecated: true, }, "ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Description: "Duration after which authentication will be expired", + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, }, "max_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Description: "Maximum duration after which authentication will be expired", + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, }, "bound_cidrs": &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of -IP addresses which can perform the login operation.`, + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_bound_cidrs"), + Deprecated: true, }, }, @@ -74,10 +76,13 @@ IP addresses which can perform the login operation.`, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, } + + tokenutil.AddTokenFields(p.Fields) + return p } -func (b *backend) userExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { - userEntry, err := b.user(ctx, req.Storage, data.Get("username").(string)) +func (b *backend) userExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { + userEntry, err := b.user(ctx, req.Storage, d.Get("username").(string)) if err != nil { return false, err } @@ -103,6 +108,19 @@ func (b *backend) user(ctx context.Context, s logical.Storage, username string) return nil, err } + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + if len(result.TokenPolicies) == 0 && len(result.Policies) > 0 { + result.TokenPolicies = result.Policies + } + if len(result.TokenBoundCIDRs) == 0 && len(result.BoundCIDRs) > 0 { + result.TokenBoundCIDRs = 
result.BoundCIDRs + } + return &result, nil } @@ -141,13 +159,25 @@ func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *fra return nil, nil } + data := map[string]interface{}{} + user.PopulateTokenData(data) + + // Add backwards compat data + if user.TTL > 0 { + data["ttl"] = int64(user.TTL.Seconds()) + } + if user.MaxTTL > 0 { + data["max_ttl"] = int64(user.MaxTTL.Seconds()) + } + if len(user.Policies) > 0 { + data["policies"] = data["token_policies"] + } + if len(user.BoundCIDRs) > 0 { + data["bound_cidrs"] = user.BoundCIDRs + } + return &logical.Response{ - Data: map[string]interface{}{ - "policies": user.Policies, - "ttl": user.TTL.Seconds(), - "max_ttl": user.MaxTTL.Seconds(), - "bound_cidrs": user.BoundCIDRs, - }, + Data: data, }, nil } @@ -162,6 +192,10 @@ func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d userEntry = &UserEntry{} } + if err := userEntry.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if _, ok := d.GetOk("password"); ok { userErr, intErr := b.updateUserPassword(req, d, userEntry) if intErr != nil { @@ -172,25 +206,24 @@ func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d } } - if policiesRaw, ok := d.GetOk("policies"); ok { - userEntry.Policies = policyutil.ParsePolicies(policiesRaw) - } + // handle upgrade cases + { + if err := tokenutil.UpgradeValue(d, "policies", "token_policies", &userEntry.Policies, &userEntry.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil + } - ttl, ok := d.GetOk("ttl") - if ok { - userEntry.TTL = time.Duration(ttl.(int)) * time.Second - } + if err := tokenutil.UpgradeValue(d, "ttl", "token_ttl", &userEntry.TTL, &userEntry.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } - maxTTL, ok := d.GetOk("max_ttl") - if ok { - userEntry.MaxTTL = time.Duration(maxTTL.(int)) * time.Second - } + if err := 
tokenutil.UpgradeValue(d, "max_ttl", "token_max_ttl", &userEntry.MaxTTL, &userEntry.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } - boundCIDRs, err := parseutil.ParseAddrs(d.Get("bound_cidrs")) - if err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + if err := tokenutil.UpgradeValue(d, "bound_cidrs", "token_bound_cirs", &userEntry.BoundCIDRs, &userEntry.TokenBoundCIDRs); err != nil { + return logical.ErrorResponse(err.Error()), nil + } } - userEntry.BoundCIDRs = boundCIDRs return nil, b.setUser(ctx, req.Storage, username, userEntry) } @@ -204,6 +237,8 @@ func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *fr } type UserEntry struct { + tokenutil.TokenParams + // Password is deprecated in Vault 0.2 in favor of // PasswordHash, but is retained for backwards compatibility. Password string diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go index 6091d93104fe..8a3f728f9bc2 100644 --- a/builtin/logical/aws/path_roles.go +++ b/builtin/logical/aws/path_roles.go @@ -41,7 +41,9 @@ func pathRoles(b *backend) *framework.Path { "name": &framework.FieldSchema{ Type: framework.TypeString, Description: "Name of the policy", - DisplayName: "Policy Name", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Policy Name", + }, }, "credential_type": &framework.FieldSchema{ @@ -52,13 +54,17 @@ func pathRoles(b *backend) *framework.Path { "role_arns": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: "ARNs of AWS roles allowed to be assumed. Only valid when credential_type is " + assumedRoleCred, - DisplayName: "Role ARNs", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Role ARNs", + }, }, "policy_arns": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: "ARNs of AWS policies to attach to IAM users. 
Only valid when credential_type is " + iamUserCred, - DisplayName: "Policy ARNs", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Policy ARNs", + }, }, "policy_document": &framework.FieldSchema{ @@ -73,33 +79,39 @@ GetFederationToken API call, acting as a filter on permissions available.`, "default_sts_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred), - DisplayName: "Default TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Default STS TTL", + }, }, "max_sts_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred), - DisplayName: "Max TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Max STS TTL", + }, }, "arn": &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Deprecated; use role_arns or policy_arns instead. ARN Reference to a managed policy -or IAM role to assume`, - Deprecated: true, + Type: framework.TypeString, + Description: `Use role_arns or policy_arns instead.`, + Deprecated: true, }, "policy": &framework.FieldSchema{ Type: framework.TypeString, - Description: "Deprecated; use policy_document instead. IAM policy document", + Description: "Use policy_document instead.", Deprecated: true, }, "user_path": &framework.FieldSchema{ Type: framework.TypeString, Description: "Path for IAM User. 
Only valid when credential_type is " + iamUserCred, - DisplayName: "User Path", - Default: "/", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User Path", + Value: "/", + }, + Default: "/", }, }, diff --git a/builtin/logical/consul/path_roles.go b/builtin/logical/consul/path_roles.go index 05e5b07727c1..55c713c4839d 100644 --- a/builtin/logical/consul/path_roles.go +++ b/builtin/logical/consul/path_roles.go @@ -68,7 +68,8 @@ Defaults to 'client'.`, "lease": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Description: "DEPRECATED: Use ttl.", + Description: "Use ttl instead.", + Deprecated: true, }, }, diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index b28f8fe6f630..804a98d35bec 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -14,11 +14,17 @@ import ( "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" ) -const databaseConfigPath = "database/config/" +const ( + databaseConfigPath = "database/config/" + databaseRolePath = "role/" + databaseStaticRolePath = "static-role/" +) type dbPluginInstance struct { sync.RWMutex @@ -46,6 +52,15 @@ func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, if err := b.Setup(ctx, conf); err != nil { return nil, err } + + b.credRotationQueue = queue.New() + // Create a context with a cancel method for processing any WAL entries and + // populating the queue + initCtx := context.Background() + ictx, cancel := context.WithCancel(initCtx) + b.cancelQueue = cancel + // Load queue and kickoff new periodic ticker + go b.initQueue(ictx, conf) return b, nil } @@ -55,31 +70,39 @@ func Backend(conf *logical.BackendConfig) *databaseBackend 
{ Help: strings.TrimSpace(backendHelp), PathsSpecial: &logical.Paths{ + LocalStorage: []string{ + framework.WALPrefix, + }, SealWrapStorage: []string{ "config/*", + "static-role/*", }, }, - - Paths: []*framework.Path{ - pathListPluginConnection(&b), - pathConfigurePluginConnection(&b), + Paths: framework.PathAppend( + []*framework.Path{ + pathListPluginConnection(&b), + pathConfigurePluginConnection(&b), + pathResetConnection(&b), + }, pathListRoles(&b), pathRoles(&b), pathCredsCreate(&b), - pathResetConnection(&b), pathRotateCredentials(&b), - }, + ), Secrets: []*framework.Secret{ secretCreds(&b), }, - Clean: b.closeAllDBs, + Clean: b.clean, Invalidate: b.invalidate, BackendType: logical.TypeLogical, } b.logger = conf.Logger b.connections = make(map[string]*dbPluginInstance) + + b.roleLocks = locksutil.CreateLocks() + return &b } @@ -89,6 +112,20 @@ type databaseBackend struct { *framework.Backend sync.RWMutex + // CredRotationQueue is an in-memory priority queue used to track Static Roles + // that require periodic rotation. Backends will have a PriorityQueue + // initialized on setup, but only backends that are mounted by a primary + // server or mounted as a local mount will perform the rotations. + // + // cancelQueue is used to remove the priority queue and terminate the + // background ticker. + credRotationQueue *queue.PriorityQueue + cancelQueue context.CancelFunc + + // roleLocks is used to lock modifications to roles in the queue, to ensure + // concurrent requests are not modifying the same role and possibly causing + // issues with the priority queue. 
+ roleLocks []*locksutil.LockEntry } func (b *databaseBackend) DatabaseConfig(ctx context.Context, s logical.Storage, name string) (*DatabaseConfig, error) { @@ -124,7 +161,15 @@ type upgradeCheck struct { } func (b *databaseBackend) Role(ctx context.Context, s logical.Storage, roleName string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+roleName) + return b.roleAtPath(ctx, s, roleName, databaseRolePath) +} + +func (b *databaseBackend) StaticRole(ctx context.Context, s logical.Storage, roleName string) (*roleEntry, error) { + return b.roleAtPath(ctx, s, roleName, databaseStaticRolePath) +} + +func (b *databaseBackend) roleAtPath(ctx context.Context, s logical.Storage, roleName string, pathPrefix string) (*roleEntry, error) { + entry, err := s.Get(ctx, pathPrefix+roleName) if err != nil { return nil, err } @@ -228,6 +273,17 @@ func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage, return db, nil } +// invalidateQueue cancels any background queue loading and destroys the queue. +func (b *databaseBackend) invalidateQueue() { + b.Lock() + defer b.Unlock() + + if b.cancelQueue != nil { + b.cancelQueue() + } + b.credRotationQueue = nil +} + // ClearConnection closes the database connection and // removes it from the b.connections map. func (b *databaseBackend) ClearConnection(name string) error { @@ -267,8 +323,13 @@ func (b *databaseBackend) CloseIfShutdown(db *dbPluginInstance, err error) { } } -// closeAllDBs closes all connections from all database types -func (b *databaseBackend) closeAllDBs(ctx context.Context) { +// clean closes all connections from all database types +// and cancels any rotation queue loading operation. 
+func (b *databaseBackend) clean(ctx context.Context) { + // invalidateQueue acquires it's own lock on the backend, removes queue, and + // terminates the background ticker + b.invalidateQueue() + b.Lock() defer b.Unlock() diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index 07c93382b9e0..e7f186d335ae 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -56,7 +56,7 @@ func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Bac retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp")) - // exponential backoff-retry + // Exponential backoff-retry if err = pool.Retry(func() error { // This will cause a validation to run resp, err := b.HandleRequest(namespace.RootContext(nil), &logical.Request{ @@ -101,12 +101,12 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) sys := vault.TestDynamicSystemView(cores[0].Core) - vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Postgres", []string{}, "") return cluster, sys } -func TestBackend_PluginMain(t *testing.T) { +func TestBackend_PluginMain_Postgres(t *testing.T) { if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { return } @@ -850,17 +850,6 @@ func TestBackend_roleCrud(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - exists, err := b.pathRoleExistenceCheck()(context.Background(), req, &framework.FieldData{ - Raw: data, - Schema: pathRoles(b).Fields, - }) - if err != nil { - t.Fatal(err) - } - if exists { - t.Fatal("expected not exists") - } - // Read the role data = map[string]interface{}{} req = &logical.Request{ @@ -920,17 +909,6 
@@ func TestBackend_roleCrud(t *testing.T) { t.Fatalf("err:%v resp:%#v\n", err, resp) } - exists, err := b.pathRoleExistenceCheck()(context.Background(), req, &framework.FieldData{ - Raw: data, - Schema: pathRoles(b).Fields, - }) - if err != nil { - t.Fatal(err) - } - if !exists { - t.Fatal("expected exists") - } - // Read the role data = map[string]interface{}{} req = &logical.Request{ @@ -994,17 +972,6 @@ func TestBackend_roleCrud(t *testing.T) { t.Fatalf("err:%v resp:%#v\n", err, resp) } - exists, err := b.pathRoleExistenceCheck()(context.Background(), req, &framework.FieldData{ - Raw: data, - Schema: pathRoles(b).Fields, - }) - if err != nil { - t.Fatal(err) - } - if !exists { - t.Fatal("expected exists") - } - // Read the role data = map[string]interface{}{} req = &logical.Request{ diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index e076cc4811c6..2f0667b3dd93 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -22,6 +22,8 @@ type mockPlugin struct { users map[string][]string } +var _ dbplugin.Database = &mockPlugin{} + func (m *mockPlugin) Type() (string, error) { return "mock", nil } func (m *mockPlugin) CreateUser(_ context.Context, statements dbplugin.Statements, usernameConf dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { err = errors.New("err") @@ -86,6 +88,14 @@ func (m *mockPlugin) Close() error { return nil } +func (m *mockPlugin) GenerateCredentials(ctx context.Context) (password string, err error) { + return password, err +} + +func (m *mockPlugin) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticConfig dbplugin.StaticUserConfig) (username string, password string, err error) { + return username, password, err +} + func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { cluster := vault.NewTestCluster(t, nil, 
&vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go index 2eaf79e09321..e66d6d13e4a0 100644 --- a/builtin/logical/database/path_creds_create.go +++ b/builtin/logical/database/path_creds_create.go @@ -11,22 +11,40 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -func pathCredsCreate(b *databaseBackend) *framework.Path { - return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "Name of the role.", +func pathCredsCreate(b *databaseBackend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsCreateRead(), }, - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathCredsCreateRead(), + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, }, + &framework.Path{ + Pattern: "static-creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the static role.", + }, + }, - HelpSynopsis: pathCredsCreateReadHelpSyn, - HelpDescription: pathCredsCreateReadHelpDesc, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathStaticCredsRead(), + }, + + HelpSynopsis: pathStaticCredsReadHelpSyn, + HelpDescription: pathStaticCredsReadHelpDesc, + }, } } @@ -99,6 +117,41 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { } } +func (b *databaseBackend) 
pathStaticCredsRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + role, err := b.StaticRole(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse("unknown role: %s", name), nil + } + + dbConfig, err := b.DatabaseConfig(ctx, req.Storage, role.DBName) + if err != nil { + return nil, err + } + + // If role name isn't in the database's allowed roles, send back a + // permission denied. + if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContainsGlob(dbConfig.AllowedRoles, name) { + return nil, fmt.Errorf("%q is not an allowed role", name) + } + + return &logical.Response{ + Data: map[string]interface{}{ + "username": role.StaticAccount.Username, + "password": role.StaticAccount.Password, + "ttl": role.StaticAccount.PasswordTTL().Seconds(), + "rotation_period": role.StaticAccount.RotationPeriod.Seconds(), + "last_vault_rotation": role.StaticAccount.LastVaultRotation, + }, + }, nil + } +} + const pathCredsCreateReadHelpSyn = ` Request database credentials for a certain role. ` @@ -108,3 +161,14 @@ This path reads database credentials for a certain role. The database credentials will be generated on demand and will be automatically revoked when the lease is up. ` + +const pathStaticCredsReadHelpSyn = ` +Request database credentials for a certain static role. These credentials are +rotated periodically. +` + +const pathStaticCredsReadHelpDesc = ` +This path reads database credentials for a certain static role. The database +credentials are rotated periodically according to their configuration, and will +return the same password until they are rotated. 
+` diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go index 1d518111550b..c63eb980adaa 100644 --- a/builtin/logical/database/path_roles.go +++ b/builtin/logical/database/path_roles.go @@ -2,241 +2,502 @@ package database import ( "context" + "fmt" + "strings" "time" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" ) -func pathListRoles(b *databaseBackend) *framework.Path { - return &framework.Path{ - Pattern: "roles/?$", +func pathListRoles(b *databaseBackend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "roles/?$", - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ListOperation: b.pathRoleList(), + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, }, + &framework.Path{ + Pattern: "static-roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, + HelpSynopsis: pathStaticRoleHelpSyn, + HelpDescription: pathStaticRoleHelpDesc, + }, } } -func pathRoles(b *databaseBackend) *framework.Path { - return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", +func pathRoles(b *databaseBackend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: fieldsForType(databaseRolePath), + ExistenceCheck: b.pathRoleExistenceCheck, + Callbacks: map[logical.Operation]framework.OperationFunc{ + 
logical.ReadOperation: b.pathRoleRead, + logical.CreateOperation: b.pathRoleCreateUpdate, + logical.UpdateOperation: b.pathRoleCreateUpdate, + logical.DeleteOperation: b.pathRoleDelete, }, - "db_name": { - Type: framework.TypeString, - Description: "Name of the database this role acts on.", - }, - "creation_statements": { - Type: framework.TypeStringSlice, - Description: `Specifies the database statements executed to - create and configure a user. See the plugin's API page for more - information on support and formatting for this parameter.`, - }, - "revocation_statements": { - Type: framework.TypeStringSlice, - Description: `Specifies the database statements to be executed - to revoke a user. See the plugin's API page for more information - on support and formatting for this parameter.`, - }, - "renew_statements": { - Type: framework.TypeStringSlice, - Description: `Specifies the database statements to be executed - to renew a user. Not every plugin type will support this - functionality. See the plugin's API page for more information on - support and formatting for this parameter. `, - }, - "rollback_statements": { - Type: framework.TypeStringSlice, - Description: `Specifies the database statements to be executed - rollback a create operation in the event of an error. Not every - plugin type will support this functionality. 
See the plugin's - API page for more information on support and formatting for this - parameter.`, - }, + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + }, - "default_ttl": { - Type: framework.TypeDurationSecond, - Description: "Default ttl for role.", + &framework.Path{ + Pattern: "static-roles/" + framework.GenericNameRegex("name"), + Fields: fieldsForType(databaseStaticRolePath), + ExistenceCheck: b.pathStaticRoleExistenceCheck, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathStaticRoleRead, + logical.CreateOperation: b.pathStaticRoleCreateUpdate, + logical.UpdateOperation: b.pathStaticRoleCreateUpdate, + logical.DeleteOperation: b.pathStaticRoleDelete, }, - "max_ttl": { - Type: framework.TypeDurationSecond, - Description: "Maximum time a credential is valid for", - }, + HelpSynopsis: pathStaticRoleHelpSyn, + HelpDescription: pathStaticRoleHelpDesc, }, + } +} - ExistenceCheck: b.pathRoleExistenceCheck(), - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleRead(), - logical.CreateOperation: b.pathRoleCreateUpdate(), - logical.UpdateOperation: b.pathRoleCreateUpdate(), - logical.DeleteOperation: b.pathRoleDelete(), +// fieldsForType returns a map of string/FieldSchema items for the given role +// type. 
The purpose is to keep the shared fields between dynamic and static +// roles consistent, and allow for each type to override or provide their own +// specific fields +func fieldsForType(roleType string) map[string]*framework.FieldSchema { + fields := map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", }, + "db_name": { + Type: framework.TypeString, + Description: "Name of the database this role acts on.", + }, + } - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, + // Get the fields that are specific to the type of role, and add them to the + // common fields + var typeFields map[string]*framework.FieldSchema + switch roleType { + case databaseStaticRolePath: + typeFields = staticFields() + default: + typeFields = dynamicFields() } + + for k, v := range typeFields { + fields[k] = v + } + + return fields } -func (b *databaseBackend) pathRoleExistenceCheck() framework.ExistenceFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { - role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return false, err - } +// dynamicFields returns a map of key and field schema items that are specific +// only to dynamic roles +func dynamicFields() map[string]*framework.FieldSchema { + fields := map[string]*framework.FieldSchema{ + "default_ttl": { + Type: framework.TypeDurationSecond, + Description: "Default ttl for role.", + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: "Maximum time a credential is valid for", + }, + "creation_statements": { + Type: framework.TypeStringSlice, + Description: `Specifies the database statements executed to + create and configure a user. 
See the plugin's API page for more + information on support and formatting for this parameter.`, + }, + "revocation_statements": { + Type: framework.TypeStringSlice, + Description: `Specifies the database statements to be executed + to revoke a user. See the plugin's API page for more information + on support and formatting for this parameter.`, + }, + "renew_statements": { + Type: framework.TypeStringSlice, + Description: `Specifies the database statements to be executed + to renew a user. Not every plugin type will support this + functionality. See the plugin's API page for more information on + support and formatting for this parameter. `, + }, + "rollback_statements": { + Type: framework.TypeStringSlice, + Description: `Specifies the database statements to be executed + rollback a create operation in the event of an error. Not every plugin + type will support this functionality. See the plugin's API page for + more information on support and formatting for this parameter.`, + }, + } + return fields +} - return role != nil, nil +// staticFields returns a map of key and field schema items that are specific +// only to static roles +func staticFields() map[string]*framework.FieldSchema { + fields := map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: `Name of the static user account for Vault to manage. + Requires "rotation_period" to be specified`, + }, + "rotation_period": { + Type: framework.TypeDurationSecond, + Description: `Period for automatic + credential rotation of the given username. Not valid unless used with + "username".`, + }, + "rotation_statements": { + Type: framework.TypeStringSlice, + Description: `Specifies the database statements to be executed to + rotate the accounts credentials. Not every plugin type will support + this functionality. 
See the plugin's API page for more information on + support and formatting for this parameter.`, + }, } + return fields } -func (b *databaseBackend) pathRoleDelete() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) - if err != nil { - return nil, err +func (b *databaseBackend) pathRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return false, err + } + return role != nil, nil +} + +func (b *databaseBackend) pathStaticRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + role, err := b.StaticRole(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return false, err + } + return role != nil, nil +} + +func (b *databaseBackend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, databaseRolePath+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *databaseBackend) pathStaticRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Grab the exclusive lock + lock := locksutil.LockForKey(b.roleLocks, name) + lock.Lock() + defer lock.Unlock() + + // Remove the item from the queue + _, _ = b.popFromRotationQueueByKey(name) + + err := req.Storage.Delete(ctx, databaseStaticRolePath+name) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *databaseBackend) pathStaticRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + role, err := b.StaticRole(ctx, req.Storage, d.Get("name").(string)) + if err != nil { + 
return nil, err + } + if role == nil { + return nil, nil + } + + data := map[string]interface{}{ + "db_name": role.DBName, + "rotation_statements": role.Statements.Rotation, + } + + // guard against nil StaticAccount; shouldn't happen but we'll be safe + if role.StaticAccount != nil { + data["username"] = role.StaticAccount.Username + data["rotation_statements"] = role.Statements.Rotation + data["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() + if !role.StaticAccount.LastVaultRotation.IsZero() { + data["last_vault_rotation"] = role.StaticAccount.LastVaultRotation } + } + + if len(role.Statements.Rotation) == 0 { + data["rotation_statements"] = []string{} + } + return &logical.Response{ + Data: data, + }, nil +} + +func (b *databaseBackend) pathRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(ctx, req.Storage, d.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { return nil, nil } + + data := map[string]interface{}{ + "db_name": role.DBName, + "creation_statements": role.Statements.Creation, + "revocation_statements": role.Statements.Revocation, + "rollback_statements": role.Statements.Rollback, + "renew_statements": role.Statements.Renewal, + "default_ttl": role.DefaultTTL.Seconds(), + "max_ttl": role.MaxTTL.Seconds(), + } + if len(role.Statements.Creation) == 0 { + data["creation_statements"] = []string{} + } + if len(role.Statements.Revocation) == 0 { + data["revocation_statements"] = []string{} + } + if len(role.Statements.Rollback) == 0 { + data["rollback_statements"] = []string{} + } + if len(role.Statements.Renewal) == 0 { + data["renew_statements"] = []string{} + } + + return &logical.Response{ + Data: data, + }, nil } -func (b *databaseBackend) pathRoleRead() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - role, err := b.Role(ctx, req.Storage, 
d.Get("name").(string)) - if err != nil { - return nil, err +func (b *databaseBackend) pathRoleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := databaseRolePath + if strings.HasPrefix(req.Path, "static-roles") { + path = databaseStaticRolePath + } + entries, err := req.Storage.List(ctx, path) + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *databaseBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse("empty role name attribute given"), nil + } + + exists, err := b.pathStaticRoleExistenceCheck(ctx, req, data) + if err != nil { + return nil, err + } + if exists { + return logical.ErrorResponse("Role and Static Role names must be unique"), nil + } + + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + role = &roleEntry{} + } + + createOperation := (req.Operation == logical.CreateOperation) + + // DB Attributes + { + if dbNameRaw, ok := data.GetOk("db_name"); ok { + role.DBName = dbNameRaw.(string) + } else if createOperation { + role.DBName = data.Get("db_name").(string) } - if role == nil { - return nil, nil + if role.DBName == "" { + return logical.ErrorResponse("database name is required"), nil } + } - data := map[string]interface{}{ - "db_name": role.DBName, - "creation_statements": role.Statements.Creation, - "revocation_statements": role.Statements.Revocation, - "rollback_statements": role.Statements.Rollback, - "renew_statements": role.Statements.Renewal, - "default_ttl": role.DefaultTTL.Seconds(), - "max_ttl": role.MaxTTL.Seconds(), - } - if len(role.Statements.Creation) == 0 { - data["creation_statements"] = []string{} + // Statements + { + if creationStmtsRaw, ok := data.GetOk("creation_statements"); ok { + role.Statements.Creation 
= creationStmtsRaw.([]string) + } else if createOperation { + role.Statements.Creation = data.Get("creation_statements").([]string) } - if len(role.Statements.Revocation) == 0 { - data["revocation_statements"] = []string{} + + if revocationStmtsRaw, ok := data.GetOk("revocation_statements"); ok { + role.Statements.Revocation = revocationStmtsRaw.([]string) + } else if createOperation { + role.Statements.Revocation = data.Get("revocation_statements").([]string) } - if len(role.Statements.Rollback) == 0 { - data["rollback_statements"] = []string{} + + if rollbackStmtsRaw, ok := data.GetOk("rollback_statements"); ok { + role.Statements.Rollback = rollbackStmtsRaw.([]string) + } else if createOperation { + role.Statements.Rollback = data.Get("rollback_statements").([]string) } - if len(role.Statements.Renewal) == 0 { - data["renew_statements"] = []string{} + + if renewStmtsRaw, ok := data.GetOk("renew_statements"); ok { + role.Statements.Renewal = renewStmtsRaw.([]string) + } else if createOperation { + role.Statements.Renewal = data.Get("renew_statements").([]string) } - return &logical.Response{ - Data: data, - }, nil + // Do not persist deprecated statements that are populated on role read + role.Statements.CreationStatements = "" + role.Statements.RevocationStatements = "" + role.Statements.RenewStatements = "" + role.Statements.RollbackStatements = "" } -} -func (b *databaseBackend) pathRoleList() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List(ctx, "role/") - if err != nil { - return nil, err + role.Statements.Revocation = strutil.RemoveEmpty(role.Statements.Revocation) + + // TTLs + { + if defaultTTLRaw, ok := data.GetOk("default_ttl"); ok { + role.DefaultTTL = time.Duration(defaultTTLRaw.(int)) * time.Second + } else if createOperation { + role.DefaultTTL = time.Duration(data.Get("default_ttl").(int)) * time.Second } + if maxTTLRaw, ok 
:= data.GetOk("max_ttl"); ok { + role.MaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second + } else if createOperation { + role.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second + } + } - return logical.ListResponse(entries), nil + // Store it + entry, err := logical.StorageEntryJSON(databaseRolePath+name, role) + if err != nil { + return nil, err } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil } -func (b *databaseBackend) pathRoleCreateUpdate() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - if name == "" { - return logical.ErrorResponse("empty role name attribute given"), nil - } +func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse("empty role name attribute given"), nil + } - role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return nil, err - } - if role == nil { - role = &roleEntry{} - } + // Grab the exclusive lock as well potentially pop and re-push the queue item + // for this role + lock := locksutil.LockForKey(b.roleLocks, name) + lock.Lock() + defer lock.Unlock() - // DB Attributes - { - if dbNameRaw, ok := data.GetOk("db_name"); ok { - role.DBName = dbNameRaw.(string) - } else if req.Operation == logical.CreateOperation { - role.DBName = data.Get("db_name").(string) - } - if role.DBName == "" { - return logical.ErrorResponse("empty database name attribute"), nil - } - } + exists, err := b.pathRoleExistenceCheck(ctx, req, data) + if err != nil { + return nil, err + } + if exists { + return logical.ErrorResponse("Role and Static Role names must be unique"), nil + } + + role, err := b.StaticRole(ctx, req.Storage, data.Get("name").(string)) + if err 
!= nil { + return nil, err + } - // TTLs - { - if defaultTTLRaw, ok := data.GetOk("default_ttl"); ok { - role.DefaultTTL = time.Duration(defaultTTLRaw.(int)) * time.Second - } else if req.Operation == logical.CreateOperation { - role.DefaultTTL = time.Duration(data.Get("default_ttl").(int)) * time.Second - } - if maxTTLRaw, ok := data.GetOk("max_ttl"); ok { - role.MaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second - } else if req.Operation == logical.CreateOperation { - role.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second - } + // createRole is a boolean to indicate if this is a new role creation. This is + // can be used later by database plugins that distinguish between creating and + // updating roles, and may use seperate statements depending on the context. + createRole := (req.Operation == logical.CreateOperation) + if role == nil { + role = &roleEntry{ + StaticAccount: &staticAccount{}, } + createRole = true + } + + // DB Attributes + if dbNameRaw, ok := data.GetOk("db_name"); ok { + role.DBName = dbNameRaw.(string) + } else if createRole { + role.DBName = data.Get("db_name").(string) + } - // Statements - { - if creationStmtsRaw, ok := data.GetOk("creation_statements"); ok { - role.Statements.Creation = creationStmtsRaw.([]string) - } else if req.Operation == logical.CreateOperation { - role.Statements.Creation = data.Get("creation_statements").([]string) - } - - if revocationStmtsRaw, ok := data.GetOk("revocation_statements"); ok { - role.Statements.Revocation = revocationStmtsRaw.([]string) - } else if req.Operation == logical.CreateOperation { - role.Statements.Revocation = data.Get("revocation_statements").([]string) - } - - if rollbackStmtsRaw, ok := data.GetOk("rollback_statements"); ok { - role.Statements.Rollback = rollbackStmtsRaw.([]string) - } else if req.Operation == logical.CreateOperation { - role.Statements.Rollback = data.Get("rollback_statements").([]string) - } - - if renewStmtsRaw, ok := data.GetOk("renew_statements"); 
ok { - role.Statements.Renewal = renewStmtsRaw.([]string) - } else if req.Operation == logical.CreateOperation { - role.Statements.Renewal = data.Get("renew_statements").([]string) - } - - // Do not persist deprecated statements that are populated on role read - role.Statements.CreationStatements = "" - role.Statements.RevocationStatements = "" - role.Statements.RenewStatements = "" - role.Statements.RollbackStatements = "" + if role.DBName == "" { + return logical.ErrorResponse("database name is a required field"), nil + } + + username := data.Get("username").(string) + if username == "" && createRole { + return logical.ErrorResponse("username is a required field to create a static account"), nil + } + + if role.StaticAccount.Username != "" && role.StaticAccount.Username != username { + return logical.ErrorResponse("cannot update static account username"), nil + } + role.StaticAccount.Username = username + + // If it's a Create operation, both username and rotation_period must be included + rotationPeriodSecondsRaw, ok := data.GetOk("rotation_period") + if !ok && createRole { + return logical.ErrorResponse("rotation_period is required to create static accounts"), nil + } + if ok { + rotationPeriodSeconds := rotationPeriodSecondsRaw.(int) + if rotationPeriodSeconds < queueTickSeconds { + // If rotation frequency is specified, and this is an update, the value + // must be at least that of the constant queueTickSeconds (5 seconds at + // time of writing), otherwise we wont be able to rotate in time + return logical.ErrorResponse(fmt.Sprintf("rotation_period must be %d seconds or more", queueTickSeconds)), nil } + role.StaticAccount.RotationPeriod = time.Duration(rotationPeriodSeconds) * time.Second + } - role.Statements.Revocation = strutil.RemoveEmpty(role.Statements.Revocation) + if rotationStmtsRaw, ok := data.GetOk("rotation_statements"); ok { + role.Statements.Rotation = rotationStmtsRaw.([]string) + } else if req.Operation == logical.CreateOperation { + 
role.Statements.Rotation = data.Get("rotation_statements").([]string) + } - // Store it - entry, err := logical.StorageEntryJSON("role/"+name, role) + // lvr represents the roles' LastVaultRotation + lvr := role.StaticAccount.LastVaultRotation + + // Only call setStaticAccount if we're creating the role for the + // first time + switch req.Operation { + case logical.CreateOperation: + // setStaticAccount calls Storage.Put and saves the role to storage + resp, err := b.setStaticAccount(ctx, req.Storage, &setStaticAccountInput{ + RoleName: name, + Role: role, + CreateUser: createRole, + }) + if err != nil { + return nil, err + } + // guard against RotationTime not being set or zero-value + lvr = resp.RotationTime + case logical.UpdateOperation: + // store updated Role + entry, err := logical.StorageEntryJSON(databaseStaticRolePath+name, role) if err != nil { return nil, err } @@ -244,21 +505,83 @@ func (b *databaseBackend) pathRoleCreateUpdate() framework.OperationFunc { return nil, err } - return nil, nil + // In case this is an update, remove any previous version of the item from + // the queue + b.popFromRotationQueueByKey(name) + } + + // Add their rotation to the queue + if err := b.pushItem(&queue.Item{ + Key: name, + Priority: lvr.Add(role.StaticAccount.RotationPeriod).Unix(), + }); err != nil { + return nil, err } + + return nil, nil } type roleEntry struct { - DBName string `json:"db_name"` - Statements dbplugin.Statements `json:"statements"` - DefaultTTL time.Duration `json:"default_ttl"` - MaxTTL time.Duration `json:"max_ttl"` + DBName string `json:"db_name"` + Statements dbplugin.Statements `json:"statements"` + DefaultTTL time.Duration `json:"default_ttl"` + MaxTTL time.Duration `json:"max_ttl"` + StaticAccount *staticAccount `json:"static_account" mapstructure:"static_account"` +} + +type staticAccount struct { + // Username to create or assume management for static accounts + Username string `json:"username"` + + // Password is the current password for 
static accounts. As an input, this is + // used/required when trying to assume management of an existing static + // account. Return this on credential request if it exists. + Password string `json:"password"` + + // LastVaultRotation represents the last time Vault rotated the password + LastVaultRotation time.Time `json:"last_vault_rotation"` + + // RotationPeriod is number in seconds between each rotation, effectively a + // "time to live". This value is compared to the LastVaultRotation to + // determine if a password needs to be rotated + RotationPeriod time.Duration `json:"rotation_period"` + + // RevokeUser is a boolean flag to indicate if Vault should revoke the + // database user when the role is deleted + RevokeUserOnDelete bool `json:"revoke_user_on_delete"` +} + +// NextRotationTime calculates the next rotation by adding the Rotation Period +// to the last known vault rotation +func (s *staticAccount) NextRotationTime() time.Time { + return s.LastVaultRotation.Add(s.RotationPeriod) +} + +// PasswordTTL calculates the approximate time remaining until the password is +// no longer valid. This is approximate because the periodic rotation is only +// checked approximately every 5 seconds, and each rotation can take a small +// amount of time to process. This can result in a negative TTL time while the +// rotation function processes the Static Role and performs the rotation. If the +// TTL is negative, zero is returned. Users should not trust passwords with a +// Zero TTL, as they are likely in the process of being rotated and will quickly +// be invalidated. +func (s *staticAccount) PasswordTTL() time.Duration { + next := s.NextRotationTime() + ttl := next.Sub(time.Now()).Round(time.Second) + if ttl < 0 { + ttl = time.Duration(0) + } + return ttl } const pathRoleHelpSyn = ` Manage the roles that can be created with this backend. ` +const pathStaticRoleHelpSyn = ` +Manage the static roles that can be created with this backend. 
+` + const pathRoleHelpDesc = ` This path lets you manage the roles that can be created with this backend. @@ -299,3 +622,43 @@ user. The "rollback_statements' parameter customizes the statement string used to rollback a change if needed. ` + +const pathStaticRoleHelpDesc = ` +This path lets you manage the static roles that can be created with this +backend. Static Roles are associated with a single database user, and manage the +password based on a rotation period, automatically rotating the password. + +The "db_name" parameter is required and configures the name of the database +connection to use. + +The "creation_statements" parameter customizes the string used to create the +credentials. This can be a sequence of SQL queries, or other statement formats +for a particular database type. Some substitution will be done to the statement +strings for certain keys. The names of the variables must be surrounded by "{{" +and "}}" to be replaced. + + * "name" - The random username generated for the DB user. + + * "password" - The random password generated for the DB user. + +Example of a decent creation_statements for a postgresql database plugin: + + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; + +The "revocation_statements" parameter customizes the statement string used to +revoke a user. Example of a decent revocation_statements for a postgresql +database plugin: + + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; + REVOKE USAGE ON SCHEMA public FROM {{name}}; + DROP ROLE IF EXISTS {{name}}; + +The "renew_statements" parameter customizes the statement string used to renew a +user. +The "rollback_statements' parameter customizes the statement string used to +rollback a change if needed. 
+` diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go new file mode 100644 index 000000000000..0e9bf4f003f7 --- /dev/null +++ b/builtin/logical/database/path_roles_test.go @@ -0,0 +1,524 @@ +package database + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +var dataKeys = []string{"username", "password", "last_vault_rotation", "rotation_period"} + +func TestBackend_StaticRole_Config(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Test static role creation scenarios. 
Uses a map, so there is no guaranteed + // ordering, so each case cleans up by deleting the role + testCases := map[string]struct { + account map[string]interface{} + expected map[string]interface{} + err error + }{ + "basic": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + }, + expected: map[string]interface{}{ + "username": dbUser, + "rotation_period": float64(5400), + }, + }, + "missing rotation period": { + account: map[string]interface{}{ + "username": dbUser, + }, + err: errors.New("rotation_period is required to create static accounts"), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + data := map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + } + + for k, v := range tc.account { + data[k] = v + } + + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + if tc.err == nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + if err != nil && tc.err.Error() == err.Error() { + // errors match + return + } + if err == nil && tc.err.Error() == resp.Error().Error() { + // errors match + return + } + t.Fatalf("expected err message: (%s), got (%s), response error: (%s)", tc.err, err, resp.Error()) + } + + if tc.err != nil { + if err == nil || (resp == nil || !resp.IsError()) { + t.Fatal("expected error, got none") + } + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected 
:= tc.expected + actual := make(map[string]interface{}) + for _, key := range dataKeys { + if v, ok := resp.Data[key]; ok { + actual[key] = v + } + } + + if len(tc.expected) > 0 { + // verify a password is returned, but we don't care what it's value is + if actual["password"] == "" { + t.Fatalf("expected result to contain password, but none found") + } + if v, ok := actual["last_vault_rotation"].(time.Time); !ok { + t.Fatalf("expected last_vault_rotation to be set to time.Time type, got: %#v", v) + } + + // delete these values before the comparison, since we can't know them in + // advance + delete(actual, "password") + delete(actual, "last_vault_rotation") + if diff := deep.Equal(expected, actual); diff != nil { + t.Fatal(diff) + } + } + + if len(tc.expected) == 0 && resp.Data["static_account"] != nil { + t.Fatalf("got unexpected static_account info: %#v", actual) + } + + if diff := deep.Equal(resp.Data["db_name"], "plugin-test"); diff != nil { + t.Fatal(diff) + } + + // Delete role for next run + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + }) + } +} + +func TestBackend_StaticRole_Updates(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a 
connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "5400s", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + rotation := resp.Data["rotation_period"].(float64) + + // capture the password to verify it doesn't change + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test-updates", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + 
t.Fatalf("expected both username/password, got (%s), (%s)", username, password) + } + + // update rotation_period + updateData := map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "username": dbUser, + "rotation_period": "6400s", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // re-read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newRotation := resp.Data["rotation_period"].(float64) + if newRotation == rotation { + t.Fatalf("expected change in rotation, but got old value: %#v", newRotation) + } + + // re-capture the password to ensure it did not change + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test-updates", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if username != resp.Data["username"].(string) { + t.Fatalf("usernames dont match!: (%s) / (%s)", username, resp.Data["username"].(string)) + } + if password != resp.Data["password"].(string) { + t.Fatalf("passwords dont match!: (%s) / (%s)", password, resp.Data["password"].(string)) + } + + // verify that rotation_period is only required when creating + updateData = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + 
"username": dbUser, + "rotation_statements": testRoleStaticUpdateRotation, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // verify updating static username returns an error + updateData = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "username": "statictestmodified", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || !resp.IsError() { + t.Fatal("expected error on updating name") + } + err = resp.Error() + if err.Error() != "cannot update static account username" { + t.Fatalf("expected error on updating name, got: %s", err) + } +} + +func TestBackend_StaticRole_Role_name_check(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := 
&logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // non-static role + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "rotation_statements": testRoleStaticUpdate, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // create a static role with the same name, and expect failure + // static role + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "rotation_statements": testRoleStaticUpdate, + "revocation_statements": defaultRevocationSQL, + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if resp == nil || !resp.IsError() { + t.Fatalf("expected error, got none") + } + + // repeat, with a static role first + data = map[string]interface{}{ + "name": "plugin-role-test-2", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "1h", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test-2", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + 
if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // create a non-static role with the same name, and expect failure + data = map[string]interface{}{ + "name": "plugin-role-test-2", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test-2", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if resp == nil || !resp.IsError() { + t.Fatalf("expected error, got none") + } +} + +const testRoleStaticCreate = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}'; +` + +const testRoleStaticUpdate = ` +ALTER USER "{{name}}" WITH PASSWORD '{{password}}'; +` + +const testRoleStaticUpdateRotation = ` +ALTER USER "{{name}}" WITH PASSWORD '{{password}}';GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index a80c17892f58..bc82b7adbf8a 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -3,27 +3,47 @@ package database import ( "context" "fmt" + "time" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" ) -func pathRotateCredentials(b *databaseBackend) *framework.Path { - return &framework.Path{ - Pattern: "rotate-root/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "Name of this database connection", +func pathRotateCredentials(b *databaseBackend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "rotate-root/" + 
framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of this database connection", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRotateCredentialsUpdate(), }, - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathRotateCredentialsUpdate(), + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, }, + &framework.Path{ + Pattern: "rotate-role/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the static role", + }, + }, - HelpSynopsis: pathCredsCreateReadHelpSyn, - HelpDescription: pathCredsCreateReadHelpDesc, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRotateRoleCredentialsUpdate(), + }, + + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, + }, } } @@ -77,6 +97,56 @@ func (b *databaseBackend) pathRotateCredentialsUpdate() framework.OperationFunc return nil, nil } } +func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse("empty role name attribute given"), nil + } + + role, err := b.StaticRole(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse("no static role found for role name"), nil + } + + // In create/update of static accounts, we only care if the operation + // err'd , and this call does not return credentials + item, err := b.popFromRotationQueueByKey(name) + if err != nil { + item = &queue.Item{ + Key: name, + } + } + 
+ resp, err := b.setStaticAccount(ctx, req.Storage, &setStaticAccountInput{ + RoleName: name, + Role: role, + }) + if err != nil { + b.logger.Warn("unable to rotate credentials in rotate-role", "error", err) + // Update the priority to re-try this rotation and re-add the item to + // the queue + item.Priority = time.Now().Add(10 * time.Second).Unix() + + // Preserve the WALID if it was returned + if resp.WALID != "" { + item.Value = resp.WALID + } + } else { + item.Priority = resp.RotationTime.Add(role.StaticAccount.RotationPeriod).Unix() + } + + // Add their rotation to the queue + if err := b.pushItem(item); err != nil { + return nil, err + } + + return nil, nil + } +} const pathRotateCredentialsUpdateHelpSyn = ` Request to rotate the root credentials for a certain database connection. @@ -85,3 +155,10 @@ Request to rotate the root credentials for a certain database connection. const pathRotateCredentialsUpdateHelpDesc = ` This path attempts to rotate the root credentials for the given database. ` + +const pathRotateRoleCredentialsUpdateHelpSyn = ` +Request to rotate the credentials for a static user account. +` +const pathRotateRoleCredentialsUpdateHelpDesc = ` +This path attempts to rotate the credentials for the given static user account. 
+` diff --git a/builtin/logical/database/rotation.go b/builtin/logical/database/rotation.go new file mode 100644 index 000000000000..46759cbec681 --- /dev/null +++ b/builtin/logical/database/rotation.go @@ -0,0 +1,528 @@ +package database + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + // Interval to check the queue for items needing rotation + queueTickSeconds = 5 + queueTickInterval = queueTickSeconds * time.Second + + // WAL storage key used for static account rotations + staticWALKey = "staticRotationKey" +) + +// populateQueue loads the priority queue with existing static accounts. This +// occurs at initialization, after any WAL entries of failed or interrupted +// rotations have been processed. It lists the roles from storage and searches +// for any that have an associated static account, then adds them to the +// priority queue for rotations. 
+func (b *databaseBackend) populateQueue(ctx context.Context, s logical.Storage) { + log := b.Logger() + log.Info("populating role rotation queue") + + // Build map of role name / wal entries + walMap, err := b.loadStaticWALs(ctx, s) + if err != nil { + log.Warn("unable to load rotation WALs", "error", err) + } + + roles, err := s.List(ctx, databaseStaticRolePath) + if err != nil { + log.Warn("unable to list role for enqueueing", "error", err) + return + } + + for _, roleName := range roles { + select { + case <-ctx.Done(): + log.Info("rotation queue restore cancelled") + return + default: + } + + role, err := b.StaticRole(ctx, s, roleName) + if err != nil { + log.Warn("unable to read static role", "error", err, "role", roleName) + continue + } + + item := queue.Item{ + Key: roleName, + Priority: role.StaticAccount.LastVaultRotation.Add(role.StaticAccount.RotationPeriod).Unix(), + } + + // Check if role name is in map + walEntry := walMap[roleName] + if walEntry != nil { + // Check walEntry last vault time + if !walEntry.LastVaultRotation.IsZero() && walEntry.LastVaultRotation.Before(role.StaticAccount.LastVaultRotation) { + // WAL's last vault rotation record is older than the role's data, so + // delete and move on + if err := framework.DeleteWAL(ctx, s, walEntry.walID); err != nil { + log.Warn("unable to delete WAL", "error", err, "WAL ID", walEntry.walID) + } + } else { + log.Info("adjusting priority for Role") + item.Value = walEntry.walID + item.Priority = time.Now().Unix() + } + } + + if err := b.pushItem(&item); err != nil { + log.Warn("unable to enqueue item", "error", err, "role", roleName) + } + } +} + +// runTicker kicks off a periodic ticker that invoke the automatic credential +// rotation method at a determined interval. The default interval is 5 seconds. 
+func (b *databaseBackend) runTicker(ctx context.Context, s logical.Storage) { + b.logger.Info("starting periodic ticker") + tick := time.NewTicker(queueTickInterval) + defer tick.Stop() + for { + select { + case <-tick.C: + b.rotateCredentials(ctx, s) + + case <-ctx.Done(): + b.logger.Info("stopping periodic ticker") + return + } + } +} + +// setCredentialsWAL is used to store information in a WAL that can retry a +// credential setting or rotation in the event of partial failure. +type setCredentialsWAL struct { + NewPassword string `json:"new_password"` + OldPassword string `json:"old_password"` + RoleName string `json:"role_name"` + Username string `json:"username"` + + LastVaultRotation time.Time `json:"last_vault_rotation"` + + walID string +} + +// rotateCredentials sets a new password for a static account. This method is +// invoked in the runTicker method, which is in its own go-routine, and invoked +// periodically (approximately every 5 seconds). +// +// This method loops through the priority queue, popping the highest priority +// item until it encounters the first item that does not yet need rotation, +// based on the current time. 
+func (b *databaseBackend) rotateCredentials(ctx context.Context, s logical.Storage) error { + for { + // Quit rotating credentials if shutdown has started + select { + case <-ctx.Done(): + return nil + default: + } + item, err := b.popFromRotationQueue() + if err != nil { + if err == queue.ErrEmpty { + return nil + } + return err + } + + // Guard against possible nil item + if item == nil { + return nil + } + + // Grab the exclusive lock for this Role, to make sure we don't incur and + // writes during the rotation process + lock := locksutil.LockForKey(b.roleLocks, item.Key) + lock.Lock() + defer lock.Unlock() + + // Validate the role still exists + role, err := b.StaticRole(ctx, s, item.Key) + if err != nil { + b.logger.Error("unable to load role", "role", item.Key, "error", err) + item.Priority = time.Now().Add(10 * time.Second).Unix() + if err := b.pushItem(item); err != nil { + b.logger.Error("unable to push item on to queue", "error", err) + } + continue + } + if role == nil { + b.logger.Warn("role not found", "role", item.Key, "error", err) + continue + } + + // If "now" is less than the Item priority, then this item does not need to + // be rotated + if time.Now().Unix() < item.Priority { + if err := b.pushItem(item); err != nil { + b.logger.Error("unable to push item on to queue", "error", err) + } + // Break out of the for loop + break + } + + input := &setStaticAccountInput{ + RoleName: item.Key, + Role: role, + } + + // If there is a WAL entry related to this Role, the corresponding WAL ID + // should be stored in the Item's Value field. 
+ if walID, ok := item.Value.(string); ok { + walEntry, err := b.findStaticWAL(ctx, s, walID) + if err != nil { + b.logger.Error("error finding static WAL", "error", err) + item.Priority = time.Now().Add(10 * time.Second).Unix() + if err := b.pushItem(item); err != nil { + b.logger.Error("unable to push item on to queue", "error", err) + } + } + if walEntry != nil && walEntry.NewPassword != "" { + input.Password = walEntry.NewPassword + input.WALID = walID + } + } + + resp, err := b.setStaticAccount(ctx, s, input) + if err != nil { + b.logger.Error("unable to rotate credentials in periodic function", "error", err) + // Increment the priority enough so that the next call to this method + // likely will not attempt to rotate it, as a back-off of sorts + item.Priority = time.Now().Add(10 * time.Second).Unix() + + // Preserve the WALID if it was returned + if resp != nil && resp.WALID != "" { + item.Value = resp.WALID + } + + if err := b.pushItem(item); err != nil { + b.logger.Error("unable to push item on to queue", "error", err) + } + // Go to next item + continue + } + + lvr := resp.RotationTime + if lvr.IsZero() { + lvr = time.Now() + } + + // Update priority and push updated Item to the queue + nextRotation := lvr.Add(role.StaticAccount.RotationPeriod) + item.Priority = nextRotation.Unix() + if err := b.pushItem(item); err != nil { + b.logger.Warn("unable to push item on to queue", "error", err) + } + } + return nil +} + +// findStaticWAL loads a WAL entry by ID. 
If found, only return the WAL if it +// is of type staticWALKey, otherwise return nil +func (b *databaseBackend) findStaticWAL(ctx context.Context, s logical.Storage, id string) (*setCredentialsWAL, error) { + wal, err := framework.GetWAL(ctx, s, id) + if err != nil { + return nil, err + } + + if wal == nil || wal.Kind != staticWALKey { + return nil, nil + } + + data := wal.Data.(map[string]interface{}) + walEntry := setCredentialsWAL{ + walID: id, + NewPassword: data["new_password"].(string), + OldPassword: data["old_password"].(string), + RoleName: data["role_name"].(string), + Username: data["username"].(string), + } + lvr, err := time.Parse(time.RFC3339, data["last_vault_rotation"].(string)) + if err != nil { + return nil, err + } + walEntry.LastVaultRotation = lvr + + return &walEntry, nil +} + +type setStaticAccountInput struct { + RoleName string + Role *roleEntry + Password string + CreateUser bool + WALID string +} + +type setStaticAccountOutput struct { + RotationTime time.Time + Password string + // Optional return field, in the event WAL was created and not destroyed + // during the operation + WALID string +} + +// setStaticAccount sets the password for a static account associated with a +// Role. This method does many things: +// - verifies role exists and is in the allowed roles list +// - loads an existing WAL entry if WALID input is given, otherwise creates a +// new WAL entry +// - gets a database connection +// - accepts an input password, otherwise generates a new one via gRPC to the +// database plugin +// - sets new password for the static account +// - uses WAL for ensuring passwords are not lost if storage to Vault fails +// +// This method does not perform any operations on the priority queue. Those +// tasks must be handled outside of this method. 
+func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storage, input *setStaticAccountInput) (*setStaticAccountOutput, error) { + var merr error + if input == nil || input.Role == nil || input.RoleName == "" { + return nil, errors.New("input was empty when attempting to set credentials for static account") + } + // Re-use WAL ID if present, otherwise PUT a new WAL + output := &setStaticAccountOutput{WALID: input.WALID} + + dbConfig, err := b.DatabaseConfig(ctx, s, input.Role.DBName) + if err != nil { + return output, err + } + + // If role name isn't in the database's allowed roles, send back a + // permission denied. + if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContainsGlob(dbConfig.AllowedRoles, input.RoleName) { + return output, fmt.Errorf("%q is not an allowed role", input.RoleName) + } + + // Get the Database object + db, err := b.GetConnection(ctx, s, input.Role.DBName) + if err != nil { + return output, err + } + + db.RLock() + defer db.RUnlock() + + // Use password from input if available. 
This happens if we're restoring from + // a WAL item or processing the rotation queue with an item that has a WAL + // associated with it + newPassword := input.Password + if newPassword == "" { + // Generate a new password + newPassword, err = db.GenerateCredentials(ctx) + if err != nil { + return output, err + } + } + output.Password = newPassword + + config := dbplugin.StaticUserConfig{ + Username: input.Role.StaticAccount.Username, + Password: newPassword, + } + + if output.WALID == "" { + output.WALID, err = framework.PutWAL(ctx, s, staticWALKey, &setCredentialsWAL{ + RoleName: input.RoleName, + Username: config.Username, + NewPassword: config.Password, + OldPassword: input.Role.StaticAccount.Password, + LastVaultRotation: input.Role.StaticAccount.LastVaultRotation, + }) + if err != nil { + return output, errwrap.Wrapf("error writing WAL entry: {{err}}", err) + } + } + + _, password, err := db.SetCredentials(ctx, input.Role.Statements, config) + if err != nil { + b.CloseIfShutdown(db, err) + return output, errwrap.Wrapf("error setting credentials: {{err}}", err) + } + + if newPassword != password { + return output, errors.New("mismatch passwords returned") + } + + // Store updated role information + // lvr is the known LastVaultRotation + lvr := time.Now() + input.Role.StaticAccount.LastVaultRotation = lvr + input.Role.StaticAccount.Password = password + output.RotationTime = lvr + + entry, err := logical.StorageEntryJSON(databaseStaticRolePath+input.RoleName, input.Role) + if err != nil { + return output, err + } + if err := s.Put(ctx, entry); err != nil { + return output, err + } + + // Cleanup WAL after successfully rotating and pushing new item on to queue + if err := framework.DeleteWAL(ctx, s, output.WALID); err != nil { + merr = multierror.Append(merr, err) + return output, merr + } + + // The WAL has been deleted, return new setStaticAccountOutput without it + return &setStaticAccountOutput{RotationTime: lvr}, merr +} + +// initQueue preforms the 
necessary checks and initializations needed to preform +// automatic credential rotation for roles associated with static accounts. This +// method verifies if a queue is needed (primary server or local mount), and if +// so initializes the queue and launches a go-routine to periodically invoke a +// method to preform the rotations. +// +// initQueue is invoked by the Factory method in a go-routine. The Factory does +// not wait for success or failure of it's tasks before continuing. This is to +// avoid blocking the mount process while loading and evaluating existing roles, +// etc. +func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendConfig) { + // Verify this mount is on the primary server, or is a local mount. If not, do + // not create a queue or launch a ticker. Both processing the WAL list and + // populating the queue are done sequentially and before launching a + // go-routine to run the periodic ticker. + replicationState := conf.System.ReplicationState() + if (conf.System.LocalMount() || !replicationState.HasState(consts.ReplicationPerformanceSecondary)) && + !replicationState.HasState(consts.ReplicationDRSecondary) && + !replicationState.HasState(consts.ReplicationPerformanceStandby) { + b.Logger().Info("initializing database rotation queue") + + // Poll for a PutWAL call that does not return a "read-only storage" error. + // This ensures the startup phases of loading WAL entries from any possible + // failed rotations can complete without error when deleting from storage. 
+ READONLY_LOOP: + for { + select { + case <-ctx.Done(): + b.Logger().Info("queue initialization canceled") + return + default: + } + + walID, err := framework.PutWAL(ctx, conf.StorageView, staticWALKey, &setCredentialsWAL{RoleName: "vault-readonlytest"}) + if walID != "" { + defer framework.DeleteWAL(ctx, conf.StorageView, walID) + } + switch { + case err == nil: + break READONLY_LOOP + case err.Error() == logical.ErrSetupReadOnly.Error(): + time.Sleep(10 * time.Millisecond) + default: + b.Logger().Error("deleting nil key resulted in error", "error", err) + return + } + } + + // Load roles and populate queue with static accounts + b.populateQueue(ctx, conf.StorageView) + + // Launch ticker + go b.runTicker(ctx, conf.StorageView) + } +} + +// loadStaticWALs reads WAL entries and returns a map of roles and their +// setCredentialsWAL, if found. +func (b *databaseBackend) loadStaticWALs(ctx context.Context, s logical.Storage) (map[string]*setCredentialsWAL, error) { + keys, err := framework.ListWAL(ctx, s) + if err != nil { + return nil, err + } + if len(keys) == 0 { + b.Logger().Debug("no WAL entries found") + return nil, nil + } + + walMap := make(map[string]*setCredentialsWAL) + // Loop through WAL keys and process any rotation ones + for _, walID := range keys { + walEntry, err := b.findStaticWAL(ctx, s, walID) + if err != nil { + b.Logger().Error("error loading static WAL", "id", walID, "error", err) + continue + } + if walEntry == nil { + continue + } + + // Verify the static role still exists + roleName := walEntry.RoleName + role, err := b.StaticRole(ctx, s, roleName) + if err != nil { + b.Logger().Warn("unable to read static role", "error", err, "role", roleName) + continue + } + if role == nil || role.StaticAccount == nil { + if err := framework.DeleteWAL(ctx, s, walEntry.walID); err != nil { + b.Logger().Warn("unable to delete WAL", "error", err, "WAL ID", walEntry.walID) + } + continue + } + + walEntry.walID = walID + walMap[walEntry.RoleName] = walEntry 
+ } + return walMap, nil +} + +// pushItem wraps the internal queue's Push call, to make sure a queue is +// actually available. This is needed because both runTicker and initQueue +// operate in go-routines, and could be accessing the queue concurrently +func (b *databaseBackend) pushItem(item *queue.Item) error { + b.RLock() + unlockFunc := b.RUnlock + defer func() { unlockFunc() }() + + if b.credRotationQueue != nil { + return b.credRotationQueue.Push(item) + } + + b.Logger().Warn("no queue found during push item") + return nil +} + +// popFromRotationQueue wraps the internal queue's Pop call, to make sure a queue is +// actually available. This is needed because both runTicker and initQueue +// operate in go-routines, and could be accessing the queue concurrently +func (b *databaseBackend) popFromRotationQueue() (*queue.Item, error) { + b.RLock() + defer b.RUnlock() + if b.credRotationQueue != nil { + return b.credRotationQueue.Pop() + } + return nil, queue.ErrEmpty +} + +// popFromRotationQueueByKey wraps the internal queue's PopByKey call, to make sure a queue is +// actually available. 
This is needed because both runTicker and initQueue +// operate in go-routines, and could be accessing the queue concurrently +func (b *databaseBackend) popFromRotationQueueByKey(name string) (*queue.Item, error) { + b.RLock() + defer b.RUnlock() + if b.credRotationQueue != nil { + return b.credRotationQueue.PopByKey(name) + } + return nil, queue.ErrEmpty +} diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go new file mode 100644 index 000000000000..29530fcbdd7b --- /dev/null +++ b/builtin/logical/database/rotation_test.go @@ -0,0 +1,852 @@ +package database + +import ( + "context" + "log" + "strings" + "testing" + "time" + + "database/sql" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/lib/pq" +) + +const dbUser = "vaultstatictest" + +func TestBackend_StaticRole_Rotate_basic(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + verifyPgConn(t, dbUser, "password", connURL) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: 
config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "5400s", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("empty username (%s) or password (%s)", username, password) + } + + // Verify username/password + verifyPgConn(t, dbUser, password, connURL) + + // Re-read the creds, verifying they aren't changing on read + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if username != resp.Data["username"].(string) || password != resp.Data["password"].(string) { + t.Fatal("expected re-read username/password to match, but didn't") + } + + // Trigger rotation + data = 
map[string]interface{}{"name": "plugin-role-test"} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if resp != nil { + t.Fatalf("Expected empty response from rotate-role: (%#v)", resp) + } + + // Re-Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newPassword := resp.Data["password"].(string) + if password == newPassword { + t.Fatalf("expected passwords to differ, got (%s)", newPassword) + } + + // Verify new username/password + verifyPgConn(t, username, newPassword, connURL) +} + +// Sanity check to make sure we don't allow an attempt of rotating credentials +// for non-static accounts, which doesn't make sense anyway, but doesn't hurt to +// verify we return an error +func TestBackend_StaticRole_Rotate_NonStaticError(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + 
"connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "rotation_statements": testRoleStaticUpdate, + "revocation_statements": defaultRevocationSQL, + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("empty username (%s) or password (%s)", username, password) + } + + // Verify username/password + verifyPgConn(t, dbUser, "password", connURL) + // Trigger rotation + data = map[string]interface{}{"name": "plugin-role-test"} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + // expect resp to be an error + resp, _ = 
b.HandleRequest(namespace.RootContext(nil), req) + if !resp.IsError() { + t.Fatalf("expected error rotating non-static role") + } + + if resp.Error().Error() != "no static role found for role name" { + t.Fatalf("wrong error message: %s", err) + } +} + +func TestBackend_StaticRole_Revoke_user(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + testCases := map[string]struct { + revoke *bool + expectVerifyErr bool + }{ + // Default case: user does not specify, Vault leaves the database user + // untouched, and the final connection check passes because the user still + // exists + "unset": {}, + // Revoke on delete. 
The final connection check should fail because the user + // no longer exists + "revoke": { + revoke: newBoolPtr(true), + expectVerifyErr: true, + }, + // Revoke false, final connection check should still pass + "persist": { + revoke: newBoolPtr(false), + }, + } + for k, tc := range testCases { + t.Run(k, func(t *testing.T) { + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "5400s", + } + if tc.revoke != nil { + data["revoke_user_on_delete"] = *tc.revoke + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("empty username (%s) or password (%s)", username, password) + } + + // Verify username/password + verifyPgConn(t, username, password, connURL) + + // delete the role, expect the default where the user is not destroyed + // Read the creds + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Verify new 
username/password still work + verifyPgConn(t, username, password, connURL) + }) + } +} + +func createTestPGUser(t *testing.T, connURL string, username, password, query string) { + t.Helper() + log.Printf("[TRACE] Creating test user") + conn, err := pq.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + + db, err := sql.Open("postgres", conn) + defer db.Close() + if err != nil { + t.Fatal(err) + } + + // Start a transaction + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + if err != nil { + t.Fatal(err) + } + defer func() { + _ = tx.Rollback() + }() + + m := map[string]string{ + "name": username, + "password": password, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + t.Fatal(err) + } + // Commit the transaction + if err := tx.Commit(); err != nil { + t.Fatal(err) + } +} + +func verifyPgConn(t *testing.T, username, password, connURL string) { + t.Helper() + cURL := strings.Replace(connURL, "postgres:secret", username+":"+password, 1) + db, err := sql.Open("postgres", cURL) + if err != nil { + t.Fatal(err) + } + if err := db.Ping(); err != nil { + t.Fatal(err) + } +} + +// WAL testing +// +// First scenario, WAL contains a role name that does not exist. 
+func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + ctx := context.Background() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + _, err := framework.PutWAL(ctx, config.StorageView, staticWALKey, &setCredentialsWAL{ + RoleName: "doesnotexist", + }) + if err != nil { + t.Fatalf("error with PutWAL: %s", err) + } + + assertWALCount(t, config.StorageView, 1) + + b, err := Factory(ctx, config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(ctx) + + time.Sleep(5 * time.Second) + bd := b.(*databaseBackend) + if bd.credRotationQueue == nil { + t.Fatal("database backend had no credential rotation queue") + } + + // Verify empty queue + if bd.credRotationQueue.Len() != 0 { + t.Fatalf("expected zero queue items, got: %d", bd.credRotationQueue.Len()) + } + + assertWALCount(t, config.StorageView, 0) +} + +// Second scenario, WAL contains a role name that does exist, but the role's +// LastVaultRotation is greater than the WAL has +func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + ctx := context.Background() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + roleName := "test-discard-by-date" + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + 
"name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Save Now() to make sure rotation time is after this, as well as the WAL + // time + roleTime := time.Now() + + // Create role + data = map[string]interface{}{ + "name": roleName, + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + // Low value here, to make sure the backend rotates this password at least + // once before we compare it to the WAL + "rotation_period": "10s", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + roleName, + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Allow the first rotation to occur, setting LastVaultRotation + time.Sleep(time.Second * 12) + + // Cleanup the backend, then create a WAL for the role with a + // LastVaultRotation of 1 hour ago, so that when we recreate the backend the + // WAL will be read but discarded + b.Cleanup(ctx) + b = nil + time.Sleep(time.Second * 3) + + // Make a fake WAL entry with an older time + oldRotationTime := roleTime.Add(time.Hour * -1) + walPassword := "somejunkpassword" + _, err = framework.PutWAL(ctx, config.StorageView, staticWALKey, &setCredentialsWAL{ + RoleName: roleName, + NewPassword: walPassword, + LastVaultRotation: oldRotationTime, + Username: dbUser, + }) + if err != nil { + t.Fatalf("error with PutWAL: %s", err) + } + + assertWALCount(t, config.StorageView, 1) + + // Reload backend + lb, err = Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok = 
lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(ctx) + + // Allow enough time for populateQueue to work after boot + time.Sleep(time.Second * 12) + + // PopulateQueue should have processed the entry + assertWALCount(t, config.StorageView, 0) + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/" + roleName, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + lastVaultRotation := resp.Data["last_vault_rotation"].(time.Time) + if !lastVaultRotation.After(oldRotationTime) { + t.Fatal("last vault rotation time not greater than WAL time") + } + + if !lastVaultRotation.After(roleTime) { + t.Fatal("last vault rotation time not greater than role creation time") + } + + // Grab password to verify it didn't change + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + roleName, + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + password := resp.Data["password"].(string) + if password == walPassword { + t.Fatalf("expected password to not be changed by WAL, but was") + } +} + +// Helper to assert the number of WAL entries is what we expect +func assertWALCount(t *testing.T, s logical.Storage, expected int) { + var count int + ctx := context.Background() + keys, err := framework.ListWAL(ctx, s) + if err != nil { + t.Fatal("error listing WALs") + } + + // Loop through WAL keys and process any rotation ones + for _, k := range keys { + walEntry, _ := framework.GetWAL(ctx, s, k) + if walEntry == nil { + continue + } + + if walEntry.Kind != staticWALKey { + continue + } + count++ + } + if expected != 
count { + t.Fatalf("WAL count mismatch, expected (%d), got (%d)", expected, count) + } +} + +// +// End WAL testing +// + +func TestBackend_StaticRole_Rotations_PostgreSQL(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + bd := b.(*databaseBackend) + if bd.credRotationQueue == nil { + t.Fatal("database backend had no credential rotation queue") + } + + // Configure backend, add item and confirm length + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + testCases := []string{"65", "130", "5400"} + // Create database users ahead + for _, tc := range testCases { + createTestPGUser(t, connURL, dbUser+tc, "password", testRoleStaticCreate) + } + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create three static roles with different rotation periods + for _, tc := range testCases { + roleName := "plugin-static-role-" + tc + data = map[string]interface{}{ + "name": roleName, + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser + tc, + "rotation_period": tc, + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + roleName, + Storage: config.StorageView, + Data: data, + } + + resp, err = 
b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + } + + // Verify the queue has 3 items in it + if bd.credRotationQueue.Len() != 3 { + t.Fatalf("expected 3 items in the rotation queue, got: (%d)", bd.credRotationQueue.Len()) + } + + // List the roles + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ListOperation, + Path: "static-roles/", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + keys := resp.Data["keys"].([]string) + if len(keys) != 3 { + t.Fatalf("expected 3 roles, got: (%d)", len(keys)) + } + + // Capture initial passwords, before the periodic function is triggered + pws := make(map[string][]string, 0) + pws = capturePasswords(t, b, config, testCases, pws) + + // Sleep to make sure the 65s role will be up for rotation by the time the + // periodic function ticks + time.Sleep(7 * time.Second) + + // Sleep 75 to make sure the periodic func has time to actually run + time.Sleep(75 * time.Second) + pws = capturePasswords(t, b, config, testCases, pws) + + // Sleep more, this should allow both sr65 and sr130 to rotate + time.Sleep(140 * time.Second) + pws = capturePasswords(t, b, config, testCases, pws) + + // Verify all pws are as they should + pass := true + for k, v := range pws { + switch { + case k == "plugin-static-role-65": + // expect all passwords to be different + if v[0] == v[1] || v[1] == v[2] || v[0] == v[2] { + pass = false + } + case k == "plugin-static-role-130": + // expect the first two to be equal, but different from the third + if v[0] != v[1] || v[0] == v[2] { + pass = false + } + case k == "plugin-static-role-5400": + // expect all passwords to be equal + if v[0] != v[1] || v[1] != v[2] { + pass = false + } + } + } + if !pass { + 
t.Fatalf("password rotations did not match expected: %#v", pws) + } +} + +// capturePasswords captures the current passwords at the time of calling, and +// returns a map of username / passwords building off of the input map +func capturePasswords(t *testing.T, b logical.Backend, config *logical.BackendConfig, testCases []string, pws map[string][]string) map[string][]string { + new := make(map[string][]string, 0) + for _, tc := range testCases { + // Read the role + roleName := "plugin-static-role-" + tc + req := &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + roleName, + Storage: config.StorageView, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("expected both username/password for (%s), got (%s), (%s)", roleName, username, password) + } + new[roleName] = append(new[roleName], password) + } + + for k, v := range new { + pws[k] = append(pws[k], v...) 
+ } + + return pws +} + +func newBoolPtr(b bool) *bool { + v := b + return &v +} diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index 38bbe2baa55c..7a05ce434fc8 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -432,7 +432,7 @@ func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUs } func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep { - expected := urlEntries{ + expected := certutil.URLEntries{ IssuingCertificates: []string{ "http://example.com/ca1", "http://example.com/ca2", @@ -499,7 +499,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s if resp.Data == nil { return fmt.Errorf("no data returned") } - var entries urlEntries + var entries certutil.URLEntries err := mapstructure.Decode(resp.Data, &entries) if err != nil { return err diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go index 80b2676f0021..8692486d87da 100644 --- a/builtin/logical/pki/ca_util.go +++ b/builtin/logical/pki/ca_util.go @@ -4,6 +4,7 @@ import ( "time" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -53,7 +54,9 @@ func (b *backend) getGenerationParams( return } - errorResp = validateKeyTypeLength(role.KeyType, role.KeyBits) + if err := certutil.ValidateKeyTypeLength(role.KeyType, role.KeyBits); err != nil { + errorResp = logical.ErrorResponse(err.Error()) + } return } diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index 90c78d388dbf..b82c28539f8d 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -1,13 +1,10 @@ package pki import ( - "bytes" "context" "crypto" "crypto/ecdsa" - "crypto/rand" "crypto/rsa" - "crypto/sha1" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" @@ -17,7 +14,6 @@ import ( 
"net" "net/url" "regexp" - "strconv" "strings" "time" @@ -27,95 +23,14 @@ import ( "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" - glob "github.com/ryanuber/go-glob" - "golang.org/x/crypto/cryptobyte" - cbbasn1 "golang.org/x/crypto/cryptobyte/asn1" + "github.com/ryanuber/go-glob" "golang.org/x/net/idna" ) -type certExtKeyUsage int - -const ( - anyExtKeyUsage certExtKeyUsage = 1 << iota - serverAuthExtKeyUsage - clientAuthExtKeyUsage - codeSigningExtKeyUsage - emailProtectionExtKeyUsage - ipsecEndSystemExtKeyUsage - ipsecTunnelExtKeyUsage - ipsecUserExtKeyUsage - timeStampingExtKeyUsage - ocspSigningExtKeyUsage - microsoftServerGatedCryptoExtKeyUsage - netscapeServerGatedCryptoExtKeyUsage - microsoftCommercialCodeSigningExtKeyUsage - microsoftKernelCodeSigningExtKeyUsage -) - -type dataBundle struct { - params *creationParameters - signingBundle *caInfoBundle - csr *x509.CertificateRequest - role *roleEntry - req *logical.Request - apiData *framework.FieldData -} - -type creationParameters struct { - Subject pkix.Name - DNSNames []string - EmailAddresses []string - IPAddresses []net.IP - URIs []*url.URL - OtherSANs map[string][]string - IsCA bool - KeyType string - KeyBits int - NotAfter time.Time - KeyUsage x509.KeyUsage - ExtKeyUsage certExtKeyUsage - ExtKeyUsageOIDs []string - PolicyIdentifiers []string - BasicConstraintsValidForNonCA bool - - // Only used when signing a CA cert - UseCSRValues bool - PermittedDNSDomains []string - - // URLs to encode into the certificate - URLs *urlEntries - - // The maximum path length to encode - MaxPathLength int - - // The duration the certificate will use NotBefore - NotBeforeDuration time.Duration -} - -type caInfoBundle struct { - certutil.ParsedCertBundle - URLs *urlEntries -} - -func (b *caInfoBundle) GetCAChain() []*certutil.CertBlock { - chain := []*certutil.CertBlock{} - - // Include issuing CA in Chain, not including Root 
Authority - if (len(b.Certificate.AuthorityKeyId) > 0 && - !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) || - (len(b.Certificate.AuthorityKeyId) == 0 && - !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { - - chain = append(chain, &certutil.CertBlock{ - Certificate: b.Certificate, - Bytes: b.CertificateBytes, - }) - if b.CAChain != nil && len(b.CAChain) > 0 { - chain = append(chain, b.CAChain...) - } - } - - return chain +type inputBundle struct { + role *roleEntry + req *logical.Request + apiData *framework.FieldData } var ( @@ -123,8 +38,7 @@ var ( // when doing the idna conversion, this appears to only affect output, not // input, so it will allow e.g. host^123.example.com straight through. So // we still need to use this to check the output. - hostnameRegex = regexp.MustCompile(`^(\*\.)?(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`) - oidExtensionBasicConstraints = []int{2, 5, 29, 19} + hostnameRegex = regexp.MustCompile(`^(\*\.)?(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`) ) func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { @@ -148,39 +62,9 @@ func getFormat(data *framework.FieldData) string { return format } -func validateKeyTypeLength(keyType string, keyBits int) *logical.Response { - switch keyType { - case "rsa": - switch keyBits { - case 2048: - case 4096: - case 8192: - default: - return logical.ErrorResponse(fmt.Sprintf( - "unsupported bit length for RSA key: %d", keyBits)) - } - case "ec": - switch keyBits { - case 224: - case 256: - case 384: - case 521: - default: - return logical.ErrorResponse(fmt.Sprintf( - "unsupported bit length for EC key: %d", keyBits)) - } - case "any": - default: - return logical.ErrorResponse(fmt.Sprintf( - "unknown key type %s", keyType)) - } - - return nil -} - // Fetches the CA info. 
Unlike other certificates, the CA info is stored // in the backend as a CertBundle, because we are storing its private key -func fetchCAInfo(ctx context.Context, req *logical.Request) (*caInfoBundle, error) { +func fetchCAInfo(ctx context.Context, req *logical.Request) (*certutil.CAInfoBundle, error) { bundleEntry, err := req.Storage.Get(ctx, "config/ca_bundle") if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch local CA certificate/key: %v", err)} @@ -203,14 +87,14 @@ func fetchCAInfo(ctx context.Context, req *logical.Request) (*caInfoBundle, erro return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} } - caInfo := &caInfoBundle{*parsedBundle, nil} + caInfo := &certutil.CAInfoBundle{*parsedBundle, nil} entries, err := getURLs(ctx, req) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} } if entries == nil { - entries = &urlEntries{ + entries = &certutil.URLEntries{ IssuingCertificates: []string{}, CRLDistributionPoints: []string{}, OCSPServers: []string{}, @@ -289,7 +173,7 @@ func fetchCertBySerial(ctx context.Context, req *logical.Request, prefix, serial // Given a set of requested names for a certificate, verifies that all of them // match the various toggles set in the role for controlling issuance. // If one does not pass, it is returned in the string argument. -func validateNames(data *dataBundle, names []string) string { +func validateNames(data *inputBundle, names []string) string { for _, name := range names { sanitizedName := name emailDomain := name @@ -464,7 +348,7 @@ func validateNames(data *dataBundle, names []string) string { // isn't allowed, it will be returned as the first string. If a value isn't // allowed, it will be returned as the second string. Empty strings + error // means everything is okay. 
-func validateOtherSANs(data *dataBundle, requested map[string][]string) (string, string, error) { +func validateOtherSANs(data *inputBundle, requested map[string][]string) (string, string, error) { for _, val := range data.role.AllowedOtherSANs { if val == "*" { // Anything is allowed @@ -522,7 +406,7 @@ func parseOtherSANs(others []string) (map[string][]string, error) { return result, nil } -func validateSerialNumber(data *dataBundle, serialNumber string) string { +func validateSerialNumber(data *inputBundle, serialNumber string) string { valid := false if len(data.role.AllowedSerialNumbers) > 0 { for _, currSerialNumber := range data.role.AllowedSerialNumbers { @@ -547,53 +431,54 @@ func validateSerialNumber(data *dataBundle, serialNumber string) string { func generateCert(ctx context.Context, b *backend, - data *dataBundle, + input *inputBundle, + caSign *certutil.CAInfoBundle, isCA bool) (*certutil.ParsedCertBundle, error) { - if data.role == nil { + if input.role == nil { return nil, errutil.InternalError{Err: "no role found in data bundle"} } - if data.role.KeyType == "rsa" && data.role.KeyBits < 2048 { + if input.role.KeyType == "rsa" && input.role.KeyBits < 2048 { return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} } - err := generateCreationBundle(b, data) + data, err := generateCreationBundle(b, input, caSign, nil) if err != nil { return nil, err } - if data.params == nil { + if data.Params == nil { return nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } if isCA { - data.params.IsCA = isCA - data.params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) + data.Params.IsCA = isCA + data.Params.PermittedDNSDomains = input.apiData.Get("permitted_dns_domains").([]string) - if data.signingBundle == nil { + if data.SigningBundle == nil { // Generating a self-signed root certificate - entries, err := getURLs(ctx, data.req) + entries, err := getURLs(ctx, 
input.req) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} } if entries == nil { - entries = &urlEntries{ + entries = &certutil.URLEntries{ IssuingCertificates: []string{}, CRLDistributionPoints: []string{}, OCSPServers: []string{}, } } - data.params.URLs = entries + data.Params.URLs = entries - if data.role.MaxPathLength == nil { - data.params.MaxPathLength = -1 + if input.role.MaxPathLength == nil { + data.Params.MaxPathLength = -1 } else { - data.params.MaxPathLength = *data.role.MaxPathLength + data.Params.MaxPathLength = *input.role.MaxPathLength } } } - parsedBundle, err := createCertificate(data) + parsedBundle, err := certutil.CreateCertificate(data) if err != nil { return nil, err } @@ -603,16 +488,17 @@ func generateCert(ctx context.Context, // N.B.: This is only meant to be used for generating intermediate CAs. // It skips some sanity checks. -func generateIntermediateCSR(b *backend, data *dataBundle) (*certutil.ParsedCSRBundle, error) { - err := generateCreationBundle(b, data) +func generateIntermediateCSR(b *backend, input *inputBundle) (*certutil.ParsedCSRBundle, error) { + creation, err := generateCreationBundle(b, input, nil, nil) if err != nil { return nil, err } - if data.params == nil { + if creation.Params == nil { return nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } - parsedBundle, err := createCSR(data) + addBasicConstraints := input.apiData != nil && input.apiData.Get("add_basic_constraints").(bool) + parsedBundle, err := certutil.CreateCSR(creation, addBasicConstraints) if err != nil { return nil, err } @@ -621,7 +507,8 @@ func generateIntermediateCSR(b *backend, data *dataBundle) (*certutil.ParsedCSRB } func signCert(b *backend, - data *dataBundle, + data *inputBundle, + caSign *certutil.CAInfoBundle, isCA bool, useCSRValues bool) (*certutil.ParsedCertBundle, error) { @@ -708,24 +595,22 @@ func signCert(b *backend, } - data.csr = 
csr - - err = generateCreationBundle(b, data) + creation, err := generateCreationBundle(b, data, caSign, csr) if err != nil { return nil, err } - if data.params == nil { + if creation.Params == nil { return nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } - data.params.IsCA = isCA - data.params.UseCSRValues = useCSRValues + creation.Params.IsCA = isCA + creation.Params.UseCSRValues = useCSRValues if isCA { - data.params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) + creation.Params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) } - parsedBundle, err := signCertificate(data) + parsedBundle, err := certutil.SignCertificate(creation) if err != nil { return nil, err } @@ -734,35 +619,35 @@ func signCert(b *backend, } // generateCreationBundle is a shared function that reads parameters supplied -// from the various endpoints and generates a creationParameters with the +// from the various endpoints and generates a CreationParameters with the // parameters that can be used to issue or sign -func generateCreationBundle(b *backend, data *dataBundle) error { +func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, error) { // Read in names -- CN, DNS and email addresses var cn string var ridSerialNumber string dnsNames := []string{} emailAddresses := []string{} { - if data.csr != nil && data.role.UseCSRCommonName { - cn = data.csr.Subject.CommonName + if csr != nil && data.role.UseCSRCommonName { + cn = csr.Subject.CommonName } if cn == "" { cn = data.apiData.Get("common_name").(string) if cn == "" && data.role.RequireCN { - return errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} + return nil, errutil.UserError{Err: `the common_name field is required, or must be 
provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} } } ridSerialNumber = data.apiData.Get("serial_number").(string) // only take serial number from CSR if one was not supplied via API - if ridSerialNumber == "" && data.csr != nil { - ridSerialNumber = data.csr.Subject.SerialNumber + if ridSerialNumber == "" && csr != nil { + ridSerialNumber = csr.Subject.SerialNumber } - if data.csr != nil && data.role.UseCSRSANs { - dnsNames = data.csr.DNSNames - emailAddresses = data.csr.EmailAddresses + if csr != nil && data.role.UseCSRSANs { + dnsNames = csr.DNSNames + emailAddresses = csr.EmailAddresses } if cn != "" && !data.apiData.Get("exclude_cn_from_sans").(bool) { @@ -782,7 +667,7 @@ func generateCreationBundle(b *backend, data *dataBundle) error { ) converted, err := p.ToASCII(cn) if err != nil { - return errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} } if hostnameRegex.MatchString(converted) { dnsNames = append(dnsNames, converted) @@ -790,7 +675,7 @@ func generateCreationBundle(b *backend, data *dataBundle) error { } } - if data.csr == nil || !data.role.UseCSRSANs { + if csr == nil || !data.role.UseCSRSANs { cnAltRaw, ok := data.apiData.GetOk("alt_names") if ok { cnAlt := strutil.ParseDedupLowercaseAndSortStrings(cnAltRaw.(string), ",") @@ -806,7 +691,7 @@ func generateCreationBundle(b *backend, data *dataBundle) error { ) converted, err := p.ToASCII(v) if err != nil { - return errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} } if hostnameRegex.MatchString(converted) { dnsNames = append(dnsNames, converted) @@ -821,7 +706,7 @@ func generateCreationBundle(b *backend, data *dataBundle) error { if cn != "" { badName := validateNames(data, []string{cn}) if len(badName) != 0 { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "common name %s not allowed by this role", badName)} } } @@ -829,7 +714,7 @@ func 
generateCreationBundle(b *backend, data *dataBundle) error { if ridSerialNumber != "" { badName := validateSerialNumber(data, ridSerialNumber) if len(badName) != 0 { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "serial_number %s not allowed by this role", badName)} } } @@ -837,13 +722,13 @@ func generateCreationBundle(b *backend, data *dataBundle) error { // Check for bad email and/or DNS names badName := validateNames(data, dnsNames) if len(badName) != 0 { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "subject alternate name %s not allowed by this role", badName)} } badName = validateNames(data, emailAddresses) if len(badName) != 0 { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "email address %s not allowed by this role", badName)} } } @@ -852,17 +737,17 @@ func generateCreationBundle(b *backend, data *dataBundle) error { if sans := data.apiData.Get("other_sans").([]string); len(sans) > 0 { requested, err := parseOtherSANs(sans) if err != nil { - return errutil.UserError{Err: errwrap.Wrapf("could not parse requested other SAN: {{err}}", err).Error()} + return nil, errutil.UserError{Err: errwrap.Wrapf("could not parse requested other SAN: {{err}}", err).Error()} } badOID, badName, err := validateOtherSANs(data, requested) switch { case err != nil: - return errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} case len(badName) > 0: - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "other SAN %s not allowed for OID %s by this role", badName, badOID)} case len(badOID) > 0: - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "other SAN OID %s not allowed by this role", badOID)} default: otherSANs = requested @@ -872,25 +757,25 @@ func generateCreationBundle(b *backend, data *dataBundle) error { // Get 
and verify any IP SANs ipAddresses := []net.IP{} { - if data.csr != nil && data.role.UseCSRSANs { - if len(data.csr.IPAddresses) > 0 { + if csr != nil && data.role.UseCSRSANs { + if len(csr.IPAddresses) > 0 { if !data.role.AllowIPSANs { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR")} } - ipAddresses = data.csr.IPAddresses + ipAddresses = csr.IPAddresses } } else { ipAlt := data.apiData.Get("ip_sans").([]string) if len(ipAlt) > 0 { if !data.role.AllowIPSANs { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)} } for _, v := range ipAlt { parsedIP := net.ParseIP(v) if parsedIP == nil { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "the value '%s' is not a valid IP address", v)} } ipAddresses = append(ipAddresses, parsedIP) @@ -901,16 +786,16 @@ func generateCreationBundle(b *backend, data *dataBundle) error { URIs := []*url.URL{} { - if data.csr != nil && data.role.UseCSRSANs { - if len(data.csr.URIs) > 0 { + if csr != nil && data.role.UseCSRSANs { + if len(csr.URIs) > 0 { if len(data.role.AllowedURISANs) == 0 { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "URI Subject Alternative Names are not allowed in this role, but were provided via CSR"), } } // validate uri sans - for _, uri := range data.csr.URIs { + for _, uri := range csr.URIs { valid := false for _, allowed := range data.role.AllowedURISANs { validURI := glob.Glob(allowed, uri.String()) @@ -921,7 +806,7 @@ func generateCreationBundle(b *backend, data *dataBundle) error { } if !valid { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "URI Subject Alternative Names were provided via CSR which are 
not valid for this role"), } } @@ -933,7 +818,7 @@ func generateCreationBundle(b *backend, data *dataBundle) error { uriAlt := data.apiData.Get("uri_sans").([]string) if len(uriAlt) > 0 { if len(data.role.AllowedURISANs) == 0 { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "URI Subject Alternative Names are not allowed in this role, but were provided via the API"), } } @@ -949,14 +834,14 @@ func generateCreationBundle(b *backend, data *dataBundle) error { } if !valid { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "URI Subject Alternative Names were provided via CSR which are not valid for this role"), } } parsedURI, err := url.Parse(uri) if parsedURI == nil || err != nil { - return errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "the provided URI Subject Alternative Name '%s' is not a valid URI", uri), } } @@ -1010,471 +895,66 @@ func generateCreationBundle(b *backend, data *dataBundle) error { // If it's not self-signed, verify that the issued certificate won't be // valid past the lifetime of the CA certificate - if data.signingBundle != nil && - notAfter.After(data.signingBundle.Certificate.NotAfter) && !data.role.AllowExpirationPastCA { - - return errutil.UserError{Err: fmt.Sprintf( - "cannot satisfy request, as TTL would result in notAfter %s that is beyond the expiration of the CA certificate at %s", notAfter.Format(time.RFC3339Nano), data.signingBundle.Certificate.NotAfter.Format(time.RFC3339Nano))} - } - } + if caSign != nil && + notAfter.After(caSign.Certificate.NotAfter) && !data.role.AllowExpirationPastCA { - data.params = &creationParameters{ - Subject: subject, - DNSNames: dnsNames, - EmailAddresses: emailAddresses, - IPAddresses: ipAddresses, - URIs: URIs, - OtherSANs: otherSANs, - KeyType: data.role.KeyType, - KeyBits: data.role.KeyBits, - NotAfter: notAfter, - KeyUsage: 
x509.KeyUsage(parseKeyUsages(data.role.KeyUsage)), - ExtKeyUsage: parseExtKeyUsages(data.role), - ExtKeyUsageOIDs: data.role.ExtKeyUsageOIDs, - PolicyIdentifiers: data.role.PolicyIdentifiers, - BasicConstraintsValidForNonCA: data.role.BasicConstraintsValidForNonCA, - NotBeforeDuration: data.role.NotBeforeDuration, + return nil, errutil.UserError{Err: fmt.Sprintf( + "cannot satisfy request, as TTL would result in notAfter %s that is beyond the expiration of the CA certificate at %s", notAfter.Format(time.RFC3339Nano), caSign.Certificate.NotAfter.Format(time.RFC3339Nano))} + } + } + + creation := &certutil.CreationBundle{ + Params: &certutil.CreationParameters{ + Subject: subject, + DNSNames: dnsNames, + EmailAddresses: emailAddresses, + IPAddresses: ipAddresses, + URIs: URIs, + OtherSANs: otherSANs, + KeyType: data.role.KeyType, + KeyBits: data.role.KeyBits, + NotAfter: notAfter, + KeyUsage: x509.KeyUsage(parseKeyUsages(data.role.KeyUsage)), + ExtKeyUsage: parseExtKeyUsages(data.role), + ExtKeyUsageOIDs: data.role.ExtKeyUsageOIDs, + PolicyIdentifiers: data.role.PolicyIdentifiers, + BasicConstraintsValidForNonCA: data.role.BasicConstraintsValidForNonCA, + NotBeforeDuration: data.role.NotBeforeDuration, + }, + SigningBundle: caSign, + CSR: csr, } // Don't deal with URLs or max path length if it's self-signed, as these // normally come from the signing bundle - if data.signingBundle == nil { - return nil + if caSign == nil { + return creation, nil } // This will have been read in from the getURLs function - data.params.URLs = data.signingBundle.URLs + creation.Params.URLs = caSign.URLs // If the max path length in the role is not nil, it was specified at // generation time with the max_path_length parameter; otherwise derive it // from the signing certificate if data.role.MaxPathLength != nil { - data.params.MaxPathLength = *data.role.MaxPathLength + creation.Params.MaxPathLength = *data.role.MaxPathLength } else { switch { - case 
data.signingBundle.Certificate.MaxPathLen < 0: - data.params.MaxPathLength = -1 - case data.signingBundle.Certificate.MaxPathLen == 0 && - data.signingBundle.Certificate.MaxPathLenZero: + case caSign.Certificate.MaxPathLen < 0: + creation.Params.MaxPathLength = -1 + case caSign.Certificate.MaxPathLen == 0 && + caSign.Certificate.MaxPathLenZero: // The signing function will ensure that we do not issue a CA cert - data.params.MaxPathLength = 0 + creation.Params.MaxPathLength = 0 default: // If this takes it to zero, we handle this case later if // necessary - data.params.MaxPathLength = data.signingBundle.Certificate.MaxPathLen - 1 + creation.Params.MaxPathLength = caSign.Certificate.MaxPathLen - 1 } } - return nil -} - -// addKeyUsages adds appropriate key usages to the template given the creation -// information -func addKeyUsages(data *dataBundle, certTemplate *x509.Certificate) { - if data.params.IsCA { - certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) - return - } - - certTemplate.KeyUsage = data.params.KeyUsage - - if data.params.ExtKeyUsage&anyExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) - } - - if data.params.ExtKeyUsage&serverAuthExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) - } - - if data.params.ExtKeyUsage&clientAuthExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if data.params.ExtKeyUsage&codeSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) - } - - if data.params.ExtKeyUsage&emailProtectionExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) - } - - if data.params.ExtKeyUsage&ipsecEndSystemExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) - } - 
- if data.params.ExtKeyUsage&ipsecTunnelExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) - } - - if data.params.ExtKeyUsage&ipsecUserExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) - } - - if data.params.ExtKeyUsage&timeStampingExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) - } - - if data.params.ExtKeyUsage&ocspSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) - } - - if data.params.ExtKeyUsage&microsoftServerGatedCryptoExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) - } - - if data.params.ExtKeyUsage&netscapeServerGatedCryptoExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto) - } - - if data.params.ExtKeyUsage&microsoftCommercialCodeSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) - } - - if data.params.ExtKeyUsage&microsoftKernelCodeSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) - } -} - -// addPolicyIdentifiers adds certificate policies extension -// -func addPolicyIdentifiers(data *dataBundle, certTemplate *x509.Certificate) { - for _, oidstr := range data.params.PolicyIdentifiers { - oid, err := stringToOid(oidstr) - if err == nil { - certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) - } - } -} - -// addExtKeyUsageOids adds custom extended key usage OIDs to certificate -func addExtKeyUsageOids(data *dataBundle, certTemplate *x509.Certificate) { - for _, oidstr := range data.params.ExtKeyUsageOIDs { - oid, err := stringToOid(oidstr) - if err == nil { - certTemplate.UnknownExtKeyUsage = 
append(certTemplate.UnknownExtKeyUsage, oid) - } - } -} - -// Performs the heavy lifting of creating a certificate. Returns -// a fully-filled-in ParsedCertBundle. -func createCertificate(data *dataBundle) (*certutil.ParsedCertBundle, error) { - var err error - result := &certutil.ParsedCertBundle{} - - serialNumber, err := certutil.GenerateSerialNumber() - if err != nil { - return nil, err - } - - if err := certutil.GeneratePrivateKey(data.params.KeyType, - data.params.KeyBits, - result); err != nil { - return nil, err - } - - subjKeyID, err := certutil.GetSubjKeyID(result.PrivateKey) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)} - } - - certTemplate := &x509.Certificate{ - SerialNumber: serialNumber, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: data.params.NotAfter, - IsCA: false, - SubjectKeyId: subjKeyID, - Subject: data.params.Subject, - DNSNames: data.params.DNSNames, - EmailAddresses: data.params.EmailAddresses, - IPAddresses: data.params.IPAddresses, - URIs: data.params.URIs, - } - if data.params.NotBeforeDuration > 0 { - certTemplate.NotBefore = time.Now().Add(-1 * data.params.NotBeforeDuration) - } - - if err := handleOtherSANs(certTemplate, data.params.OtherSANs); err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} - } - - // Add this before calling addKeyUsages - if data.signingBundle == nil { - certTemplate.IsCA = true - } else if data.params.BasicConstraintsValidForNonCA { - certTemplate.BasicConstraintsValid = true - certTemplate.IsCA = false - } - - // This will only be filled in from the generation paths - if len(data.params.PermittedDNSDomains) > 0 { - certTemplate.PermittedDNSDomains = data.params.PermittedDNSDomains - certTemplate.PermittedDNSDomainsCritical = true - } - - addPolicyIdentifiers(data, certTemplate) - - addKeyUsages(data, certTemplate) - - addExtKeyUsageOids(data, certTemplate) - - 
certTemplate.IssuingCertificateURL = data.params.URLs.IssuingCertificates - certTemplate.CRLDistributionPoints = data.params.URLs.CRLDistributionPoints - certTemplate.OCSPServer = data.params.URLs.OCSPServers - - var certBytes []byte - if data.signingBundle != nil { - switch data.signingBundle.PrivateKeyType { - case certutil.RSAPrivateKey: - certTemplate.SignatureAlgorithm = x509.SHA256WithRSA - case certutil.ECPrivateKey: - certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 - } - - caCert := data.signingBundle.Certificate - certTemplate.AuthorityKeyId = caCert.SubjectKeyId - - certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), data.signingBundle.PrivateKey) - } else { - // Creating a self-signed root - if data.params.MaxPathLength == 0 { - certTemplate.MaxPathLen = 0 - certTemplate.MaxPathLenZero = true - } else { - certTemplate.MaxPathLen = data.params.MaxPathLength - } - - switch data.params.KeyType { - case "rsa": - certTemplate.SignatureAlgorithm = x509.SHA256WithRSA - case "ec": - certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 - } - - certTemplate.AuthorityKeyId = subjKeyID - certTemplate.BasicConstraintsValid = true - certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) - } - - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} - } - - result.CertificateBytes = certBytes - result.Certificate, err = x509.ParseCertificate(certBytes) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} - } - - if data.signingBundle != nil { - if len(data.signingBundle.Certificate.AuthorityKeyId) > 0 && - !bytes.Equal(data.signingBundle.Certificate.AuthorityKeyId, data.signingBundle.Certificate.SubjectKeyId) { - - result.CAChain = []*certutil.CertBlock{ - &certutil.CertBlock{ - Certificate: 
data.signingBundle.Certificate, - Bytes: data.signingBundle.CertificateBytes, - }, - } - result.CAChain = append(result.CAChain, data.signingBundle.CAChain...) - } - } - - return result, nil -} - -// Creates a CSR. This is currently only meant for use when -// generating an intermediate certificate. -func createCSR(data *dataBundle) (*certutil.ParsedCSRBundle, error) { - var err error - result := &certutil.ParsedCSRBundle{} - - if err := certutil.GeneratePrivateKey(data.params.KeyType, - data.params.KeyBits, - result); err != nil { - return nil, err - } - - // Like many root CAs, other information is ignored - csrTemplate := &x509.CertificateRequest{ - Subject: data.params.Subject, - DNSNames: data.params.DNSNames, - EmailAddresses: data.params.EmailAddresses, - IPAddresses: data.params.IPAddresses, - URIs: data.params.URIs, - } - - if err := handleOtherCSRSANs(csrTemplate, data.params.OtherSANs); err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} - } - - if data.apiData != nil && data.apiData.Get("add_basic_constraints").(bool) { - type basicConstraints struct { - IsCA bool `asn1:"optional"` - MaxPathLen int `asn1:"optional,default:-1"` - } - val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1}) - if err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} - } - ext := pkix.Extension{ - Id: oidExtensionBasicConstraints, - Value: val, - Critical: true, - } - csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext) - } - - switch data.params.KeyType { - case "rsa": - csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA - case "ec": - csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 - } - - csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, result.PrivateKey) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} - } - 
- result.CSRBytes = csr - result.CSR, err = x509.ParseCertificateRequest(csr) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)} - } - - return result, nil -} - -// Performs the heavy lifting of generating a certificate from a CSR. -// Returns a ParsedCertBundle sans private keys. -func signCertificate(data *dataBundle) (*certutil.ParsedCertBundle, error) { - switch { - case data == nil: - return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"} - case data.params == nil: - return nil, errutil.UserError{Err: "nil parameters given to signCertificate"} - case data.signingBundle == nil: - return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"} - case data.csr == nil: - return nil, errutil.UserError{Err: "nil csr given to signCertificate"} - } - - err := data.csr.CheckSignature() - if err != nil { - return nil, errutil.UserError{Err: "request signature invalid"} - } - - result := &certutil.ParsedCertBundle{} - - serialNumber, err := certutil.GenerateSerialNumber() - if err != nil { - return nil, err - } - - marshaledKey, err := x509.MarshalPKIXPublicKey(data.csr.PublicKey) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} - } - subjKeyID := sha1.Sum(marshaledKey) - - caCert := data.signingBundle.Certificate - - certTemplate := &x509.Certificate{ - SerialNumber: serialNumber, - Subject: data.params.Subject, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: data.params.NotAfter, - SubjectKeyId: subjKeyID[:], - AuthorityKeyId: caCert.SubjectKeyId, - } - if data.params.NotBeforeDuration > 0 { - certTemplate.NotBefore = time.Now().Add(-1 * data.params.NotBeforeDuration) - } - - switch data.signingBundle.PrivateKeyType { - case certutil.RSAPrivateKey: - certTemplate.SignatureAlgorithm = x509.SHA256WithRSA - case certutil.ECPrivateKey: - certTemplate.SignatureAlgorithm = 
x509.ECDSAWithSHA256 - } - - if data.params.UseCSRValues { - certTemplate.Subject = data.csr.Subject - certTemplate.Subject.ExtraNames = certTemplate.Subject.Names - - certTemplate.DNSNames = data.csr.DNSNames - certTemplate.EmailAddresses = data.csr.EmailAddresses - certTemplate.IPAddresses = data.csr.IPAddresses - certTemplate.URIs = data.csr.URIs - - for _, name := range data.csr.Extensions { - if !name.Id.Equal(oidExtensionBasicConstraints) { - certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) - } - } - - } else { - certTemplate.DNSNames = data.params.DNSNames - certTemplate.EmailAddresses = data.params.EmailAddresses - certTemplate.IPAddresses = data.params.IPAddresses - certTemplate.URIs = data.params.URIs - } - - if err := handleOtherSANs(certTemplate, data.params.OtherSANs); err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} - } - - addPolicyIdentifiers(data, certTemplate) - - addKeyUsages(data, certTemplate) - - addExtKeyUsageOids(data, certTemplate) - - var certBytes []byte - - certTemplate.IssuingCertificateURL = data.params.URLs.IssuingCertificates - certTemplate.CRLDistributionPoints = data.params.URLs.CRLDistributionPoints - certTemplate.OCSPServer = data.signingBundle.URLs.OCSPServers - - if data.params.IsCA { - certTemplate.BasicConstraintsValid = true - certTemplate.IsCA = true - - if data.signingBundle.Certificate.MaxPathLen == 0 && - data.signingBundle.Certificate.MaxPathLenZero { - return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"} - } - - certTemplate.MaxPathLen = data.params.MaxPathLength - if certTemplate.MaxPathLen == 0 { - certTemplate.MaxPathLenZero = true - } - } else if data.params.BasicConstraintsValidForNonCA { - certTemplate.BasicConstraintsValid = true - certTemplate.IsCA = false - } - - if len(data.params.PermittedDNSDomains) > 0 { - 
certTemplate.PermittedDNSDomains = data.params.PermittedDNSDomains - certTemplate.PermittedDNSDomainsCritical = true - } - - certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, data.csr.PublicKey, data.signingBundle.PrivateKey) - - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} - } - - result.CertificateBytes = certBytes - result.Certificate, err = x509.ParseCertificate(certBytes) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} - } - - result.CAChain = data.signingBundle.GetCAChain() - - return result, nil + return creation, nil } func convertRespToPKCS8(resp *logical.Response) error { @@ -1539,129 +1019,3 @@ func convertRespToPKCS8(resp *logical.Response) error { return nil } - -func handleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { - certTemplate := &x509.Certificate{ - DNSNames: in.DNSNames, - IPAddresses: in.IPAddresses, - EmailAddresses: in.EmailAddresses, - URIs: in.URIs, - } - if err := handleOtherSANs(certTemplate, sans); err != nil { - return err - } - if len(certTemplate.ExtraExtensions) > 0 { - for _, v := range certTemplate.ExtraExtensions { - in.ExtraExtensions = append(in.ExtraExtensions, v) - } - } - return nil -} - -func handleOtherSANs(in *x509.Certificate, sans map[string][]string) error { - // If other SANs is empty we return which causes normal Go stdlib parsing - // of the other SAN types - if len(sans) == 0 { - return nil - } - - var rawValues []asn1.RawValue - - // We need to generate an IMPLICIT sequence for compatibility with OpenSSL - // -- it's an open question what the default for RFC 5280 actually is, see - // https://github.com/openssl/openssl/issues/5091 -- so we have to use - // cryptobyte because using the asn1 package's marshaling always produces - // an EXPLICIT sequence. 
Note that asn1 is way too magical according to - // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into - // boringssl. - for oid, vals := range sans { - for _, val := range vals { - var b cryptobyte.Builder - oidStr, err := stringToOid(oid) - if err != nil { - return err - } - b.AddASN1ObjectIdentifier(oidStr) - b.AddASN1(cbbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { - b.AddASN1(cbbasn1.UTF8String, func(b *cryptobyte.Builder) { - b.AddBytes([]byte(val)) - }) - }) - m, err := b.Bytes() - if err != nil { - return err - } - rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) - } - } - - // If other SANs is empty we return which causes normal Go stdlib parsing - // of the other SAN types - if len(rawValues) == 0 { - return nil - } - - // Append any existing SANs, sans marshalling - rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) - - // Marshal and add to ExtraExtensions - ext := pkix.Extension{ - // This is the defined OID for subjectAltName - Id: asn1.ObjectIdentifier{2, 5, 29, 17}, - } - var err error - ext.Value, err = asn1.Marshal(rawValues) - if err != nil { - return err - } - in.ExtraExtensions = append(in.ExtraExtensions, ext) - - return nil -} - -// Note: Taken from the Go source code since it's not public, and used in the -// modified function below (which also uses these consts upstream) -const ( - nameTypeEmail = 1 - nameTypeDNS = 2 - nameTypeURI = 6 - nameTypeIP = 7 -) - -// Note: Taken from the Go source code since it's not public, plus changed to not marshal -// marshalSANs marshals a list of addresses into a the contents of an X.509 -// SubjectAlternativeName extension. 
-func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { - var rawValues []asn1.RawValue - for _, name := range dnsNames { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) - } - for _, email := range emailAddresses { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) - } - for _, rawIP := range ipAddresses { - // If possible, we always want to encode IPv4 addresses in 4 bytes. - ip := rawIP.To4() - if ip == nil { - ip = rawIP - } - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) - } - for _, uri := range uris { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) - } - return rawValues -} - -func stringToOid(in string) (asn1.ObjectIdentifier, error) { - split := strings.Split(in, ".") - ret := make(asn1.ObjectIdentifier, 0, len(split)) - for _, v := range split { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - ret = append(ret, i) - } - return asn1.ObjectIdentifier(ret), nil -} diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index 911d2c76df9a..bd9517b2ce6b 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -139,20 +139,20 @@ func TestPki_MultipleOUs(t *testing.T) { "ttl": 3600, }, } - input := &dataBundle{ + input := &inputBundle{ apiData: apiData, role: &roleEntry{ MaxTTL: 3600, OU: []string{"Z", "E", "V"}, }, } - err := generateCreationBundle(&b, input) + cb, err := generateCreationBundle(&b, input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } expected := []string{"Z", "E", "V"} - actual := input.params.Subject.OrganizationalUnit + actual := cb.Params.Subject.OrganizationalUnit if !reflect.DeepEqual(expected, actual) { t.Fatalf("Expected %v, got %v", expected, actual) diff --git a/builtin/logical/pki/fields.go 
b/builtin/logical/pki/fields.go index 0f5266a77b2d..c67e39eaac20 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -11,7 +11,9 @@ func addIssueAndSignCommonFields(fields map[string]*framework.FieldSchema) map[s Description: `If true, the Common Name will not be included in DNS or Email Subject Alternate Names. Defaults to false (CN is included).`, - DisplayName: "Exclude Common Name from Subject Alternative Names (SANs)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Exclude Common Name from Subject Alternative Names (SANs)", + }, } fields["format"] = &framework.FieldSchema{ @@ -22,6 +24,9 @@ or "pem_bundle". If "pem_bundle" any private key and issuing cert will be appended to the certificate pem. Defaults to "pem".`, AllowedValues: []interface{}{"pem", "der", "pem_bundle"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "pem", + }, } fields["private_key_format"] = &framework.FieldSchema{ @@ -34,27 +39,36 @@ However, this can be set to "pkcs8" to have the returned private key contain base64-encoded pkcs8 or PEM-encoded pkcs8 instead. 
Defaults to "der".`, AllowedValues: []interface{}{"", "der", "pem", "pkcs8"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "der", + }, } fields["ip_sans"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `The requested IP SANs, if any, in a comma-delimited list`, - DisplayName: "IP Subject Alternative Names (SANs)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "IP Subject Alternative Names (SANs)", + }, } fields["uri_sans"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `The requested URI SANs, if any, in a comma-delimited list.`, - DisplayName: "URI Subject Alternative Names (SANs)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "URI Subject Alternative Names (SANs)", + }, } fields["other_sans"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `Requested other SANs, in an array with the format ;UTF8: for each entry.`, - DisplayName: "Other SANs", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Other SANs", + }, } return fields @@ -85,7 +99,9 @@ in the role, this may be an email address.`, in a comma-delimited list. If email protection is enabled for the role, this may contain email addresses.`, - DisplayName: "DNS/Email Subject Alternative Names (SANs)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "DNS/Email Subject Alternative Names (SANs)", + }, } fields["serial_number"] = &framework.FieldSchema{ @@ -102,7 +118,9 @@ sets the expiration date. If not specified the role default, backend default, or system default TTL is used, in that order. Cannot be larger than the role max TTL.`, - DisplayName: "TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, } return fields @@ -118,7 +136,9 @@ func addCACommonFields(fields map[string]*framework.FieldSchema) map[string]*fra Description: `The requested Subject Alternative Names, if any, in a comma-delimited list. 
May contain both DNS names and email addresses.`, - DisplayName: "DNS/Email Subject Alternative Names (SANs)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "DNS/Email Subject Alternative Names (SANs)", + }, } fields["common_name"] = &framework.FieldSchema{ @@ -140,14 +160,18 @@ be larger than the mount max TTL. Note: this only has an effect when generating a CA cert or signing a CA cert, not when generating a CSR for an intermediate CA.`, - DisplayName: "TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, } fields["ou"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, OU (OrganizationalUnit) will be set to this value.`, - DisplayName: "OU (Organizational Unit)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "OU (Organizational Unit)", + }, } fields["organization"] = &framework.FieldSchema{ @@ -166,28 +190,36 @@ this value.`, Type: framework.TypeCommaStringSlice, Description: `If set, Locality will be set to this value.`, - DisplayName: "Locality/City", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Locality/City", + }, } fields["province"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, Province will be set to this value.`, - DisplayName: "Province/State", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Province/State", + }, } fields["street_address"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, Street Address will be set to this value.`, - DisplayName: "Street Address", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Street Address", + }, } fields["postal_code"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, Postal Code will be set to this value.`, - DisplayName: "Postal Code", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Postal Code", + }, } fields["serial_number"] = &framework.FieldSchema{ @@ -217,6 +249,9 @@ the private key!`, Description: 
`The number of bits to use. You will almost certainly want to change this if you adjust the key_type.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 2048, + }, } fields["key_type"] = &framework.FieldSchema{ @@ -225,6 +260,9 @@ the key_type.`, Description: `The type of key to use; defaults to RSA. "rsa" and "ec" are the only valid values.`, AllowedValues: []interface{}{"rsa", "ec"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "rsa", + }, } return fields } @@ -241,7 +279,9 @@ func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*fram fields["permitted_dns_domains"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `Domains for which this certificate is allowed to sign or issue child certificates. If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, - DisplayName: "Permitted DNS Domains", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Permitted DNS Domains", + }, } return fields diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go index 93dd81b7b3d5..1644f605e109 100644 --- a/builtin/logical/pki/path_config_urls.go +++ b/builtin/logical/pki/path_config_urls.go @@ -7,6 +7,7 @@ import ( "github.com/asaskevich/govalidator" "github.com/fatih/structs" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -53,7 +54,7 @@ func validateURLs(urls []string) string { return "" } -func getURLs(ctx context.Context, req *logical.Request) (*urlEntries, error) { +func getURLs(ctx context.Context, req *logical.Request) (*certutil.URLEntries, error) { entry, err := req.Storage.Get(ctx, "urls") if err != nil { return nil, err @@ -62,7 +63,7 @@ func getURLs(ctx context.Context, req *logical.Request) (*urlEntries, error) { return nil, nil } - var entries urlEntries + var 
entries certutil.URLEntries if err := entry.DecodeJSON(&entries); err != nil { return nil, err } @@ -70,7 +71,7 @@ func getURLs(ctx context.Context, req *logical.Request) (*urlEntries, error) { return &entries, nil } -func writeURLs(ctx context.Context, req *logical.Request, entries *urlEntries) error { +func writeURLs(ctx context.Context, req *logical.Request, entries *certutil.URLEntries) error { entry, err := logical.StorageEntryJSON("urls", entries) if err != nil { return err @@ -109,7 +110,7 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * return nil, err } if entries == nil { - entries = &urlEntries{ + entries = &certutil.URLEntries{ IssuingCertificates: []string{}, CRLDistributionPoints: []string{}, OCSPServers: []string{}, @@ -141,12 +142,6 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * return nil, writeURLs(ctx, req, entries) } -type urlEntries struct { - IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` - CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` - OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` -} - const pathConfigURLsHelpSyn = ` Set the URLs for the issuing CA, CRL distribution points, and OCSP servers. 
` diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index 0f7b57f56c76..80788b39a5da 100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -71,7 +71,7 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req } var resp *logical.Response - input := &dataBundle{ + input := &inputBundle{ role: role, req: req, apiData: data, diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index 1ace8071e7c9..8df1a7c50c37 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -205,18 +205,17 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d "error fetching CA certificate: %s", caErr)} } - input := &dataBundle{ - req: req, - apiData: data, - role: role, - signingBundle: signingBundle, + input := &inputBundle{ + req: req, + apiData: data, + role: role, } var parsedBundle *certutil.ParsedCertBundle var err error if useCSR { - parsedBundle, err = signCert(b, input, false, useCSRValues) + parsedBundle, err = signCert(b, input, signingBundle, false, useCSRValues) } else { - parsedBundle, err = generateCert(ctx, b, input, false) + parsedBundle, err = generateCert(ctx, b, input, signingBundle, false) } if err != nil { switch err.(type) { diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go index 4885b3f31222..f5f1f5fdcc65 100644 --- a/builtin/logical/pki/path_roles.go +++ b/builtin/logical/pki/path_roles.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/hashicorp/vault/sdk/logical" @@ -47,13 +48,17 @@ func pathRoles(b *backend) *framework.Path { requested. 
The lease duration controls the expiration of certificates issued by this backend. Defaults to the value of max_ttl.`, - DisplayName: "TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, }, "max_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Description: "The maximum allowed lease duration", - DisplayName: "Max TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Max TTL", + }, }, "allow_localhost": &framework.FieldSchema{ @@ -61,6 +66,9 @@ the value of max_ttl.`, Default: true, Description: `Whether to allow "localhost" as a valid common name in a request`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, }, "allowed_domains": &framework.FieldSchema{ @@ -107,6 +115,9 @@ information.`, Default: true, Description: `If set, only valid host names are allowed for CN and SANs. Defaults to true.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, }, "allow_ip_sans": &framework.FieldSchema{ @@ -114,20 +125,27 @@ CN and SANs. Defaults to true.`, Default: true, Description: `If set, IP Subject Alternative Names are allowed. Any valid IP is accepted.`, - DisplayName: "Allow IP Subject Alternative Names", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allow IP Subject Alternative Names", + Value: true, + }, }, "allowed_uri_sans": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, an array of allowed URIs to put in the URI Subject Alternative Names. Any valid URI is accepted, these values support globbing.`, - DisplayName: "Allowed URI Subject Alternative Names", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allowed URI Subject Alternative Names", + }, }, "allowed_other_sans": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format ;:. Currently only "utf8" is a valid type. 
All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`, - DisplayName: "Allowed Other Subject Alternative Names", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allowed Other Subject Alternative Names", + }, }, "allowed_serial_numbers": &framework.FieldSchema{ @@ -140,6 +158,9 @@ Any valid URI is accepted, these values support globbing.`, Default: true, Description: `If set, certificates are flagged for server auth use. Defaults to true.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, }, "client_flag": &framework.FieldSchema{ @@ -147,6 +168,9 @@ Defaults to true.`, Default: true, Description: `If set, certificates are flagged for client auth use. Defaults to true.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, }, "code_signing_flag": &framework.FieldSchema{ @@ -186,7 +210,9 @@ https://golang.org/pkg/crypto/x509/#KeyUsage -- simply drop the "KeyUsage" part of the name. To remove all key usages from being set, set this value to an empty list.`, - DisplayValue: "DigitalSignature,KeyAgreement,KeyEncipherment", + DisplayAttrs: &framework.DisplayAttributes{ + Value: "DigitalSignature,KeyAgreement,KeyEncipherment", + }, }, "ext_key_usage": &framework.FieldSchema{ @@ -197,13 +223,17 @@ https://golang.org/pkg/crypto/x509/#ExtKeyUsage -- simply drop the "ExtKeyUsage" part of the name. 
To remove all key usages from being set, set this value to an empty list.`, - DisplayName: "Extended Key Usage", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Extended Key Usage", + }, }, "ext_key_usage_oids": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `A comma-separated string or list of extended key usage oids.`, - DisplayName: "Extended Key Usage OIDs", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Extended Key Usage OIDs", + }, }, "use_csr_common_name": &framework.FieldSchema{ @@ -213,7 +243,10 @@ this value to an empty list.`, the common name in the CSR will be used. This does *not* include any requested Subject Alternative Names. Defaults to true.`, - DisplayName: "Use CSR Common Name", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Use CSR Common Name", + Value: true, + }, }, "use_csr_sans": &framework.FieldSchema{ @@ -222,14 +255,19 @@ Names. Defaults to true.`, Description: `If set, when used with a signing profile, the SANs in the CSR will be used. This does *not* include the Common Name (cn). 
Defaults to true.`, - DisplayName: "Use CSR Subject Alternative Names", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Use CSR Subject Alternative Names", + Value: true, + }, }, "ou": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, OU (OrganizationalUnit) will be set to this value in certificates issued by this role.`, - DisplayName: "Organizational Unit", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Organizational Unit", + }, }, "organization": &framework.FieldSchema{ @@ -248,14 +286,18 @@ this value in certificates issued by this role.`, Type: framework.TypeCommaStringSlice, Description: `If set, Locality will be set to this value in certificates issued by this role.`, - DisplayName: "Locality/City", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Locality/City", + }, }, "province": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `If set, Province will be set to this value in certificates issued by this role.`, - DisplayName: "Province/State", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Province/State", + }, }, "street_address": &framework.FieldSchema{ @@ -298,7 +340,9 @@ for "generate_lease".`, Type: framework.TypeBool, Default: true, Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`, - DisplayName: "Use CSR Common Name", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Require Common Name", + }, }, "policy_identifiers": &framework.FieldSchema{ @@ -309,12 +353,17 @@ for "generate_lease".`, "basic_constraints_valid_for_non_ca": &framework.FieldSchema{ Type: framework.TypeBool, Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, - DisplayName: "Basic Constraints Valid for Non-CA", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Basic Constraints Valid for Non-CA", + }, }, "not_before_duration": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Default: 30, 
Description: `The duration before now the cert needs to be created / signed.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 30, + }, }, }, @@ -552,13 +601,13 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data ), nil } - if errResp := validateKeyTypeLength(entry.KeyType, entry.KeyBits); errResp != nil { - return errResp, nil + if err := certutil.ValidateKeyTypeLength(entry.KeyType, entry.KeyBits); err != nil { + return logical.ErrorResponse(err.Error()), nil } if len(entry.ExtKeyUsageOIDs) > 0 { for _, oidstr := range entry.ExtKeyUsageOIDs { - _, err := stringToOid(oidstr) + _, err := certutil.StringToOid(oidstr) if err != nil { return logical.ErrorResponse(fmt.Sprintf("%q could not be parsed as a valid oid for an extended key usage", oidstr)), nil } @@ -567,7 +616,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data if len(entry.PolicyIdentifiers) > 0 { for _, oidstr := range entry.PolicyIdentifiers { - _, err := stringToOid(oidstr) + _, err := certutil.StringToOid(oidstr) if err != nil { return logical.ErrorResponse(fmt.Sprintf("%q could not be parsed as a valid oid for a policy identifier", oidstr)), nil } @@ -614,51 +663,51 @@ func parseKeyUsages(input []string) int { return int(parsedKeyUsages) } -func parseExtKeyUsages(role *roleEntry) certExtKeyUsage { - var parsedKeyUsages certExtKeyUsage +func parseExtKeyUsages(role *roleEntry) certutil.CertExtKeyUsage { + var parsedKeyUsages certutil.CertExtKeyUsage if role.ServerFlag { - parsedKeyUsages |= serverAuthExtKeyUsage + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage } if role.ClientFlag { - parsedKeyUsages |= clientAuthExtKeyUsage + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage } if role.CodeSigningFlag { - parsedKeyUsages |= codeSigningExtKeyUsage + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage } if role.EmailProtectionFlag { - parsedKeyUsages |= emailProtectionExtKeyUsage + parsedKeyUsages |= 
certutil.EmailProtectionExtKeyUsage } for _, k := range role.ExtKeyUsage { switch strings.ToLower(strings.TrimSpace(k)) { case "any": - parsedKeyUsages |= anyExtKeyUsage + parsedKeyUsages |= certutil.AnyExtKeyUsage case "serverauth": - parsedKeyUsages |= serverAuthExtKeyUsage + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage case "clientauth": - parsedKeyUsages |= clientAuthExtKeyUsage + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage case "codesigning": - parsedKeyUsages |= codeSigningExtKeyUsage + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage case "emailprotection": - parsedKeyUsages |= emailProtectionExtKeyUsage + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage case "ipsecendsystem": - parsedKeyUsages |= ipsecEndSystemExtKeyUsage + parsedKeyUsages |= certutil.IpsecEndSystemExtKeyUsage case "ipsectunnel": - parsedKeyUsages |= ipsecTunnelExtKeyUsage + parsedKeyUsages |= certutil.IpsecTunnelExtKeyUsage case "ipsecuser": - parsedKeyUsages |= ipsecUserExtKeyUsage + parsedKeyUsages |= certutil.IpsecUserExtKeyUsage case "timestamping": - parsedKeyUsages |= timeStampingExtKeyUsage + parsedKeyUsages |= certutil.TimeStampingExtKeyUsage case "ocspsigning": - parsedKeyUsages |= ocspSigningExtKeyUsage + parsedKeyUsages |= certutil.OcspSigningExtKeyUsage case "microsoftservergatedcrypto": - parsedKeyUsages |= microsoftServerGatedCryptoExtKeyUsage + parsedKeyUsages |= certutil.MicrosoftServerGatedCryptoExtKeyUsage case "netscapeservergatedcrypto": - parsedKeyUsages |= netscapeServerGatedCryptoExtKeyUsage + parsedKeyUsages |= certutil.NetscapeServerGatedCryptoExtKeyUsage } } diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index 2c3d7d7251c4..927aea2f53f1 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "encoding/pem" "fmt" + "github.com/hashicorp/vault/sdk/helper/certutil" "reflect" "strings" "time" @@ -139,12 +140,12 @@ func (b *backend) 
pathCAGenerateRoot(ctx context.Context, req *logical.Request, role.MaxPathLength = &maxPathLength } - input := &dataBundle{ + input := &inputBundle{ req: req, apiData: data, role: role, } - parsedBundle, err := generateCert(ctx, b, input, true) + parsedBundle, err := generateCert(ctx, b, input, nil, true) if err != nil { switch err.(type) { case errutil.UserError: @@ -296,13 +297,12 @@ func (b *backend) pathCASignIntermediate(ctx context.Context, req *logical.Reque role.MaxPathLength = &maxPathLength } - input := &dataBundle{ - req: req, - apiData: data, - signingBundle: signingBundle, - role: role, + input := &inputBundle{ + req: req, + apiData: data, + role: role, } - parsedBundle, err := signCert(b, input, true, useCSRValues) + parsedBundle, err := signCert(b, input, signingBundle, true, useCSRValues) if err != nil { switch err.(type) { case errutil.UserError: @@ -420,7 +420,7 @@ func (b *backend) pathCASignSelfIssued(ctx context.Context, req *logical.Request return nil, errwrap.Wrapf("error converting raw signing bundle to cert bundle: {{err}}", err) } - urls := &urlEntries{} + urls := &certutil.URLEntries{} if signingBundle.URLs != nil { urls = signingBundle.URLs } diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 5bd9e58d7dab..450d76844779 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -93,7 +93,9 @@ func pathRoles(b *backend) *framework.Path { credential is being generated for other users, Vault uses this admin username to login to remote host and install the generated credential for the other user.`, - DisplayName: "Admin Username", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Admin Username", + }, }, "default_user": &framework.FieldSchema{ Type: framework.TypeString, @@ -102,7 +104,9 @@ func pathRoles(b *backend) *framework.Path { Default username for which a credential will be generated. 
When the endpoint 'creds/' is used without a username, this value will be used as default username.`, - DisplayName: "Default Username", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Default Username", + }, }, "cidr_list": &framework.FieldSchema{ Type: framework.TypeString, @@ -110,7 +114,9 @@ func pathRoles(b *backend) *framework.Path { [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] Comma separated list of CIDR blocks for which the role is applicable for. CIDR blocks can belong to more than one role.`, - DisplayName: "CIDR List", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "CIDR List", + }, }, "exclude_cidr_list": &framework.FieldSchema{ Type: framework.TypeString, @@ -119,7 +125,9 @@ func pathRoles(b *backend) *framework.Path { Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not accepted by the role. This is particularly useful when big CIDR blocks are being used by the role and certain parts of it needs to be kept out.`, - DisplayName: "Exclude CIDR List", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Exclude CIDR List", + }, }, "port": &framework.FieldSchema{ Type: framework.TypeInt, @@ -129,7 +137,9 @@ func pathRoles(b *backend) *framework.Path { play any role in creation of OTP. For 'otp' type, this is just a way to inform client about the port number to use. Port number will be returned to client by Vault server along with OTP.`, - DisplayValue: 22, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 22, + }, }, "key_type": &framework.FieldSchema{ Type: framework.TypeString, @@ -138,7 +148,9 @@ func pathRoles(b *backend) *framework.Path { Type of key used to login to hosts. It can be either 'otp', 'dynamic' or 'ca'. 
'otp' type requires agent to be installed in remote hosts.`, AllowedValues: []interface{}{"otp", "dynamic", "ca"}, - DisplayValue: "ca", + DisplayAttrs: &framework.DisplayAttributes{ + Value: "ca", + }, }, "key_bits": &framework.FieldSchema{ Type: framework.TypeInt, @@ -195,7 +207,9 @@ func pathRoles(b *backend) *framework.Path { requested. The lease duration controls the expiration of certificates issued by this backend. Defaults to the value of max_ttl.`, - DisplayName: "TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, }, "max_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, @@ -203,7 +217,9 @@ func pathRoles(b *backend) *framework.Path { [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] The maximum allowed lease duration `, - DisplayName: "Max TTL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Max TTL", + }, }, "allowed_critical_options": &framework.FieldSchema{ Type: framework.TypeString, @@ -281,7 +297,9 @@ func pathRoles(b *backend) *framework.Path { When false, the key ID will always be the token display name. The key ID is logged by the SSH server and can be useful for auditing. `, - DisplayName: "Allow User Key IDs", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allow User Key IDs", + }, }, "key_id_format": &framework.FieldSchema{ Type: framework.TypeString, @@ -292,7 +310,9 @@ func pathRoles(b *backend) *framework.Path { the token used to make the request. '{{role_name}}' - The name of the role signing the request. '{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed. 
`, - DisplayName: "Key ID Format", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Key ID Format", + }, }, "allowed_user_key_lengths": &framework.FieldSchema{ Type: framework.TypeMap, diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index 58fb5910d1d7..9d8d43b38f32 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -4,20 +4,25 @@ import ( "context" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend(conf) + + b, err := Backend(ctx, conf) + if err != nil { + return nil, err + } if err := b.Setup(ctx, conf); err != nil { return nil, err } return b, nil } -func Backend(conf *logical.BackendConfig) *backend { +func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) { var b backend b.Backend = &framework.Backend{ PathsSpecial: &logical.Paths{ @@ -47,6 +52,7 @@ func Backend(conf *logical.BackendConfig) *backend { b.pathBackup(), b.pathRestore(), b.pathTrim(), + b.pathCacheConfig(), }, Secrets: []*framework.Secret{}, @@ -54,9 +60,24 @@ func Backend(conf *logical.BackendConfig) *backend { BackendType: logical.TypeLogical, } - b.lm = keysutil.NewLockManager(conf.System.CachingDisabled()) + // determine cacheSize to use. 
Defaults to 0 which means unlimited + cacheSize := 0 + useCache := !conf.System.CachingDisabled() + if useCache { + var err error + cacheSize, err = GetCacheSizeFromStorage(ctx, conf.StorageView) + if err != nil { + return nil, errwrap.Wrapf("Error retrieving cache size from storage: {{err}}", err) + } + } - return &b + var err error + b.lm, err = keysutil.NewLockManager(useCache, cacheSize) + if err != nil { + return nil, err + } + + return &b, nil } type backend struct { @@ -64,6 +85,22 @@ type backend struct { lm *keysutil.LockManager } +func GetCacheSizeFromStorage(ctx context.Context, s logical.Storage) (int, error) { + size := 0 + entry, err := s.Get(ctx, "config/cache") + if err != nil { + return 0, err + } + if entry != nil { + var storedCache configCache + if err := entry.DecodeJSON(&storedCache); err != nil { + return 0, err + } + size = storedCache.Size + } + return size, nil +} + func (b *backend) invalidate(_ context.Context, key string) { if b.Logger().IsDebug() { b.Logger().Debug("invalidating key", "key", key) diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index 800fbf4de0b7..db103782e0d5 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -30,7 +30,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b, _ := Backend(context.Background(), config) if b == nil { t.Fatalf("failed to create backend") } @@ -50,7 +50,7 @@ func createBackendWithSysView(t *testing.T) (*backend, logical.Storage) { System: sysView, } - b := Backend(conf) + b, _ := Backend(context.Background(), conf) if b == nil { t.Fatal("failed to create backend") } @@ -63,6 +63,49 @@ func createBackendWithSysView(t *testing.T) (*backend, logical.Storage) { return b, storage } +func createBackendWithSysViewWithStorage(t *testing.T, s logical.Storage) *backend { + 
sysView := logical.TestSystemView() + + conf := &logical.BackendConfig{ + StorageView: s, + System: sysView, + } + + b, _ := Backend(context.Background(), conf) + if b == nil { + t.Fatal("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), conf) + if err != nil { + t.Fatal(err) + } + + return b +} + +func createBackendWithForceNoCacheWithSysViewWithStorage(t *testing.T, s logical.Storage) *backend { + sysView := logical.TestSystemView() + sysView.CachingDisabledVal = true + + conf := &logical.BackendConfig{ + StorageView: s, + System: sysView, + } + + b, _ := Backend(context.Background(), conf) + if b == nil { + t.Fatal("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), conf) + if err != nil { + t.Fatal(err) + } + + return b +} + func TestTransit_RSA(t *testing.T) { testTransit_RSA(t, "rsa-2048") testTransit_RSA(t, "rsa-4096") @@ -1294,16 +1337,17 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT func TestPolicyFuzzing(t *testing.T) { var be *backend sysView := logical.TestSystemView() + sysView.CachingDisabledVal = true conf := &logical.BackendConfig{ System: sysView, } - be = Backend(conf) + be, _ = Backend(context.Background(), conf) be.Setup(context.Background(), conf) testPolicyFuzzingCommon(t, be) sysView.CachingDisabledVal = true - be = Backend(conf) + be, _ = Backend(context.Background(), conf) be.Setup(context.Background(), conf) testPolicyFuzzingCommon(t, be) } diff --git a/builtin/logical/transit/path_cache_config.go b/builtin/logical/transit/path_cache_config.go new file mode 100644 index 000000000000..3351249691d2 --- /dev/null +++ b/builtin/logical/transit/path_cache_config.go @@ -0,0 +1,106 @@ +package transit + +import ( + "context" + "errors" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathCacheConfig() *framework.Path { + return &framework.Path{ + Pattern: "cache-config", + Fields: 
map[string]*framework.FieldSchema{ + "size": &framework.FieldSchema{ + Type: framework.TypeInt, + Required: false, + Default: 0, + Description: `Size of cache, use 0 for an unlimited cache size, defaults to 0`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathCacheConfigRead, + Summary: "Returns the size of the active cache", + }, + + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCacheConfigWrite, + Summary: "Configures a new cache of the specified size", + }, + + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathCacheConfigWrite, + Summary: "Configures a new cache of the specified size", + }, + }, + + HelpSynopsis: pathCacheConfigHelpSyn, + HelpDescription: pathCacheConfigHelpDesc, + } +} + +func (b *backend) pathCacheConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // get target size + cacheSize := d.Get("size").(int) + if cacheSize < 0 { + return logical.ErrorResponse("size must be greater or equal to 0"), logical.ErrInvalidRequest + } + + // store cache size + entry, err := logical.StorageEntryJSON("config/cache", &configCache{ + Size: cacheSize, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + resp := &logical.Response{ + Warnings: []string{"cache configurations will be applied when this backend is restarted"}, + } + + return resp, nil +} + +type configCache struct { + Size int `json:"size"` +} + +func (b *backend) pathCacheConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // error if no cache is configured + if !b.lm.GetUseCache() { + return nil, errors.New( + "caching is disabled for this transit mount", + ) + } + + // Compare current and stored cache sizes. If they are different warn the user. 
+ currentCacheSize := b.lm.GetCacheSize() + storedCacheSize, err := GetCacheSizeFromStorage(ctx, req.Storage) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "size": storedCacheSize, + }, + } + + if currentCacheSize != storedCacheSize { + resp.Warnings = []string{"This cache size will not be applied until the transit mount is reloaded"} + } + + return resp, nil +} + +const pathCacheConfigHelpSyn = `Configure caching strategy` + +const pathCacheConfigHelpDesc = ` +This path is used to configure and query the cache size of the active cache, a size of 0 means unlimited. +` diff --git a/builtin/logical/transit/path_cache_config_test.go b/builtin/logical/transit/path_cache_config_test.go new file mode 100644 index 000000000000..6cca1b265676 --- /dev/null +++ b/builtin/logical/transit/path_cache_config_test.go @@ -0,0 +1,80 @@ +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +const targetCacheSize = 12345 + +func TestTransit_CacheConfig(t *testing.T) { + b1, storage := createBackendWithSysView(t) + + doReq := func(b *backend, req *logical.Request) *logical.Response { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) + } + return resp + } + + doErrReq := func(b *backend, req *logical.Request) { + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + if resp == nil || !resp.IsError() { + t.Fatalf("expected error; req:\n%#v\n", *req) + } + } + } + + validateResponse := func(resp *logical.Response, expectedCacheSize int, expectedWarning bool) { + actualCacheSize, ok := resp.Data["size"].(int) + if !ok { + t.Fatalf("No size returned") + } + if expectedCacheSize != actualCacheSize { + t.Fatalf("testAccReadCacheConfig expected: %d got: %d", expectedCacheSize, actualCacheSize) + } + // check for the presence/absence of warnings - 
warnings are expected if a cache size has been + // configured but not yet applied by reloading the plugin + warningCheckPass := expectedWarning == (len(resp.Warnings) > 0) + if !warningCheckPass { + t.Fatalf( + "testAccSteporeadCacheConfig warnings error.\n"+ + "expect warnings: %t but number of warnings was: %d", + expectedWarning, len(resp.Warnings), + ) + } + } + + writeReq := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "cache-config", + Data: map[string]interface{}{ + "size": targetCacheSize, + }, + } + + readReq := &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "cache-config", + } + + // test steps + // b1 should spin up with an unlimited cache + validateResponse(doReq(b1, readReq), 0, false) + doReq(b1, writeReq) + validateResponse(doReq(b1, readReq), targetCacheSize, true) + + // b2 should spin up with a configured cache + b2 := createBackendWithSysViewWithStorage(t, storage) + validateResponse(doReq(b2, readReq), targetCacheSize, false) + + // b3 enables transit without a cache, trying to read it should error + b3 := createBackendWithForceNoCacheWithSysViewWithStorage(t, storage) + doErrReq(b3, readReq) +} diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go index 260b22f573e6..0b59d76c6511 100644 --- a/builtin/logical/transit/path_random_test.go +++ b/builtin/logical/transit/path_random_test.go @@ -14,8 +14,9 @@ func TestTransit_Random(t *testing.T) { var b *backend sysView := logical.TestSystemView() storage := &logical.InmemStorage{} + sysView.CachingDisabledVal = true - b = Backend(&logical.BackendConfig{ + b, _ = Backend(context.Background(), &logical.BackendConfig{ StorageView: storage, System: sysView, }) diff --git a/command/agent.go b/command/agent.go index 2884004441d0..227f263a2b24 100644 --- a/command/agent.go +++ b/command/agent.go @@ -8,6 +8,7 @@ import ( "net" "net/http" "os" + "path" "sort" "strings" "sync" @@ -25,6 
+26,7 @@ import ( "github.com/hashicorp/vault/command/agent/auth/gcp" "github.com/hashicorp/vault/command/agent/auth/jwt" "github.com/hashicorp/vault/command/agent/auth/kubernetes" + "github.com/hashicorp/vault/command/agent/auth/pcf" "github.com/hashicorp/vault/command/agent/cache" "github.com/hashicorp/vault/command/agent/config" "github.com/hashicorp/vault/command/agent/sink" @@ -320,9 +322,15 @@ func (c *AgentCommand) Run(args []string) int { } } + // Check if a default namespace has been set + mountPath := config.AutoAuth.Method.MountPath + if config.AutoAuth.Method.Namespace != "" { + mountPath = path.Join(config.AutoAuth.Method.Namespace, mountPath) + } + authConfig := &auth.AuthConfig{ Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), - MountPath: config.AutoAuth.Method.MountPath, + MountPath: mountPath, Config: config.AutoAuth.Method.Config, } switch config.AutoAuth.Method.Type { @@ -342,6 +350,8 @@ func (c *AgentCommand) Run(args []string) int { method, err = kubernetes.NewKubernetesAuthMethod(authConfig) case "approle": method, err = approle.NewApproleAuthMethod(authConfig) + case "pcf": + method, err = pcf.NewPCFAuthMethod(authConfig) default: c.UI.Error(fmt.Sprintf("Unknown auth method %q", config.AutoAuth.Method.Type)) return 1 diff --git a/command/agent/auth/aws/aws.go b/command/agent/auth/aws/aws.go index fdac099e99eb..d3ea6007202d 100644 --- a/command/agent/auth/aws/aws.go +++ b/command/agent/auth/aws/aws.go @@ -134,6 +134,14 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { } } + nonceRaw, ok := conf.Config["nonce"] + if ok { + a.nonce, ok = nonceRaw.(string) + if !ok { + return nil, errors.New("could not convert 'nonce' value into string") + } + } + if a.authType == typeIAM { // Check for an optional custom frequency at which we should poll for creds. 
diff --git a/command/agent/auth/pcf/pcf.go b/command/agent/auth/pcf/pcf.go new file mode 100644 index 000000000000..3aa74762189d --- /dev/null +++ b/command/agent/auth/pcf/pcf.go @@ -0,0 +1,82 @@ +package pcf + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "time" + + pcf "github.com/hashicorp/vault-plugin-auth-pcf" + "github.com/hashicorp/vault-plugin-auth-pcf/signatures" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" +) + +type pcfMethod struct { + mountPath string + roleName string +} + +func NewPCFAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + a := &pcfMethod{ + mountPath: conf.MountPath, + } + if raw, ok := conf.Config["role"]; ok { + if roleName, ok := raw.(string); ok { + a.roleName = roleName + } else { + return nil, errors.New("could not convert 'role' config value to string") + } + } else { + return nil, errors.New("missing 'role' value") + } + return a, nil +} + +func (p *pcfMethod) Authenticate(ctx context.Context, client *api.Client) (string, map[string]interface{}, error) { + pathToClientCert := os.Getenv(pcf.EnvVarInstanceCertificate) + if pathToClientCert == "" { + return "", nil, fmt.Errorf("missing %q value", pcf.EnvVarInstanceCertificate) + } + certBytes, err := ioutil.ReadFile(pathToClientCert) + if err != nil { + return "", nil, err + } + pathToClientKey := os.Getenv(pcf.EnvVarInstanceKey) + if pathToClientKey == "" { + return "", nil, fmt.Errorf("missing %q value", pcf.EnvVarInstanceKey) + } + signingTime := time.Now().UTC() + signatureData := &signatures.SignatureData{ + SigningTime: signingTime, + Role: p.roleName, + CFInstanceCertContents: string(certBytes), + } + signature, err := signatures.Sign(pathToClientKey, signatureData) + if err != nil { + return "", nil, err + } + data := map[string]interface{}{ + "role": p.roleName, + 
"cf_instance_cert": string(certBytes), + "signing_time": signingTime.Format(signatures.TimeFormat), + "signature": signature, + } + return fmt.Sprintf("%s/login", p.mountPath), data, nil +} + +func (p *pcfMethod) NewCreds() chan struct{} { + return nil +} + +func (p *pcfMethod) CredSuccess() {} + +func (p *pcfMethod) Shutdown() {} diff --git a/command/agent/cache/api_proxy.go b/command/agent/cache/api_proxy.go index 9cc3bef63d2c..f0a919121a0a 100644 --- a/command/agent/cache/api_proxy.go +++ b/command/agent/cache/api_proxy.go @@ -41,8 +41,13 @@ func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) fwReq.BodyBytes = req.RequestBody + query := req.Request.URL.Query() + if len(query) != 0 { + fwReq.Params = query + } + // Make the request to Vault and get the response - ap.logger.Info("forwarding request", "path", req.Request.URL.Path, "method", req.Request.Method) + ap.logger.Info("forwarding request", "method", req.Request.Method, "path", req.Request.URL.Path) resp, err := client.RawRequestWithContext(ctx, fwReq) if resp == nil && err != nil { diff --git a/command/agent/cache/api_proxy_test.go b/command/agent/cache/api_proxy_test.go index e8ac4637cf3e..b90f579c3f00 100644 --- a/command/agent/cache/api_proxy_test.go +++ b/command/agent/cache/api_proxy_test.go @@ -1,9 +1,10 @@ package cache import ( + "net/http" "testing" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -42,6 +43,53 @@ func TestAPIProxy(t *testing.T) { } if !result.Initialized || result.Sealed || result.Standby { - t.Fatalf("bad sys/health response") + t.Fatalf("bad sys/health response: %#v", result) + } +} + +func TestAPIProxy_queryParams(t *testing.T) { + // Set up an agent that points to a standby node for this particular test + // since it needs to 
proxy a /sys/health?standbyok=true request to a standby + cleanup, client, _, _ := setupClusterAndAgentOnStandby(namespace.RootContext(nil), t, nil) + defer cleanup() + + proxier, err := NewAPIProxy(&APIProxyConfig{ + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + }) + if err != nil { + t.Fatal(err) + } + + r := client.NewRequest("GET", "/v1/sys/health") + req, err := r.ToHTTP() + if err != nil { + t.Fatal(err) + } + + // Add a query parameter for testing + q := req.URL.Query() + q.Add("standbyok", "true") + req.URL.RawQuery = q.Encode() + + resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ + Request: req, + }) + if err != nil { + t.Fatal(err) + } + + var result api.HealthResponse + err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) + if err != nil { + t.Fatal(err) + } + + if !result.Initialized || result.Sealed || !result.Standby { + t.Fatalf("bad sys/health response: %#v", result) + } + + if resp.Response.StatusCode != http.StatusOK { + t.Fatalf("exptected standby to return 200, got: %v", resp.Response.StatusCode) } } diff --git a/command/agent/cache/cache_test.go b/command/agent/cache/cache_test.go index c4b15693072d..b9a4d8f7cf0e 100644 --- a/command/agent/cache/cache_test.go +++ b/command/agent/cache/cache_test.go @@ -34,10 +34,22 @@ path "*" { ` // setupClusterAndAgent is a helper func used to set up a test cluster and -// caching agent. It returns a cleanup func that should be deferred immediately -// along with two clients, one for direct cluster communication and another to -// talk to the caching agent. +// caching agent against the active node. It returns a cleanup func that should +// be deferred immediately along with two clients, one for direct cluster +// communication and another to talk to the caching agent. 
func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { + return setupClusterAndAgentCommon(ctx, t, coreConfig, false) +} + +// setupClusterAndAgentOnStandby is a helper func used to set up a test cluster +// and caching agent against a standby node. It returns a cleanup func that +// should be deferred immediately along with two clients, one for direct cluster +// communication and another to talk to the caching agent. +func setupClusterAndAgentOnStandby(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { + return setupClusterAndAgentCommon(ctx, t, coreConfig, true) +} + +func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig, onStandby bool) (func(), *api.Client, *api.Client, *LeaseCache) { t.Helper() if ctx == nil { @@ -70,21 +82,30 @@ func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.C cores := cluster.Cores vault.TestWaitActive(t, cores[0].Core) - // clusterClient is the client that is used to talk directly to the cluster. - clusterClient := cores[0].Client + activeClient := cores[0].Client + standbyClient := cores[1].Client + + // clienToUse is the client for the agent to point to. + clienToUse := activeClient + if onStandby { + clienToUse = standbyClient + } // Add an admin policy - if err := clusterClient.Sys().PutPolicy("admin", policyAdmin); err != nil { + if err := activeClient.Sys().PutPolicy("admin", policyAdmin); err != nil { t.Fatal(err) } // Set up the userpass auth backend and an admin user. Used for getting a token // for the agent later down in this func. 
- clusterClient.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + err := activeClient.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ Type: "userpass", }) + if err != nil { + t.Fatal(err) + } - _, err := clusterClient.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ + _, err = activeClient.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ "password": "bar", "policies": []string{"admin"}, }) @@ -94,7 +115,7 @@ func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.C // Set up env vars for agent consumption origEnvVaultAddress := os.Getenv(api.EnvVaultAddress) - os.Setenv(api.EnvVaultAddress, clusterClient.Address()) + os.Setenv(api.EnvVaultAddress, clienToUse.Address()) origEnvVaultCACert := os.Getenv(api.EnvVaultCACert) os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir)) @@ -108,7 +129,7 @@ func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.C // Create the API proxier apiProxy, err := NewAPIProxy(&APIProxyConfig{ - Client: clusterClient, + Client: clienToUse, Logger: cacheLogger.Named("apiproxy"), }) if err != nil { @@ -118,7 +139,7 @@ func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.C // Create the lease cache proxier and set its underlying proxier to // the API proxier. leaseCache, err := NewLeaseCache(&LeaseCacheConfig{ - Client: clusterClient, + Client: clienToUse, BaseContext: ctx, Proxier: apiProxy, Logger: cacheLogger.Named("leasecache"), @@ -142,7 +163,7 @@ func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.C go server.Serve(listener) // testClient is the client that is used to talk to the agent for proxying/caching behavior. 
- testClient, err := clusterClient.Clone() + testClient, err := activeClient.Clone() if err != nil { t.Fatal(err) } @@ -171,7 +192,7 @@ func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.C listener.Close() } - return cleanup, clusterClient, testClient, leaseCache + return cleanup, clienToUse, testClient, leaseCache } func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { diff --git a/command/agent/cache/cachememdb/cache_memdb.go b/command/agent/cache/cachememdb/cache_memdb.go index ad1321c4e179..a28ad9a0c286 100644 --- a/command/agent/cache/cachememdb/cache_memdb.go +++ b/command/agent/cache/cachememdb/cache_memdb.go @@ -3,6 +3,7 @@ package cachememdb import ( "errors" "fmt" + "sync/atomic" memdb "github.com/hashicorp/go-memdb" ) @@ -13,7 +14,7 @@ const ( // CacheMemDB is the underlying cache database for storing indexes. type CacheMemDB struct { - db *memdb.MemDB + db *atomic.Value } // New creates a new instance of CacheMemDB. @@ -23,9 +24,12 @@ func New() (*CacheMemDB, error) { return nil, err } - return &CacheMemDB{ - db: db, - }, nil + c := &CacheMemDB{ + db: new(atomic.Value), + } + c.db.Store(db) + + return c, nil } func newDB() (*memdb.MemDB, error) { @@ -129,7 +133,9 @@ func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, return nil, fmt.Errorf("invalid index name %q", indexName) } - raw, err := c.db.Txn(false).First(tableNameIndexer, indexName, indexValues...) + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + raw, err := txn.First(tableNameIndexer, indexName, indexValues...) 
if err != nil { return nil, err } @@ -152,7 +158,7 @@ func (c *CacheMemDB) Set(index *Index) error { return errors.New("nil index provided") } - txn := c.db.Txn(true) + txn := c.db.Load().(*memdb.MemDB).Txn(true) defer txn.Abort() if err := txn.Insert(tableNameIndexer, index); err != nil { @@ -174,7 +180,9 @@ func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ( indexName = indexName + "_prefix" // Get all the objects - iter, err := c.db.Txn(false).Get(tableNameIndexer, indexName, indexValues...) + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + iter, err := txn.Get(tableNameIndexer, indexName, indexValues...) if err != nil { return nil, err } @@ -207,7 +215,7 @@ func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error { return nil } - txn := c.db.Txn(true) + txn := c.db.Load().(*memdb.MemDB).Txn(true) defer txn.Abort() if err := txn.Delete(tableNameIndexer, index); err != nil { @@ -226,7 +234,7 @@ func (c *CacheMemDB) Flush() error { return err } - c.db = newDB + c.db.Store(newDB) return nil } diff --git a/command/agent/cache/handler.go b/command/agent/cache/handler.go index 183acd1bbf39..8ac13294b9dd 100644 --- a/command/agent/cache/handler.go +++ b/command/agent/cache/handler.go @@ -22,11 +22,11 @@ import ( func Handler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logger.Info("received request", "path", r.URL.Path, "method", r.Method) + logger.Info("received request", "method", r.Method, "path", r.URL.Path) token := r.Header.Get(consts.AuthHeaderName) if token == "" && inmemSink != nil { - logger.Debug("using auto auth token", "path", r.URL.Path, "method", r.Method) + logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path) token = inmemSink.(sink.SinkReader).Token() } @@ -148,7 +148,7 @@ func processTokenLookupResponse(ctx context.Context, logger hclog.Logger, inmemS 
return nil } - logger.Info("stripping auto-auth token from the response", "path", req.Request.URL.Path, "method", req.Request.Method) + logger.Info("stripping auto-auth token from the response", "method", req.Request.Method, "path", req.Request.URL.Path) secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) if err != nil { return fmt.Errorf("failed to parse token lookup response: %v", err) diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go index 2c82f7eeb471..6b459b170cbe 100644 --- a/command/agent/cache/lease_cache.go +++ b/command/agent/cache/lease_cache.go @@ -202,11 +202,11 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, // in between this upgrade so we can simply return that. Otherwise, this request // will be the one performing the cache write. if sendResp != nil { - c.logger.Debug("returning cached response", "path", req.Request.URL.Path) + c.logger.Debug("returning cached response", "method", req.Request.Method, "path", req.Request.URL.Path) return sendResp, nil } - c.logger.Debug("forwarding request", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("forwarding request", "method", req.Request.Method, "path", req.Request.URL.Path) // Pass the request down and get a response resp, err := c.proxier.Send(ctx, req) @@ -254,7 +254,7 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, // Fast path for responses with no secrets if secret == nil { - c.logger.Debug("pass-through response; no secret in response", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) return resp, nil } @@ -265,14 +265,14 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, return nil, err } if !secret.Renewable && !tokenRenewable { - c.logger.Debug("pass-through response; secret not 
renewable", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) return resp, nil } var renewCtxInfo *cachememdb.ContextInfo switch { case secret.LeaseID != "": - c.logger.Debug("processing lease response", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) if err != nil { return nil, err @@ -280,7 +280,7 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, // If the lease belongs to a token that is not managed by the agent, // return the response without caching it. if entry == nil { - c.logger.Debug("pass-through lease response; token not managed by agent", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("pass-through lease response; token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) return resp, nil } @@ -291,7 +291,7 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, index.LeaseToken = req.Token case secret.Auth != nil: - c.logger.Debug("processing auth response", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) // Check if this token creation request resulted in a non-orphan token, and if so // correctly set the parentCtx to the request's token context. @@ -304,11 +304,11 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, // If parent token is not managed by the agent, child shouldn't be // either. 
if entry == nil { - c.logger.Debug("pass-through auth response; parent token not managed by agent", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("pass-through auth response; parent token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) return resp, nil } - c.logger.Debug("setting parent context", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) parentCtx = entry.RenewCtxInfo.Ctx entry.TokenParent = req.Token @@ -321,7 +321,7 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, default: // We shouldn't be hitting this, but will err on the side of caution and // simply proxy. - c.logger.Debug("pass-through response; secret without lease and token", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) return resp, nil } @@ -353,7 +353,7 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, } // Store the index in the cache - c.logger.Debug("storing response into the cache", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("storing response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path) err = c.db.Set(index) if err != nil { c.logger.Error("failed to cache the proxied response", "error", err) @@ -378,7 +378,7 @@ func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { defer func() { id := ctx.Value(contextIndexID).(string) - c.logger.Debug("evicting index from cache", "id", id, "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("evicting index from cache", "id", id, 
"method", req.Request.Method, "path", req.Request.URL.Path) err := c.db.Evict(cachememdb.IndexNameID, id) if err != nil { c.logger.Error("failed to evict index", "id", id, "error", err) @@ -402,7 +402,7 @@ func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, return } - c.logger.Debug("initiating renewal", "path", req.Request.URL.Path, "method", req.Request.Method) + c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path) go renewer.Renew() defer renewer.Stop() diff --git a/command/agent/cert_with_name_end_to_end_test.go b/command/agent/cert_with_name_end_to_end_test.go index 54135b49f5e3..0500dc8997c9 100644 --- a/command/agent/cert_with_name_end_to_end_test.go +++ b/command/agent/cert_with_name_end_to_end_test.go @@ -119,8 +119,8 @@ func testCertWithNameEndToEnd(t *testing.T, ahWrapping bool) { } ahConfig := &auth.AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), - Client: client, + Logger: logger.Named("auth.handler"), + Client: client, EnableReauthOnNewCredentials: true, } if ahWrapping { diff --git a/command/agent/cert_with_no_name_end_to_end_test.go b/command/agent/cert_with_no_name_end_to_end_test.go index e6bb683ca407..d6394de37e85 100644 --- a/command/agent/cert_with_no_name_end_to_end_test.go +++ b/command/agent/cert_with_no_name_end_to_end_test.go @@ -116,8 +116,8 @@ func testCertWithNoNAmeEndToEnd(t *testing.T, ahWrapping bool) { } ahConfig := &auth.AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), - Client: client, + Logger: logger.Named("auth.handler"), + Client: client, EnableReauthOnNewCredentials: true, } if ahWrapping { diff --git a/command/agent/config/config.go b/command/agent/config/config.go index 27398638ccb4..b8450b51e185 100644 --- a/command/agent/config/config.go +++ b/command/agent/config/config.go @@ -11,10 +11,10 @@ import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" - 
"github.com/hashicorp/vault/sdk/helper/parseutil" - "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/parseutil" ) // Config is the configuration for the vault server. @@ -60,6 +60,7 @@ type Method struct { MountPath string `hcl:"mount_path"` WrapTTLRaw interface{} `hcl:"wrap_ttl"` WrapTTL time.Duration `hcl:"-"` + Namespace string `hcl:"namespace"` Config map[string]interface{} } @@ -340,6 +341,9 @@ func parseMethod(result *Config, list *ast.ObjectList) error { m.WrapTTLRaw = nil } + // Canonicalize namespace path if provided + m.Namespace = namespace.Canonicalize(m.Namespace) + result.AutoAuth.Method = &m return nil } diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index c7b5f4de2539..aee2a0108ddf 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -112,6 +112,7 @@ func TestLoadConfigFile(t *testing.T) { Method: &Method{ Type: "aws", MountPath: "auth/aws", + Namespace: "my-namespace/", Config: map[string]interface{}{ "role": "foobar", }, diff --git a/command/agent/config/test-fixtures/config-embedded-type.hcl b/command/agent/config/test-fixtures/config-embedded-type.hcl index c00c0f57e8c4..f17336078a65 100644 --- a/command/agent/config/test-fixtures/config-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-embedded-type.hcl @@ -3,6 +3,7 @@ pid_file = "./pidfile" auto_auth { method "aws" { mount_path = "auth/aws" + namespace = "my-namespace" config = { role = "foobar" } diff --git a/command/agent/config/test-fixtures/config.hcl b/command/agent/config/test-fixtures/config.hcl index cd3a8f9df03e..096190d04d84 100644 --- a/command/agent/config/test-fixtures/config.hcl +++ b/command/agent/config/test-fixtures/config.hcl @@ -3,6 +3,7 @@ pid_file = "./pidfile" auto_auth { method { type = "aws" + namespace = "/my-namespace" config = { role = "foobar" } diff --git 
a/command/agent/pcf_end_to_end_test.go b/command/agent/pcf_end_to_end_test.go new file mode 100644 index 000000000000..2cf1ab5c91b0 --- /dev/null +++ b/command/agent/pcf_end_to_end_test.go @@ -0,0 +1,170 @@ +package agent + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" + credPCF "github.com/hashicorp/vault-plugin-auth-pcf" + "github.com/hashicorp/vault-plugin-auth-pcf/testing/certificates" + pcfAPI "github.com/hashicorp/vault-plugin-auth-pcf/testing/pcf" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" + agentpcf "github.com/hashicorp/vault/command/agent/auth/pcf" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestPCFEndToEnd(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "pcf": credPCF.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + if err := client.Sys().EnableAuthWithOptions("pcf", &api.EnableAuthOptions{ + Type: "pcf", + }); err != nil { + t.Fatal(err) + } + + testIPAddress := "127.0.0.1" + + // Generate some valid certs that look like the ones we get from PCF. 
+ testPCFCerts, err := certificates.Generate(pcfAPI.FoundServiceGUID, pcfAPI.FoundOrgGUID, pcfAPI.FoundSpaceGUID, pcfAPI.FoundAppGUID, testIPAddress) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := testPCFCerts.Close(); err != nil { + t.Fatal(err) + } + }() + + // Start a mock server representing their API. + mockPCFAPI := pcfAPI.MockServer(false) + defer mockPCFAPI.Close() + + // Configure a CA certificate like a Vault operator would in setting up PCF. + if _, err := client.Logical().Write("auth/pcf/config", map[string]interface{}{ + "identity_ca_certificates": testPCFCerts.CACertificate, + "pcf_api_addr": mockPCFAPI.URL, + "pcf_username": pcfAPI.AuthUsername, + "pcf_password": pcfAPI.AuthPassword, + }); err != nil { + t.Fatal(err) + } + + // Configure a role to be used for logging in, another thing a Vault operator would do. + if _, err := client.Logical().Write("auth/pcf/roles/test-role", map[string]interface{}{ + "bound_instance_ids": pcfAPI.FoundServiceGUID, + "bound_organization_ids": pcfAPI.FoundOrgGUID, + "bound_space_ids": pcfAPI.FoundSpaceGUID, + "bound_application_ids": pcfAPI.FoundAppGUID, + }); err != nil { + t.Fatal(err) + } + + os.Setenv(credPCF.EnvVarInstanceCertificate, testPCFCerts.PathToInstanceCertificate) + os.Setenv(credPCF.EnvVarInstanceKey, testPCFCerts.PathToInstanceKey) + + ctx, cancelFunc := context.WithCancel(context.Background()) + timer := time.AfterFunc(30*time.Second, func() { + cancelFunc() + }) + defer timer.Stop() + + am, err := agentpcf.NewPCFAuthMethod(&auth.AuthConfig{ + MountPath: "auth/pcf", + Config: map[string]interface{}{ + "role": "test-role", + }, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ah := auth.NewAuthHandler(ahConfig) + go ah.Run(ctx, am) + defer func() { + <-ah.DoneCh + }() + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + 
tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + defer func() { + <-ss.DoneCh + }() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. + time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } +} diff --git a/command/audit_list.go b/command/audit_list.go index be3c87807579..392d55bae222 100644 --- a/command/audit_list.go +++ b/command/audit_list.go @@ -94,13 +94,13 @@ func (c *AuditListCommand) Run(args []string) int { return 2 } - if len(audits) == 0 { - c.UI.Output(fmt.Sprintf("No audit devices are enabled.")) - return 0 - } - switch Format(c.UI) { case "table": + if len(audits) == 0 { + c.UI.Output(fmt.Sprintf("No audit devices are enabled.")) + return 2 + } + if c.flagDetailed { c.UI.Output(tableOutput(c.detailedAudits(audits), nil)) return 0 diff --git a/command/base_predict_test.go b/command/base_predict_test.go index c09dc9f4cc21..99152bd27dff 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/posener/complete" ) @@ -333,12 +334,14 @@ func 
TestPredict_Plugins(t *testing.T) { "centrify", "cert", "consul", + "elasticsearch-database-plugin", "gcp", "gcpkms", "github", "hana-database-plugin", "influxdb-database-plugin", "jwt", + "kmip", "kubernetes", "kv", "ldap", @@ -354,6 +357,7 @@ func TestPredict_Plugins(t *testing.T) { "nomad", "oidc", "okta", + "pcf", "pki", "postgresql", "postgresql-database-plugin", @@ -377,6 +381,15 @@ func TestPredict_Plugins(t *testing.T) { p.client = tc.client act := p.plugins() + + if !strutil.StrListContains(act, "kmip") { + for i, v := range tc.exp { + if v == "kmip" { + tc.exp = append(tc.exp[:i], tc.exp[i+1:]...) + break + } + } + } if !reflect.DeepEqual(act, tc.exp) { t.Errorf("expected %q to be %q", act, tc.exp) } diff --git a/command/commands.go b/command/commands.go index 28690a19425b..e5cccce585c7 100644 --- a/command/commands.go +++ b/command/commands.go @@ -27,6 +27,7 @@ import ( credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" credOIDC "github.com/hashicorp/vault-plugin-auth-jwt" + credPCF "github.com/hashicorp/vault-plugin-auth-pcf" credAws "github.com/hashicorp/vault/builtin/credential/aws" credCert "github.com/hashicorp/vault/builtin/credential/cert" credGitHub "github.com/hashicorp/vault/builtin/credential/github" @@ -52,6 +53,7 @@ import ( physMSSQL "github.com/hashicorp/vault/physical/mssql" physMySQL "github.com/hashicorp/vault/physical/mysql" physPostgreSQL "github.com/hashicorp/vault/physical/postgresql" + physRaft "github.com/hashicorp/vault/physical/raft" physS3 "github.com/hashicorp/vault/physical/s3" physSpanner "github.com/hashicorp/vault/physical/spanner" physSwift "github.com/hashicorp/vault/physical/swift" @@ -144,6 +146,7 @@ var ( "s3": physS3.NewS3Backend, "spanner": physSpanner.NewBackend, "swift": physSwift.NewSwiftBackend, + "raft": physRaft.NewRaftBackend, "zookeeper": physZooKeeper.NewZooKeeperBackend, } ) @@ -162,6 +165,7 @@ func initCommands(ui, serverCmdUi cli.Ui, 
runOpts *RunOptions) { "ldap": &credLdap.CLIHandler{}, "oidc": &credOIDC.CLIHandler{}, "okta": &credOkta.CLIHandler{}, + "pcf": &credPCF.CLIHandler{}, "radius": &credUserpass.CLIHandler{ DefaultMount: "radius", }, @@ -323,6 +327,31 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { ShutdownCh: MakeShutdownCh(), }, nil }, + "operator raft configuration": func() (cli.Command, error) { + return &OperatorRaftConfigurationCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft join": func() (cli.Command, error) { + return &OperatorRaftJoinCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft remove-peer": func() (cli.Command, error) { + return &OperatorRaftRemovePeerCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot restore": func() (cli.Command, error) { + return &OperatorRaftSnapshotRestoreCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot save": func() (cli.Command, error) { + return &OperatorRaftSnapshotSaveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "operator rekey": func() (cli.Command, error) { return &OperatorRekeyCommand{ BaseCommand: getBaseCommand(), diff --git a/command/kv_list.go b/command/kv_list.go index faedd6f3f429..0066c1f13215 100644 --- a/command/kv_list.go +++ b/command/kv_list.go @@ -93,6 +93,15 @@ func (c *KVListCommand) Run(args []string) int { c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err)) return 2 } + + _, ok := extractListData(secret) + if Format(c.UI) != "table" { + if secret == nil || secret.Data == nil || !ok { + OutputData(c.UI, map[string]interface{}{}) + return 2 + } + } + if secret == nil || secret.Data == nil { c.UI.Error(fmt.Sprintf("No value found at %s", path)) return 2 @@ -103,7 +112,7 @@ func (c *KVListCommand) Run(args []string) int { return OutputSecret(c.UI, secret) } - if _, ok := extractListData(secret); !ok { + if !ok { c.UI.Error(fmt.Sprintf("No entries found at %s", path)) return 2 } 
diff --git a/command/kv_metadata.go b/command/kv_metadata.go index ee4dca9381c5..badb08a48fd1 100644 --- a/command/kv_metadata.go +++ b/command/kv_metadata.go @@ -24,9 +24,9 @@ Usage: vault kv metadata [options] [args] Vault's key-value store. Here are some simple examples, and more detailed examples are available in the subcommands or the documentation. - Create or update a metadata entry for a key: + Create or update a metadata entry for a key: - $ vault kv metadata put -max-versions=5 secret/foo + $ vault kv metadata put -max-versions=5 -delete-version-after=3h25m19s secret/foo Get the metadata for a key, this provides information about each existing version: diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 7afef5cadb3c..9561651df700 100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "strings" + "time" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -15,9 +16,10 @@ var _ cli.CommandAutocomplete = (*KVMetadataPutCommand)(nil) type KVMetadataPutCommand struct { *BaseCommand - flagMaxVersions int - flagCASRequired bool - testStdin io.Reader // for tests + flagMaxVersions int + flagCASRequired bool + flagDeleteVersionAfter time.Duration + testStdin io.Reader // for tests } func (c *KVMetadataPutCommand) Synopsis() string { @@ -30,16 +32,20 @@ Usage: vault metadata kv put [options] KEY This command can be used to create a blank key in the key-value store or to update key configuration for a specified key. 
- - Create a key in the key-value store with no data: + + Create a key in the key-value store with no data: $ vault kv metadata put secret/foo - Set a max versions setting on the key: + Set a max versions setting on the key: $ vault kv metadata put -max-versions=5 secret/foo - Require Check-and-Set for this key: + Set delete-version-after on the key: + + $ vault kv metadata put -delete-version-after=3h25m19s secret/foo + + Require Check-and-Set for this key: $ vault kv metadata put -cas-required secret/foo @@ -69,6 +75,19 @@ func (c *KVMetadataPutCommand) Flags() *FlagSets { Usage: `If true the key will require the cas parameter to be set on all write requests. If false, the backend’s configuration will be used.`, }) + f.DurationVar(&DurationVar{ + Name: "delete-version-after", + Target: &c.flagDeleteVersionAfter, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: `Specifies the length of time before a version is deleted. + If not set, the backend's configured delete-version-after is used. Cannot be + greater than the backend's delete-version-after. The delete-version-after is + specified as a numeric string with a suffix like "30s" or + "3h25m19s".`, + }) + return set } @@ -122,6 +141,10 @@ func (c *KVMetadataPutCommand) Run(args []string) int { "cas_required": c.flagCASRequired, } + if c.flagDeleteVersionAfter > 0 { + data["delete_version_after"] = c.flagDeleteVersionAfter.String() + } + secret, err := client.Logical().Write(path, data) if err != nil { c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) diff --git a/command/kv_patch.go b/command/kv_patch.go index f8009ae1bcc7..5c6dfde25453 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -119,8 +119,12 @@ func (c *KVPatchCommand) Run(args []string) int { return 2 } - // First, do a read + // First, do a read. + // Note that we don't want to see curl output for the read request. 
+ curOutputCurl := client.OutputCurlString() + client.SetOutputCurlString(false) secret, err := kvReadRequest(client, path, nil) + client.SetOutputCurlString(curOutputCurl) if err != nil { c.UI.Error(fmt.Sprintf("Error doing pre-read at %s: %s", path, err)) return 2 diff --git a/command/list.go b/command/list.go index 56762151ac24..e29b842a9cf1 100644 --- a/command/list.go +++ b/command/list.go @@ -82,6 +82,15 @@ func (c *ListCommand) Run(args []string) int { c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err)) return 2 } + + _, ok := extractListData(secret) + if Format(c.UI) != "table" { + if secret == nil || secret.Data == nil || !ok { + OutputData(c.UI, map[string]interface{}{}) + return 2 + } + } + if secret == nil { c.UI.Error(fmt.Sprintf("No value found at %s", path)) return 2 @@ -97,7 +106,7 @@ func (c *ListCommand) Run(args []string) int { return OutputSecret(c.UI, secret) } - if _, ok := extractListData(secret); !ok { + if !ok { c.UI.Error(fmt.Sprintf("No entries found at %s", path)) return 2 } diff --git a/command/namespace_list.go b/command/namespace_list.go index 893e1a76e4cd..f06579b0d898 100644 --- a/command/namespace_list.go +++ b/command/namespace_list.go @@ -71,6 +71,15 @@ func (c *NamespaceListCommand) Run(args []string) int { c.UI.Error(fmt.Sprintf("Error listing namespaces: %s", err)) return 2 } + + _, ok := extractListData(secret) + if Format(c.UI) != "table" { + if secret == nil || secret.Data != nil || !ok { + OutputData(c.UI, map[string]interface{}{}) + return 2 + } + } + if secret == nil { c.UI.Error(fmt.Sprintf("No namespaces found")) return 2 @@ -85,7 +94,7 @@ func (c *NamespaceListCommand) Run(args []string) int { return OutputSecret(c.UI, secret) } - if _, ok := extractListData(secret); !ok { + if !ok { c.UI.Error(fmt.Sprintf("No entries found")) return 2 } diff --git a/command/operator_raft_configuration.go b/command/operator_raft_configuration.go new file mode 100644 index 000000000000..7fe4e0b9b543 --- /dev/null +++ 
b/command/operator_raft_configuration.go @@ -0,0 +1,72 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var _ cli.Command = (*OperatorRaftConfigurationCommand)(nil) +var _ cli.CommandAutocomplete = (*OperatorRaftConfigurationCommand)(nil) + +type OperatorRaftConfigurationCommand struct { + *BaseCommand +} + +func (c *OperatorRaftConfigurationCommand) Synopsis() string { + return "Returns the raft cluster configuration" +} + +func (c *OperatorRaftConfigurationCommand) Help() string { + helpText := ` +Usage: vault operator raft configuration + + Provides the details of all the peers in the raft cluster. + + $ vault operator raft configuration + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftConfigurationCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + return set +} + +func (c *OperatorRaftConfigurationCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftConfigurationCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftConfigurationCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().Read("sys/storage/raft/configuration") + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading the raft cluster configuration: %s", err)) + return 2 + } + + OutputSecret(c.UI, secret) + + return 0 +} diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go new file mode 100644 index 000000000000..528007c66fa4 --- /dev/null +++ b/command/operator_raft_join.go @@ -0,0 +1,139 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + 
"github.com/posener/complete" +) + +var _ cli.Command = (*OperatorRaftJoinCommand)(nil) +var _ cli.CommandAutocomplete = (*OperatorRaftJoinCommand)(nil) + +type OperatorRaftJoinCommand struct { + flagRaftRetry bool + flagLeaderCACert string + flagLeaderClientCert string + flagLeaderClientKey string + *BaseCommand +} + +func (c *OperatorRaftJoinCommand) Synopsis() string { + return "Joins a node to the raft cluster" +} + +func (c *OperatorRaftJoinCommand) Help() string { + helpText := ` +Usage: vault operator raft join [options] + + Join the current node as a peer to the raft cluster by providing the address + of the raft leader node. + + $ vault operator raft join "http://127.0.0.2:8200" + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftJoinCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "leader-ca-cert", + Target: &c.flagLeaderCACert, + Completion: complete.PredictNothing, + Usage: "CA cert to communicate with raft leader.", + }) + + f.StringVar(&StringVar{ + Name: "leader-client-cert", + Target: &c.flagLeaderClientCert, + Completion: complete.PredictNothing, + Usage: "Client cert to to authenticate to raft leader.", + }) + + f.StringVar(&StringVar{ + Name: "leader-client-key", + Target: &c.flagLeaderClientKey, + Completion: complete.PredictNothing, + Usage: "Client key to to authenticate to raft leader.", + }) + + f.BoolVar(&BoolVar{ + Name: "retry", + Target: &c.flagRaftRetry, + Default: false, + Usage: "Continuously retry joining the raft cluster upon failures.", + }) + + return set +} + +func (c *OperatorRaftJoinCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftJoinCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftJoinCommand) Run(args []string) int { + f := c.Flags() + + if err := 
f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + leaderAPIAddr := "" + + args = f.Args() + switch len(args) { + case 1: + leaderAPIAddr = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(leaderAPIAddr) == 0 { + c.UI.Error("leader api address is required") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Sys().RaftJoin(&api.RaftJoinRequest{ + LeaderAPIAddr: leaderAPIAddr, + LeaderCACert: c.flagLeaderCACert, + LeaderClientCert: c.flagLeaderClientCert, + LeaderClientKey: c.flagLeaderClientKey, + Retry: c.flagRaftRetry, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error joining the node to the raft cluster: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + default: + return OutputData(c.UI, resp) + } + + out := []string{} + out = append(out, "Key | Value") + out = append(out, fmt.Sprintf("Joined | %t", resp.Joined)) + c.UI.Output(tableOutput(out, nil)) + + return 0 +} diff --git a/command/operator_raft_remove_peer.go b/command/operator_raft_remove_peer.go new file mode 100644 index 000000000000..2ff62d30d5ab --- /dev/null +++ b/command/operator_raft_remove_peer.go @@ -0,0 +1,89 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var _ cli.Command = (*OperatorRaftRemovePeerCommand)(nil) +var _ cli.CommandAutocomplete = (*OperatorRaftRemovePeerCommand)(nil) + +type OperatorRaftRemovePeerCommand struct { + *BaseCommand +} + +func (c *OperatorRaftRemovePeerCommand) Synopsis() string { + return "Removes a node from the raft cluster" +} + +func (c *OperatorRaftRemovePeerCommand) Help() string { + helpText := ` +Usage: vault operator raft remove-peer + + Removes a node from the raft cluster. 
+ + $ vault operator raft remove-peer node1 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftRemovePeerCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + return set +} + +func (c *OperatorRaftRemovePeerCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftRemovePeerCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftRemovePeerCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + serverID := "" + + args = f.Args() + switch len(args) { + case 1: + serverID = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(serverID) == 0 { + c.UI.Error("Server id is required") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + _, err = client.Logical().Write("sys/storage/raft/remove-peer", map[string]interface{}{ + "server_id": serverID, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error removing the peer from raft cluster: %s", err)) + return 2 + } + + c.UI.Output("Peer removed successfully!") + + return 0 +} diff --git a/command/operator_raft_snapshot_restore.go b/command/operator_raft_snapshot_restore.go new file mode 100644 index 000000000000..c3f9cf19e256 --- /dev/null +++ b/command/operator_raft_snapshot_restore.go @@ -0,0 +1,104 @@ +package command + +import ( + "fmt" + "os" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var _ cli.Command = (*OperatorRaftSnapshotRestoreCommand)(nil) +var _ cli.CommandAutocomplete = (*OperatorRaftSnapshotRestoreCommand)(nil) + +type OperatorRaftSnapshotRestoreCommand struct { + flagForce bool + *BaseCommand +} + +func (c *OperatorRaftSnapshotRestoreCommand) Synopsis() string { + return 
"Installs the provided snapshot, returning the cluster to the state defined in it." +} + +func (c *OperatorRaftSnapshotRestoreCommand) Help() string { + helpText := ` +Usage: vault operator raft snapshot restore + + Installs the provided snapshot, returning the cluster to the state defined in it. + + $ vault operator raft snapshot restore raft.snap + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotRestoreCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "force", + Target: &c.flagForce, + Default: false, + Usage: "This bypasses checks ensuring the Autounseal or shamir keys are consistent with the snapshot data.", + }) + + return set +} + +func (c *OperatorRaftSnapshotRestoreCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftSnapshotRestoreCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftSnapshotRestoreCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + snapFile := "" + + args = f.Args() + switch len(args) { + case 1: + snapFile = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(snapFile) == 0 { + c.UI.Error("Snapshot file name is required") + return 1 + } + + snapReader, err := os.Open(snapFile) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening policy file: %s", err)) + return 2 + } + defer snapReader.Close() + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + err = client.Sys().RaftSnapshotRestore(snapReader, c.flagForce) + if err != nil { + c.UI.Error(fmt.Sprintf("Error installing the snapshot: %s", err)) + return 2 + } + + return 0 +} diff --git 
a/command/operator_raft_snapshot_save.go b/command/operator_raft_snapshot_save.go new file mode 100644 index 000000000000..b9eadec02b10 --- /dev/null +++ b/command/operator_raft_snapshot_save.go @@ -0,0 +1,94 @@ +package command + +import ( + "fmt" + "os" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var _ cli.Command = (*OperatorRaftSnapshotSaveCommand)(nil) +var _ cli.CommandAutocomplete = (*OperatorRaftSnapshotSaveCommand)(nil) + +type OperatorRaftSnapshotSaveCommand struct { + *BaseCommand +} + +func (c *OperatorRaftSnapshotSaveCommand) Synopsis() string { + return "Saves a snapshot of the current state of the raft cluster into a file." +} + +func (c *OperatorRaftSnapshotSaveCommand) Help() string { + helpText := ` +Usage: vault operator raft snapshot save + + Saves a snapshot of the current state of the raft cluster into a file. + + $ vault operator raft snapshot save raft.snap + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotSaveCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + return set +} + +func (c *OperatorRaftSnapshotSaveCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftSnapshotSaveCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftSnapshotSaveCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + path := "" + + args = f.Args() + switch len(args) { + case 1: + path = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(path) == 0 { + c.UI.Error("Output file name is required") + return 1 + } + + snapFile, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening output file: %s", err)) 
+ return 2 + } + defer snapFile.Close() + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + err = client.Sys().RaftSnapshot(snapFile) + if err != nil { + c.UI.Error(fmt.Sprintf("Error taking the snapshot: %s", err)) + return 2 + } + + return 0 +} diff --git a/command/operator_rekey.go b/command/operator_rekey.go index 724d9ad3caed..d437a9259102 100644 --- a/command/operator_rekey.go +++ b/command/operator_rekey.go @@ -685,12 +685,22 @@ func (c *OperatorRekeyCommand) printUnsealKeys(client *api.Client, status *api.R if len(resp.PGPFingerprints) > 0 && resp.Backup { c.UI.Output("") - c.UI.Output(wrapAtLength(fmt.Sprintf( - "The encrypted unseal keys are backed up to \"core/unseal-keys-backup\"" + - "in the storage backend. Remove these keys at any time using " + - "\"vault operator rekey -backup-delete\". Vault does not automatically " + - "remove these keys.", - ))) + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "The encrypted unseal keys are backed up to \"core/unseal-keys-backup\" " + + "in the storage backend. Remove these keys at any time using " + + "\"vault operator rekey -backup-delete\". Vault does not automatically " + + "remove these keys.", + ))) + case "recovery", "hsm": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "The encrypted unseal keys are backed up to \"core/recovery-keys-backup\" " + + "in the storage backend. Remove these keys at any time using " + + "\"vault operator rekey -backup-delete -target=recovery\". 
Vault does not automatically " + + "remove these keys.", + ))) + } } switch status.VerificationRequired { diff --git a/command/path_help.go b/command/path_help.go index 2ce4a38bfd42..d50fe8eab427 100644 --- a/command/path_help.go +++ b/command/path_help.go @@ -1,6 +1,7 @@ package command import ( + "encoding/json" "fmt" "strings" @@ -44,13 +45,15 @@ Usage: vault path-help [options] PATH Each secret engine produces different help output. + If -format is specified as JSON, the output will be in OpenAPI format. + ` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *PathHelpCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) } func (c *PathHelpCommand) AutocompleteArgs() complete.Predictor { @@ -97,6 +100,17 @@ func (c *PathHelpCommand) Run(args []string) int { return 2 } - c.UI.Output(help.Help) + switch c.flagFormat { + case "json": + b, err := json.Marshal(help.OpenAPI) + if err != nil { + c.UI.Error(fmt.Sprintf("Error marshaling OpenAPI: %s", err)) + return 2 + } + c.UI.Output(string(b)) + default: + c.UI.Output(help.Help) + } + return 0 } diff --git a/command/seal_migration_test.go b/command/seal_migration_test.go index 837c0c7a2646..373b6dd7e773 100644 --- a/command/seal_migration_test.go +++ b/command/seal_migration_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/vault/shamir" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" ) func TestSealMigration(t *testing.T) { @@ -28,7 +29,7 @@ func TestSealMigration(t *testing.T) { if err != nil { t.Fatal(err) } - shamirSeal := vault.NewDefaultSeal() + shamirSeal := vault.NewDefaultSeal(shamirseal.NewSeal(logger.Named("shamir"))) coreConfig := &vault.CoreConfig{ Seal: shamirSeal, Physical: phys, @@ -113,7 +114,7 @@ func TestSealMigration(t *testing.T) { newSeal := vault.NewAutoSeal(seal.NewTestSeal(nil)) newSeal.SetCore(core) autoSeal = newSeal - if err 
:= adjustCoreForSealMigration(core, newSeal, nil); err != nil { + if err := adjustCoreForSealMigration(logger, core, newSeal, nil); err != nil { t.Fatal(err) } @@ -210,7 +211,7 @@ func TestSealMigration(t *testing.T) { core := cluster.Cores[0].Core - if err := adjustCoreForSealMigration(core, altSeal, autoSeal); err != nil { + if err := adjustCoreForSealMigration(logger, core, altSeal, autoSeal); err != nil { t.Fatal(err) } @@ -248,7 +249,7 @@ func TestSealMigration(t *testing.T) { core := cluster.Cores[0].Core - if err := adjustCoreForSealMigration(core, shamirSeal, altSeal); err != nil { + if err := adjustCoreForSealMigration(logger, core, shamirSeal, altSeal); err != nil { t.Fatal(err) } diff --git a/command/server.go b/command/server.go index 50de4462c9ea..6c27b3787be4 100644 --- a/command/server.go +++ b/command/server.go @@ -47,6 +47,7 @@ import ( "github.com/hashicorp/vault/sdk/version" "github.com/hashicorp/vault/vault" vaultseal "github.com/hashicorp/vault/vault/seal" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" "github.com/mitchellh/cli" testing "github.com/mitchellh/go-testing-interface" "github.com/posener/complete" @@ -106,6 +107,8 @@ type ServerCommand struct { flagDevAutoSeal bool flagTestVerifyOnly bool flagCombineLogs bool + flagTestServerConfig bool + flagDevConsul bool } type ServerListener struct { @@ -290,6 +293,13 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) + f.BoolVar(&BoolVar{ + Name: "dev-consul", + Target: &c.flagDevConsul, + Default: false, + Hidden: true, + }) + // TODO: should the below flags be public? f.BoolVar(&BoolVar{ Name: "combine-logs", @@ -305,6 +315,13 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) + f.BoolVar(&BoolVar{ + Name: "test-server-config", + Target: &c.flagTestServerConfig, + Default: false, + Hidden: true, + }) + // End internal-only flags. 
return set @@ -391,7 +408,20 @@ func (c *ServerCommand) Run(args []string) int { // Load the configuration var config *server.Config if c.flagDev { - config = server.DevConfig(c.flagDevHA, c.flagDevTransactional) + var devStorageType string + switch { + case c.flagDevConsul: + devStorageType = "consul" + case c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional_ha" + case !c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional" + case c.flagDevHA && !c.flagDevTransactional: + devStorageType = "inmem_ha" + default: + devStorageType = "inmem" + } + config = server.DevConfig(devStorageType) if c.flagDevListenAddr != "" { config.Listeners[0].Config["address"] = c.flagDevListenAddr } @@ -485,6 +515,10 @@ func (c *ServerCommand) Run(args []string) int { c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type)) return 1 } + if config.Storage.Type == "raft" && len(config.ClusterAddr) == 0 { + c.UI.Error("Cluster address must be set when using raft storage") + return 1 + } namedStorageLogger := c.logger.Named("storage." 
+ config.Storage.Type) allLoggers = append(allLoggers, namedStorageLogger) backend, err := factory(config.Storage.Config, namedStorageLogger) @@ -533,7 +567,7 @@ func (c *ServerCommand) Run(args []string) int { var seal vault.Seal sealLogger := c.logger.Named(sealType) allLoggers = append(allLoggers, sealLogger) - seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal()) + seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir")))) if sealConfigError != nil { if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { c.UI.Error(fmt.Sprintf( @@ -964,7 +998,7 @@ CLUSTER_SYNTHESIS_COMPLETE: })) // Before unsealing with stored keys, setup seal migration if needed - if err := adjustCoreForSealMigration(core, barrierSeal, unwrapSeal); err != nil { + if err := adjustCoreForSealMigration(c.logger, core, barrierSeal, unwrapSeal); err != nil { c.UI.Error(err.Error()) return 1 } @@ -1146,6 +1180,7 @@ CLUSTER_SYNTHESIS_COMPLETE: } } + // server defaults server := &http.Server{ Handler: handler, ReadHeaderTimeout: 10 * time.Second, @@ -1153,9 +1188,56 @@ CLUSTER_SYNTHESIS_COMPLETE: IdleTimeout: 5 * time.Minute, ErrorLog: c.logger.StandardLogger(nil), } + + // override server defaults with config values for read/write/idle timeouts if configured + if readHeaderTimeoutInterface, ok := ln.config["http_read_header_timeout"]; ok { + readHeaderTimeout, err := parseutil.ParseDurationSecond(readHeaderTimeoutInterface) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_header_timeout %v", readHeaderTimeout)) + return 1 + } + server.ReadHeaderTimeout = readHeaderTimeout + } + + if readTimeoutInterface, ok := ln.config["http_read_timeout"]; ok { + readTimeout, err := parseutil.ParseDurationSecond(readTimeoutInterface) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not parse a time 
value for http_read_timeout %v", readTimeout)) + return 1 + } + server.ReadTimeout = readTimeout + } + + if writeTimeoutInterface, ok := ln.config["http_write_timeout"]; ok { + writeTimeout, err := parseutil.ParseDurationSecond(writeTimeoutInterface) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not parse a time value for http_write_timeout %v", writeTimeout)) + return 1 + } + server.WriteTimeout = writeTimeout + } + + if idleTimeoutInterface, ok := ln.config["http_idle_timeout"]; ok { + idleTimeout, err := parseutil.ParseDurationSecond(idleTimeoutInterface) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not parse a time value for http_idle_timeout %v", idleTimeout)) + return 1 + } + server.IdleTimeout = idleTimeout + } + + // server config tests can exit now + if c.flagTestServerConfig { + continue + } + go server.Serve(ln.Listener) } + if c.flagTestServerConfig { + return 0 + } + if sealConfigError != nil { init, err := core.Initialized(context.Background()) if err != nil { diff --git a/command/server/config.go b/command/server/config.go index 3f03200bae9b..19351dbe58a4 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -80,13 +80,13 @@ type Config struct { } // DevConfig is a Config that is used for dev mode of Vault. 
-func DevConfig(ha, transactional bool) *Config { +func DevConfig(storageType string) *Config { ret := &Config{ DisableMlock: true, EnableRawEndpoint: true, Storage: &Storage{ - Type: "inmem", + Type: storageType, }, Listeners: []*Listener{ @@ -109,15 +109,6 @@ func DevConfig(ha, transactional bool) *Config { }, } - switch { - case ha && transactional: - ret.Storage.Type = "inmem_transactional_ha" - case !ha && transactional: - ret.Storage.Type = "inmem_transactional" - case ha && !transactional: - ret.Storage.Type = "inmem_ha" - } - return ret } diff --git a/command/server_devfourcluster.go b/command/server_devfourcluster.go index 31299a54d1ee..5c316ab643c0 100644 --- a/command/server_devfourcluster.go +++ b/command/server_devfourcluster.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/version" "github.com/hashicorp/vault/vault" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" testing "github.com/mitchellh/go-testing-interface" "github.com/pkg/errors" ) @@ -85,7 +86,7 @@ func (c *ServerCommand) enableFourClusterDev(base *vault.CoreConfig, info map[st return errors.New("") } base.Physical = backend - base.Seal = vault.NewDefaultSeal() + base.Seal = vault.NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir"))) testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, diff --git a/command/server_test.go b/command/server_test.go index 6bdf21fc5593..4722d0f63fc6 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -41,19 +41,30 @@ func testRandomPort(tb testing.TB) int { return l.Addr().(*net.TCPAddr).Port } -func testBaseHCL(tb testing.TB) string { +func testBaseHCL(tb testing.TB, listenerExtras string) string { tb.Helper() return strings.TrimSpace(fmt.Sprintf(` disable_mlock = true listener "tcp" { - address = "127.0.0.1:%d" - tls_disable = "true" + address = "127.0.0.1:%d" + tls_disable = "true" + %s } - `, 
testRandomPort(tb))) + `, testRandomPort(tb), listenerExtras)) } const ( + goodListenerTimeouts = `http_read_header_timeout = 12 + http_read_timeout = "34s" + http_write_timeout = "56m" + http_idle_timeout = "78h"` + + badListenerReadHeaderTimeout = `http_read_header_timeout = "12km"` + badListenerReadTimeout = `http_read_timeout = "34日"` + badListenerWriteTimeout = `http_write_timeout = "56lbs"` + badListenerIdleTimeout = `http_idle_timeout = "78gophers"` + inmemHCL = ` backend "inmem_ha" { advertise_addr = "http://127.0.0.1:8200" @@ -204,24 +215,63 @@ func TestServer(t *testing.T) { contents string exp string code int + flag string }{ { "common_ha", - testBaseHCL(t) + inmemHCL, + testBaseHCL(t, "") + inmemHCL, "(HA available)", 0, + "-test-verify-only", }, { "separate_ha", - testBaseHCL(t) + inmemHCL + haInmemHCL, + testBaseHCL(t, "") + inmemHCL + haInmemHCL, "HA Storage:", 0, + "-test-verify-only", }, { "bad_separate_ha", - testBaseHCL(t) + inmemHCL + badHAInmemHCL, + testBaseHCL(t, "") + inmemHCL + badHAInmemHCL, "Specified HA storage does not support HA", 1, + "-test-verify-only", + }, + { + "good_listener_timeout_config", + testBaseHCL(t, goodListenerTimeouts) + inmemHCL, + "", + 0, + "-test-server-config", + }, + { + "bad_listener_read_header_timeout_config", + testBaseHCL(t, badListenerReadHeaderTimeout) + inmemHCL, + "Could not parse a time value for http_read_header_timeout", + 1, + "-test-server-config", + }, + { + "bad_listener_read_timeout_config", + testBaseHCL(t, badListenerReadTimeout) + inmemHCL, + "Could not parse a time value for http_read_timeout", + 1, + "-test-server-config", + }, + { + "bad_listener_write_timeout_config", + testBaseHCL(t, badListenerWriteTimeout) + inmemHCL, + "Could not parse a time value for http_write_timeout", + 1, + "-test-server-config", + }, + { + "bad_listener_idle_timeout_config", + testBaseHCL(t, badListenerIdleTimeout) + inmemHCL, + "Could not parse a time value for http_idle_timeout", + 1, + "-test-server-config", 
}, } @@ -242,7 +292,7 @@ func TestServer(t *testing.T) { code := cmd.Run([]string{ "-config", f.Name(), - "-test-verify-only", + tc.flag, }) output := ui.ErrorWriter.String() + ui.OutputWriter.String() if code != tc.code { diff --git a/command/server_util.go b/command/server_util.go index f7b681505744..2f4ca9defd2f 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -4,8 +4,10 @@ import ( "context" "fmt" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/vault" vaultseal "github.com/hashicorp/vault/vault/seal" + shamirseal "github.com/hashicorp/vault/vault/seal/shamir" "github.com/pkg/errors" ) @@ -13,7 +15,7 @@ var ( onEnterprise = false ) -func adjustCoreForSealMigration(core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error { +func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error { existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background()) if err != nil { return fmt.Errorf("Error checking for existing seal: %s", err) @@ -61,7 +63,7 @@ func adjustCoreForSealMigration(core *vault.Core, barrierSeal, unwrapSeal vault. 
switch existBarrierSealConfig.Type { case vaultseal.Shamir: // The value reflected in config is what we're going to - existSeal = vault.NewDefaultSeal() + existSeal = vault.NewDefaultSeal(shamirseal.NewSeal(logger.Named("shamir"))) newSeal = barrierSeal newBarrierSealConfig := &vault.SealConfig{ Type: newSeal.BarrierType(), diff --git a/command/token_create.go b/command/token_create.go index 7c454ff9ba9c..3192cd9f282a 100644 --- a/command/token_create.go +++ b/command/token_create.go @@ -29,6 +29,7 @@ type TokenCreateCommand struct { flagType string flagMetadata map[string]string flagPolicies []string + flagEntityAlias string } func (c *TokenCreateCommand) Synopsis() string { @@ -176,6 +177,16 @@ func (c *TokenCreateCommand) Flags() *FlagSets { "specified multiple times to attach multiple policies.", }) + f.StringVar(&StringVar{ + Name: "entity-alias", + Target: &c.flagEntityAlias, + Default: "", + Usage: "Name of the entity alias to associate with during token creation. " + + "Only works in combination with -role argument and used entity alias " + + "must be listed in allowed_entity_aliases. 
If this has been specified, " + + "the entity will not be inherited from the parent.", + }) + return set } @@ -224,6 +235,7 @@ func (c *TokenCreateCommand) Run(args []string) int { ExplicitMaxTTL: c.flagExplicitMaxTTL.String(), Period: c.flagPeriod.String(), Type: c.flagType, + EntityAlias: c.flagEntityAlias, } var secret *api.Secret diff --git a/go.mod b/go.mod index 7a301bf881b2..cbb166222115 100644 --- a/go.mod +++ b/go.mod @@ -7,32 +7,28 @@ replace github.com/hashicorp/vault/api => ./api replace github.com/hashicorp/vault/sdk => ./sdk require ( - cloud.google.com/go v0.37.4 - github.com/Azure/azure-sdk-for-go v27.1.0+incompatible + cloud.google.com/go v0.39.0 + github.com/Azure/azure-sdk-for-go v29.0.0+incompatible github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v11.7.1+incompatible - github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Microsoft/go-winio v0.4.12 // indirect github.com/NYTimes/gziphandler v1.1.1 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/SAP/go-hdb v0.14.1 github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect - github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6 + github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 - github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e github.com/armon/go-radix v1.0.0 github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf - github.com/aws/aws-sdk-go v1.19.11 - github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/aws/aws-sdk-go v1.19.39 github.com/bitly/go-hostpool 
v0.0.0-20171023180738-a3a6125de932 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/boombuler/barcode v1.0.0 // indirect github.com/cenkalti/backoff v2.1.1+incompatible // indirect github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 - github.com/circonus-labs/circonus-gometrics v2.2.7+incompatible // indirect - github.com/circonus-labs/circonusllhist v0.1.3 // indirect github.com/cockroachdb/apd v1.1.0 // indirect github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect @@ -50,6 +46,7 @@ require ( github.com/go-sql-driver/mysql v1.4.1 github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df + github.com/gogo/protobuf v1.2.1 github.com/golang/protobuf v1.3.1 github.com/google/go-github v17.0.0+incompatible github.com/google/go-querystring v1.0.0 // indirect @@ -58,30 +55,35 @@ require ( github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-gcp-common v0.5.0 - github.com/hashicorp/go-hclog v0.8.0 - github.com/hashicorp/go-memdb v1.0.0 + github.com/hashicorp/go-hclog v0.9.2 + github.com/hashicorp/go-memdb v1.0.2 + github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-multierror v1.0.0 - github.com/hashicorp/go-rootcerts v1.0.0 + github.com/hashicorp/go-rootcerts v1.0.1 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf - github.com/hashicorp/vault-plugin-auth-alicloud v0.5.1 - github.com/hashicorp/vault-plugin-auth-azure v0.5.1 - github.com/hashicorp/vault-plugin-auth-centrify v0.5.1 - github.com/hashicorp/vault-plugin-auth-gcp v0.5.1 - github.com/hashicorp/vault-plugin-auth-jwt v0.5.1 - 
github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.1 - github.com/hashicorp/vault-plugin-secrets-ad v0.5.1 - github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.1 - github.com/hashicorp/vault-plugin-secrets-azure v0.5.1 - github.com/hashicorp/vault-plugin-secrets-gcp v0.5.2 - github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.1 - github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20190416155133-fd495225dea0 - github.com/hashicorp/vault/api v1.0.1 - github.com/hashicorp/vault/sdk v0.1.8 + github.com/hashicorp/raft v1.1.1-0.20190703171940-f639636d18e0 + github.com/hashicorp/raft-snapshot v1.0.1 + github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190703042722-a8d100740e20 + github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190703042725-86deab7df8e2 + github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190703042729-bdd19ebba78a + github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190703042733-7a9fc78f2664 + github.com/hashicorp/vault-plugin-auth-jwt v0.5.2-0.20190703042737-804281c53c5f + github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.2-0.20190703042741-1a51335bffd3 + github.com/hashicorp/vault-plugin-auth-pcf v0.0.0-20190703042745-a8a201a8e0ec + github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190619214355-1541bbf73c6d + github.com/hashicorp/vault-plugin-secrets-ad v0.5.2-0.20190701201353-a0bef50be687 + github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190621033057-9c576c32b635 + github.com/hashicorp/vault-plugin-secrets-azure v0.5.2-0.20190509203638-8a60a8656fb0 + github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20190620162751-272efd334652 + github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190516000311-88f9a4f11829 + github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20190626201950-a6e92ff82578 + github.com/hashicorp/vault/api v1.0.3-0.20190703041405-a2810eb6965d + github.com/hashicorp/vault/sdk v0.1.12-0.20190703041405-a2810eb6965d github.com/influxdata/influxdb 
v0.0.0-20190411212539-d24b7ba8c4c4 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgx v3.3.0+incompatible // indirect @@ -91,8 +93,7 @@ require ( github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f github.com/kr/pretty v0.1.0 github.com/kr/text v0.1.0 - github.com/lib/pq v1.0.0 - github.com/marstr/guid v1.1.0 // indirect + github.com/lib/pq v1.1.1 github.com/mattn/go-colorable v0.0.9 github.com/michaelklishin/rabbit-hole v1.5.0 github.com/mitchellh/cli v1.0.0 @@ -103,6 +104,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.0 github.com/ncw/swift v1.0.47 github.com/oklog/run v1.0.0 + github.com/onsi/ginkgo v1.7.0 // indirect github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v0.1.1 // indirect @@ -118,14 +120,14 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 // indirect - github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect + go.etcd.io/bbolt v1.3.2 go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971 - golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c - golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 + golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a - google.golang.org/api v0.3.2 - google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 - google.golang.org/grpc v1.20.0 + google.golang.org/api v0.5.0 + google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c + google.golang.org/grpc v1.20.1 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/ory-am/dockertest.v3 v3.3.4 gopkg.in/square/go-jose.v2 v2.3.1 diff --git a/go.sum b/go.sum index ce7a6082ddd5..004b9a9852e3 100644 --- a/go.sum +++ b/go.sum 
@@ -2,11 +2,15 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.39.0 h1:UgQP9na6OTfp4dsAiz/eFpFA1C6tPdH5wiRdi19tuMw= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go v27.1.0+incompatible h1:/aGULErVaJsMdtew1p9OhEo8FeBzfRKRzJbc8NWJv/w= -github.com/Azure/azure-sdk-for-go v27.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE= +github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q= @@ -16,6 +20,8 @@ github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= @@ -32,6 +38,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6 h1:5RwdKFlGKokYBbq4M2ZZ0LzfxdK4e1L4rwQH+76wPkE= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -40,6 +48,8 @@ github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go. 
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g= github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -47,8 +57,8 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.19.11 h1:tqaTGER6Byw3QvsjGW0p018U2UOqaJPeJuzoaF7jjoQ= -github.com/aws/aws-sdk-go v1.19.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.39 h1:pIez14zQWSd/TER2Scohm7aCEG2TgoyXSOX6srOKt6o= +github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= @@ -59,6 +69,7 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYE github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f h1:ZMEzE7R0WNqgbHplzSBaYJhJi5AZWTCK9baU0ebzG6g= @@ -71,15 +82,19 @@ github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f h1:gJzxr github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= -github.com/circonus-labs/circonus-gometrics v2.2.7+incompatible h1:Rk92ZMiCn5qFDI9nIMJiJj2cLxMaMamq4JUWI0gqU8s= -github.com/circonus-labs/circonus-gometrics v2.2.7+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod 
h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg= @@ -139,6 +154,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod 
h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -152,9 +169,11 @@ github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFG github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= @@ -176,6 +195,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCy github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -190,6 +211,7 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= @@ -215,6 +237,8 @@ github.com/hashicorp/consul/api v1.0.1 h1:LkHu3cLXjya4lgrAyZVe/CUBXgJ7AcDWKSeCjA github.com/hashicorp/consul/api v1.0.1/go.mod h1:LQlewHPiuaRhn1mP2XE4RrjnlRgOeWa/ZM0xWLCen2M= github.com/hashicorp/consul/sdk v0.1.0 h1:tTfutTNVUTDXpNM4YCImLfiiY3yCDpfgS6tNlUioIUE= github.com/hashicorp/consul/sdk v0.1.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod 
h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -225,20 +249,29 @@ github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/ github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-memdb v1.0.0 h1:K1O4N2VPndZiTrdH3lmmf5bemr9Xw81KjVwhReIUjTQ= -github.com/hashicorp/go-memdb v1.0.0/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M= +github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg= +github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M= github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.0.0 h1:/gQ1sNR8/LHpoxKRQq4PmLBuacfZb4tC93e9B30o/7c= -github.com/hashicorp/go-plugin v1.0.0/go.mod 
h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -249,6 +282,8 @@ github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -261,32 +296,46 @@ github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG67 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf h1:U/40PQvWkaXCDdK9QHKf1pVDVcA+NIDVbzzonFGkgIA= github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf/go.mod h1:BDngVi1f4UA6aJq9WYTgxhfWSE1+42xshvstLU2fRGk= +github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= +github.com/hashicorp/raft v1.1.1-0.20190620170237-51bdddce2dbd h1:Le9Y3L217BgdnQUyZeaEK5a+Xwmk8oEEr9Cara/vL+Q= +github.com/hashicorp/raft v1.1.1-0.20190620170237-51bdddce2dbd/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.1-0.20190703171940-f639636d18e0 h1:msEDtkZC3STZq6Pthlju+jKruuNHXCZAWhghDK47HcM= +github.com/hashicorp/raft v1.1.1-0.20190703171940-f639636d18e0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/raft-snapshot v1.0.1 h1:cx002JsTEAfAP0pIuANlDtTXg/pi2Db6YbRRmLQTQKw= +github.com/hashicorp/raft-snapshot v1.0.1/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault-plugin-auth-alicloud v0.5.1 h1:CldlLfMGlcXy+5CvnNsOWJjE9/C1i+Nho4ClSJe+63k= -github.com/hashicorp/vault-plugin-auth-alicloud v0.5.1/go.mod h1:v0d6/ft2ESFHG/PB2pqcwDPlvtAWWfOmfsY0nfbIMy0= -github.com/hashicorp/vault-plugin-auth-azure v0.5.1 h1:1CTmC68zYhp/cKuHW8K0QbnWEetFK7UUu5jaWhmzbHw= -github.com/hashicorp/vault-plugin-auth-azure v0.5.1/go.mod 
h1:D/slkpcqcZMqslj1X9jfU9aIOrC41LVkfDQ9lFhYg0o= -github.com/hashicorp/vault-plugin-auth-centrify v0.5.1 h1:kHWphxtASUJVYgqvfr6KjCN74qWLJeLhSRE5kBQ4iiQ= -github.com/hashicorp/vault-plugin-auth-centrify v0.5.1/go.mod h1:GHplZPj7NfPWdeCkgTRnNzbjVP5IW5MNm7+MMsjobpQ= +github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190703042722-a8d100740e20 h1:4fQyIvvqsJyrljx1yg2JU4992/306vXkmD0jzXLQb0w= +github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190703042722-a8d100740e20/go.mod h1:n7Qq516yYvKGzkCsow6SChiuIGY5sQqPmxUZwwPWlcA= +github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190703042725-86deab7df8e2 h1:tCXUJ5rr1wimkCQzyEkRlsW++YoWXtuBiLcpVMqLLL8= +github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190703042725-86deab7df8e2/go.mod h1:+IkeFkqttSpd7ikxs2jnZHPitNlPHmvCgJ/jTVAoAqI= +github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190703042729-bdd19ebba78a h1:C5lCRLlYodkj7JnWpq+gj62wqOzoMKBV6WZF6BhXOJQ= +github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190703042729-bdd19ebba78a/go.mod h1:ogz+bqR2NRGXc0d81Axz5hsqqbAbpLFVcHqcvvbfJec= github.com/hashicorp/vault-plugin-auth-gcp v0.5.1 h1:8DR00s+Wmc21i3sfzvsqW88VMdf6NI2ue+onGoHshww= github.com/hashicorp/vault-plugin-auth-gcp v0.5.1/go.mod h1:eLj92eX8MPI4vY1jaazVLF2sVbSAJ3LRHLRhF/pUmlI= -github.com/hashicorp/vault-plugin-auth-jwt v0.5.1 h1:d9WLI7oF6VMtwBZwS5bbChc4kW+UwNZUKIGXH6wnnTc= -github.com/hashicorp/vault-plugin-auth-jwt v0.5.1/go.mod h1:5VU7gc6/BEEFQW/viqMs3LBxI1D1cxJmKqKQEP3JUP4= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.1 h1:q6DGb12Vw/CpZ9xDWAmpzxVRKeClFqRFgbIZ3fZcvuY= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.1/go.mod h1:qCDsm0njdfUrnN5sFKMLjxGjZKjQf2qB6dReQ4gr4YI= -github.com/hashicorp/vault-plugin-secrets-ad v0.5.1 h1:BdiASUZLOvOUs317EnaUNjGxTSw0PYGQA7zJZhDKLC4= -github.com/hashicorp/vault-plugin-secrets-ad v0.5.1/go.mod h1:EH9CI8+0aWRBz8eIgGth0QjttmHWlGvn+8ZmX/ZUetE= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.1 
h1:72K91p4uLhT/jgtBq2zV5Wn8ocvny4sAN56XOcTxK1w= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.1/go.mod h1:MspbyD2pPrYgBnYIawkBsFinaDb9lx9PA6uBYOG+d8I= -github.com/hashicorp/vault-plugin-secrets-azure v0.5.1 h1:6XFAkvpQl4zrXpZLSW9TCfF2z0mb2vwbrNmX2nzn480= -github.com/hashicorp/vault-plugin-secrets-azure v0.5.1/go.mod h1:9D3lbhWkN7kTCIrQl8yxMU4IkisAY3SYZaRvseih6ZE= -github.com/hashicorp/vault-plugin-secrets-gcp v0.5.2 h1:oH5EVMJCOHb81Ib9E7/ps1WrN3zkS6SnkbCW4tlk6Ro= -github.com/hashicorp/vault-plugin-secrets-gcp v0.5.2/go.mod h1:2VjVlKHTwqvcVCkZBhYks+HASDzQ4/bIsJoOpO2YJFY= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.1 h1:v25YWb7eMPe9DjGsUexRRuWwPlFNh+lbEGOeNrZalf8= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.1/go.mod h1:seBkt6x33ZT20koMcUwV/viMomnXDipsLgK5KUKz2ik= -github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20190416155133-fd495225dea0 h1:pb5DaAqWgGsKWFMsGotmQJREcv/B1EZGHO1hqWOxDAI= -github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20190416155133-fd495225dea0/go.mod h1:PIjaafaRr2QlkGl2SNhIywxlejeW0iMUtmx8u9u/a6c= +github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190703042733-7a9fc78f2664 h1:pL67rxYrJ8WHogVSbFkJmkrH1LT4c6MbdybSqTKqugo= +github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190703042733-7a9fc78f2664/go.mod h1:6pCTXpVaz6OVBa8x1EQmrH5g1XvMYL1lClZqc+n5BX4= +github.com/hashicorp/vault-plugin-auth-jwt v0.5.2-0.20190703042737-804281c53c5f h1:FrHSop0Act6sEm0j7rDrksDKXSy/ks4RbQGhjYyquDY= +github.com/hashicorp/vault-plugin-auth-jwt v0.5.2-0.20190703042737-804281c53c5f/go.mod h1:tAg7a0aE4j+lZ0dT+lQhkiBmyarGnaql3jW510O5MjA= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.2-0.20190703042741-1a51335bffd3 h1:z06zwNi/CA4U/qCl0fSXIwZWcxGzNrRozdfukhHLUEw= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.2-0.20190703042741-1a51335bffd3/go.mod h1:NxYIwPh0O8eDZiR8NjUIKwwfveM0OsVlryMtknkj3o8= +github.com/hashicorp/vault-plugin-auth-pcf v0.0.0-20190703042745-a8a201a8e0ec 
h1:VEsxzk8tk6jB369aOmIfK9iinrr2jMLHQBwA8IxUYwY= +github.com/hashicorp/vault-plugin-auth-pcf v0.0.0-20190703042745-a8a201a8e0ec/go.mod h1:0HwcklMPNEM5RATfzBwmvbOcroMpASToy9tCTXYJnNU= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190619214355-1541bbf73c6d h1:tHSfqnFZ7K/85dPgH3ApSP93TbzkUHGkOWPnBRjWHIM= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190619214355-1541bbf73c6d/go.mod h1:855Fcz9eNj3I3ZXVm+GnvSdy8mJx67tfG7CId9VfVlo= +github.com/hashicorp/vault-plugin-secrets-ad v0.5.2-0.20190701201353-a0bef50be687 h1:dlbFaUPvrZNXP4DvhB5u71XWzJNfnMMzurZPcJtfszo= +github.com/hashicorp/vault-plugin-secrets-ad v0.5.2-0.20190701201353-a0bef50be687/go.mod h1:pRJAA/Gl+UaSYeULx9ecXZ3f1oxIRATRfmLZKP3HUiM= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190621033057-9c576c32b635 h1:IMyMZI6YiX0FtOlG6fcXSFePKudI+MUBwqvz/3GHgac= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190621033057-9c576c32b635/go.mod h1:BwAuulKwAmq/YckIQCsBLAetJC/cAet3ErMsoTtvkB0= +github.com/hashicorp/vault-plugin-secrets-azure v0.5.2-0.20190509203638-8a60a8656fb0 h1:VaW9pSSP2qeR+BtOVuOkGzQ/fH9ODgiVgUrHhzNyGKw= +github.com/hashicorp/vault-plugin-secrets-azure v0.5.2-0.20190509203638-8a60a8656fb0/go.mod h1:fohWrL5nPCb/nIhpXhvspAKJuvtIu+m9OHgNhQg04xw= +github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20190620162751-272efd334652 h1:c8k3bf2dNH04gESeNEhhP8Jf71wiD7iJ4xwEcz70CVI= +github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20190620162751-272efd334652/go.mod h1:2VjVlKHTwqvcVCkZBhYks+HASDzQ4/bIsJoOpO2YJFY= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190516000311-88f9a4f11829 h1:Ipz+e4xLEDlSg9VkqGeJV3HuNCqNF0kPFlk3jXBZ1dg= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190516000311-88f9a4f11829/go.mod h1:HSCPRY1NbVn8Vfpn1qQD4fx+Y7hVo8kUlvoix5mmve8= +github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20190626201950-a6e92ff82578 h1:wzRbe3eDq6fs5eMEi44IO7/1yb8vzoqRIOSLphSzgJk= 
+github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20190626201950-a6e92ff82578/go.mod h1:0UXCoeKxYD9fPISis75//2M3EtE6D/dpVU6OuyiC5UY= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= @@ -313,9 +362,11 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 h1:BvV6PYcRz0yGnWXNZrd5wginNT1GfFfPvvWpPbjfFL8= github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -332,11 +383,11 @@ github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -367,12 +418,13 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/pointerstructure v0.0.0-20190323210102-2db4bb651397 h1:GwYLdFxg/9bWW+e6YMWDNrKZ43gbKKkgC9mtmxai4o0= -github.com/mitchellh/pointerstructure v0.0.0-20190323210102-2db4bb651397/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= +github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8 h1:1CO5wil3HuiVLrUQ2ovSTO+6AfNOA5EMkHHVyHE9IwA= +github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod 
h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -399,6 +451,8 @@ github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/ory/dockertest v3.3.4+incompatible h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8= github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -423,6 +477,7 @@ github.com/pquerna/otp v1.1.0/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nh github.com/prometheus/client_golang v0.8.0 
h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g= @@ -432,11 +487,13 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc= github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 
h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -458,8 +515,10 @@ github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -496,6 +555,8 @@ go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.uber.org/atomic v1.3.2 
h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -509,6 +570,8 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -532,8 +595,12 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190514140710-3ec191127204/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -558,11 +625,19 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190410170021-cc4d4f50624c h1:OUGWoQpM/o3TxM7Fp3CEqRpaYCbg4H1hOVPnZoUtr2U= +golang.org/x/sys v0.0.0-20190410170021-cc4d4f50624c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db 
h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -583,10 +658,14 @@ google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3Bn google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.5.0 h1:lj9SyhMzyoa38fgFF0oO2T6pjs5IzkLPKfVtxpyCRMM= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80 h1:GL7nK1hkDKrkor0eVOYcMdIsUGErFnaC2gpBOVC+vbI= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -594,6 +673,9 @@ google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c h1:m9avZ3wyOWBR0fLC+qWbMBulk+Jiiqelngssgp8jfIs= +google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -602,6 +684,8 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0 h1:DlsSIrgEBuZAUFJcta2B5i/lzeHHbnfkNFAfFXLVFYQ= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= diff --git a/helper/awsutil/region_test.go b/helper/awsutil/region_test.go index 
c8d15c9ff09e..4b208a6cd5bd 100644 --- a/helper/awsutil/region_test.go +++ b/helper/awsutil/region_test.go @@ -110,6 +110,12 @@ func TestGetOrDefaultRegion_ConfigFileUnfound(t *testing.T) { } func TestGetOrDefaultRegion_EC2InstanceMetadataPreferredFourth(t *testing.T) { + if !shouldTestFiles { + // In some test environments, like a CI environment, we may not have the + // permissions to write to the ~/.aws/config file. Thus, this test is off + // by default but can be set to on for local development. + t.SkipNow() + } configuredRegion := "" cleanupEnv := setEnvRegion(t, "") diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index 699f3d6fcd8a..93ea322eadec 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -11,6 +11,7 @@ import ( credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" credJWT "github.com/hashicorp/vault-plugin-auth-jwt" credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" + credPCF "github.com/hashicorp/vault-plugin-auth-pcf" credAppId "github.com/hashicorp/vault/builtin/credential/app-id" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" credAws "github.com/hashicorp/vault/builtin/credential/aws" @@ -21,6 +22,7 @@ import ( credRadius "github.com/hashicorp/vault/builtin/credential/radius" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" dbCass "github.com/hashicorp/vault/plugins/database/cassandra" dbHana "github.com/hashicorp/vault/plugins/database/hana" dbInflux "github.com/hashicorp/vault/plugins/database/influxdb" @@ -75,6 +77,7 @@ func newRegistry() *registry { "ldap": credLdap.Factory, "oidc": credJWT.Factory, "okta": credOkta.Factory, + "pcf": credPCF.Factory, "radius": credRadius.Factory, "userpass": credUserpass.Factory, }, @@ -86,12 +89,13 @@ func newRegistry() *registry { "mysql-rds-database-plugin": dbMysql.New(credsutil.NoneLength, 
dbMysql.LegacyMetadataLen, dbMysql.LegacyUsernameLen), "mysql-legacy-database-plugin": dbMysql.New(credsutil.NoneLength, dbMysql.LegacyMetadataLen, dbMysql.LegacyUsernameLen), - "postgresql-database-plugin": dbPostgres.New, - "mssql-database-plugin": dbMssql.New, - "cassandra-database-plugin": dbCass.New, - "mongodb-database-plugin": dbMongo.New, - "hana-database-plugin": dbHana.New, - "influxdb-database-plugin": dbInflux.New, + "postgresql-database-plugin": dbPostgres.New, + "mssql-database-plugin": dbMssql.New, + "cassandra-database-plugin": dbCass.New, + "mongodb-database-plugin": dbMongo.New, + "hana-database-plugin": dbHana.New, + "influxdb-database-plugin": dbInflux.New, + "elasticsearch-database-plugin": dbElastic.New, }, logicalBackends: map[string]logical.Factory{ "ad": logicalAd.Factory, diff --git a/helper/identity/templating.go b/helper/identity/templating.go index 0d739c832ba4..d98d61bfebf0 100644 --- a/helper/identity/templating.go +++ b/helper/identity/templating.go @@ -1,10 +1,14 @@ package identity import ( + "encoding/json" "errors" "fmt" + "strconv" "strings" + "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/namespace" ) @@ -15,23 +19,106 @@ var ( ErrTemplateValueNotFound = errors.New("no value could be found for one of the template directives") ) +const ( + ACLTemplating = iota // must be the first value for backwards compatibility + JSONTemplating +) + type PopulateStringInput struct { - ValidityCheckOnly bool String string + ValidityCheckOnly bool Entity *Entity Groups []*Group Namespace *namespace.Namespace + Mode int // processing mode, ACLTemplate or JSONTemplating + Now time.Time // optional, defaults to current time + + templateHandler templateHandlerFunc + groupIDs []string + groupNames []string +} + +// templateHandlerFunc allows generating string outputs based on data type, and +// different handlers can be used based on mode. 
For example in ACL mode, strings +// are emitted verbatim, but they're wrapped in double quotes for JSON mode. And +// some structures, like slices, might be rendered in one mode but prohibited in +// another. +type templateHandlerFunc func(interface{}, ...string) (string, error) + +// aclTemplateHandler processes known parameter data types when operating +// in ACL mode. +func aclTemplateHandler(v interface{}, keys ...string) (string, error) { + switch t := v.(type) { + case string: + if t == "" { + return "", ErrTemplateValueNotFound + } + return t, nil + case []string: + return "", ErrTemplateValueNotFound + case map[string]string: + if len(keys) > 0 { + val, ok := t[keys[0]] + if ok { + return val, nil + } + } + return "", ErrTemplateValueNotFound + } + + return "", fmt.Errorf("unknown type: %T", v) } -func PopulateString(p *PopulateStringInput) (bool, string, error) { - if p == nil { - return false, "", errors.New("nil input") +// jsonTemplateHandler processes known parameter data types when operating +// in JSON mode. 
+func jsonTemplateHandler(v interface{}, keys ...string) (string, error) { + jsonMarshaller := func(v interface{}) (string, error) { + enc, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(enc), nil + } + + switch t := v.(type) { + case string: + return strconv.Quote(t), nil + case []string: + return jsonMarshaller(t) + case map[string]string: + if len(keys) > 0 { + return strconv.Quote(t[keys[0]]), nil + } + if t == nil { + return "{}", nil + } + return jsonMarshaller(t) } + return "", fmt.Errorf("unknown type: %T", v) +} + +func PopulateString(p PopulateStringInput) (bool, string, error) { if p.String == "" { return false, "", nil } + // preprocess groups + for _, g := range p.Groups { + p.groupNames = append(p.groupNames, g.Name) + p.groupIDs = append(p.groupIDs, g.ID) + } + + // set up mode-specific handler + switch p.Mode { + case ACLTemplating: + p.templateHandler = aclTemplateHandler + case JSONTemplating: + p.templateHandler = jsonTemplateHandler + default: + return false, "", fmt.Errorf("unknown mode %q", p.Mode) + } + var subst bool splitStr := strings.Split(p.String, "{{") @@ -61,7 +148,7 @@ func PopulateString(p *PopulateStringInput) (bool, string, error) { case 2: subst = true if !p.ValidityCheckOnly { - tmplStr, err := performTemplating(p.Namespace, strings.TrimSpace(splitPiece[0]), p.Entity, p.Groups) + tmplStr, err := performTemplating(strings.TrimSpace(splitPiece[0]), &p) if err != nil { return false, "", err } @@ -76,22 +163,22 @@ func PopulateString(p *PopulateStringInput) (bool, string, error) { return subst, b.String(), nil } -func performTemplating(ns *namespace.Namespace, input string, entity *Entity, groups []*Group) (string, error) { +func performTemplating(input string, p *PopulateStringInput) (string, error) { + performAliasTemplating := func(trimmed string, alias *Alias) (string, error) { switch { case trimmed == "id": - return alias.ID, nil + return p.templateHandler(alias.ID) + case trimmed == "name": - if 
alias.Name == "" { - return "", ErrTemplateValueNotFound - } - return alias.Name, nil + return p.templateHandler(alias.Name) + + case trimmed == "metadata": + return p.templateHandler(alias.Metadata) + case strings.HasPrefix(trimmed, "metadata."): - val, ok := alias.Metadata[strings.TrimPrefix(trimmed, "metadata.")] - if !ok { - return "", ErrTemplateValueNotFound - } - return val, nil + split := strings.SplitN(trimmed, ".", 2) + return p.templateHandler(alias.Metadata, split[1]) } return "", ErrTemplateValueNotFound @@ -100,34 +187,45 @@ func performTemplating(ns *namespace.Namespace, input string, entity *Entity, gr performEntityTemplating := func(trimmed string) (string, error) { switch { case trimmed == "id": - return entity.ID, nil + return p.templateHandler(p.Entity.ID) + case trimmed == "name": - if entity.Name == "" { - return "", ErrTemplateValueNotFound - } - return entity.Name, nil + return p.templateHandler(p.Entity.Name) + + case trimmed == "metadata": + return p.templateHandler(p.Entity.Metadata) + case strings.HasPrefix(trimmed, "metadata."): - val, ok := entity.Metadata[strings.TrimPrefix(trimmed, "metadata.")] - if !ok { - return "", ErrTemplateValueNotFound - } - return val, nil + split := strings.SplitN(trimmed, ".", 2) + return p.templateHandler(p.Entity.Metadata, split[1]) + + case trimmed == "group_names": + return p.templateHandler(p.groupNames) + + case trimmed == "group_ids": + return p.templateHandler(p.groupIDs) + case strings.HasPrefix(trimmed, "aliases."): split := strings.SplitN(strings.TrimPrefix(trimmed, "aliases."), ".", 2) if len(split) != 2 { return "", errors.New("invalid alias selector") } - var found *Alias - for _, alias := range entity.Aliases { - if split[0] == alias.MountAccessor { - found = alias + var alias *Alias + for _, a := range p.Entity.Aliases { + if split[0] == a.MountAccessor { + alias = a break } } - if found == nil { - return "", errors.New("alias not found") + if alias == nil { + if p.Mode == ACLTemplating { + 
return "", errors.New("alias not found") + } + + // An empty alias is sufficient for generating defaults + alias = &Alias{Metadata: make(map[string]string)} } - return performAliasTemplating(split[1], found) + return performAliasTemplating(split[1], alias) } return "", ErrTemplateValueNotFound @@ -137,12 +235,16 @@ func performTemplating(ns *namespace.Namespace, input string, entity *Entity, gr var ids bool selectorSplit := strings.SplitN(trimmed, ".", 2) + switch { case len(selectorSplit) != 2: return "", errors.New("invalid groups selector") + case selectorSplit[0] == "ids": ids = true + case selectorSplit[0] == "names": + default: return "", errors.New("invalid groups selector") } @@ -153,12 +255,12 @@ func performTemplating(ns *namespace.Namespace, input string, entity *Entity, gr return "", errors.New("invalid groups accessor") } var found *Group - for _, group := range groups { + for _, group := range p.Groups { var compare string if ids { compare = group.ID } else { - if ns != nil && group.NamespaceID == ns.ID { + if p.Namespace != nil && group.NamespaceID == p.Namespace.ID { compare = group.Name } else { continue @@ -180,11 +282,13 @@ func performTemplating(ns *namespace.Namespace, input string, entity *Entity, gr switch { case trimmed == "id": return found.ID, nil + case trimmed == "name": if found.Name == "" { return "", ErrTemplateValueNotFound } return found.Name, nil + case strings.HasPrefix(trimmed, "metadata."): val, ok := found.Metadata[strings.TrimPrefix(trimmed, "metadata.")] if !ok { @@ -196,18 +300,59 @@ func performTemplating(ns *namespace.Namespace, input string, entity *Entity, gr return "", ErrTemplateValueNotFound } + performTimeTemplating := func(trimmed string) (string, error) { + now := p.Now + if now.IsZero() { + now = time.Now() + } + + opsSplit := strings.SplitN(trimmed, ".", 3) + + if opsSplit[0] != "now" { + return "", fmt.Errorf("invalid time selector %q", opsSplit[0]) + } + + result := now + switch len(opsSplit) { + case 1: + // 
return current time + case 2: + return "", errors.New("missing time operand") + + case 3: + duration, err := time.ParseDuration(opsSplit[2]) + if err != nil { + return "", errwrap.Wrapf("invalid duration: {{err}}", err) + } + + switch opsSplit[1] { + case "plus": + result = result.Add(duration) + case "minus": + result = result.Add(-duration) + default: + return "", fmt.Errorf("invalid time operator %q", opsSplit[1]) + } + } + + return strconv.FormatInt(result.Unix(), 10), nil + } + switch { case strings.HasPrefix(input, "identity.entity."): - if entity == nil { + if p.Entity == nil { return "", ErrNoEntityAttachedToToken } return performEntityTemplating(strings.TrimPrefix(input, "identity.entity.")) case strings.HasPrefix(input, "identity.groups."): - if len(groups) == 0 { + if len(p.Groups) == 0 { return "", ErrNoGroupsAttachedToToken } return performGroupsTemplating(strings.TrimPrefix(input, "identity.groups.")) + + case strings.HasPrefix(input, "time."): + return performTimeTemplating(strings.TrimPrefix(input, "time.")) } return "", ErrTemplateValueNotFound diff --git a/helper/identity/templating_test.go b/helper/identity/templating_test.go index 66bf5d545b41..ca92a706d1de 100644 --- a/helper/identity/templating_test.go +++ b/helper/identity/templating_test.go @@ -2,13 +2,22 @@ package identity import ( "errors" + "fmt" + "math" + "strconv" "testing" + "time" "github.com/hashicorp/vault/helper/namespace" ) +// intentionally != time.Now() to catch latent used of time.Now instead of +// passed in values +var testNow = time.Now().Add(100 * time.Hour) + func TestPopulate_Basic(t *testing.T) { var tests = []struct { + mode int name string input string output string @@ -23,7 +32,40 @@ func TestPopulate_Basic(t *testing.T) { aliasMetadata map[string]string groupName string groupMetadata map[string]string + groupMemberships []string + now time.Time }{ + // time.* tests. 
Keep tests with time.Now() at the front to avoid false + // positives due to the second changing during the test + { + name: "time now", + input: "{{time.now}}", + output: strconv.Itoa(int(testNow.Unix())), + now: testNow, + }, + { + name: "time plus", + input: "{{time.now.plus.1h}}", + output: strconv.Itoa(int(testNow.Unix() + (60 * 60))), + now: testNow, + }, + { + name: "time plus", + input: "{{time.now.minus.5m}}", + output: strconv.Itoa(int(testNow.Unix() - (5 * 60))), + now: testNow, + }, + { + name: "invalid operator", + input: "{{time.now.divide.5m}}", + err: errors.New("invalid time operator \"divide\""), + }, + { + name: "time missing operand", + input: "{{time.now.plus}}", + err: errors.New("missing time operand"), + }, + { name: "no_templating", input: "path foobar {", @@ -86,12 +128,13 @@ func TestPopulate_Basic(t *testing.T) { }, { name: "alias_id_name", - input: "path {{ identity.entity.name}} {\n\tval = {{identity.entity.aliases.foomount.id}}\n}", + input: "path {{ identity.entity.name}} {\n\tval = {{identity.entity.aliases.foomount.id}} nval = {{identity.entity.aliases.foomount.name}}\n}", entityName: "entityName", aliasAccessor: "foomount", aliasID: "aliasID", + aliasName: "aliasName", metadata: map[string]string{"foo": "bar"}, - output: "path entityName {\n\tval = aliasID\n}", + output: "path entityName {\n\tval = aliasID nval = aliasName\n}", }, { name: "alias_id_name_bad_selector", @@ -143,6 +186,149 @@ func TestPopulate_Basic(t *testing.T) { groupName: "groupName", err: errors.New("entity is not a member of group \"hroupName\""), }, + { + name: "metadata_object_disallowed", + input: "{{identity.entity.metadata}}", + metadata: map[string]string{"foo": "bar"}, + err: ErrTemplateValueNotFound, + }, + { + name: "alias_metadata_object_disallowed", + input: "{{identity.entity.aliases.foomount.metadata}}", + aliasAccessor: "foomount", + aliasMetadata: map[string]string{"foo": "bar"}, + err: ErrTemplateValueNotFound, + }, + { + name: 
"group_names_disallowed", + input: "{{identity.entity.group_names}}", + groupMemberships: []string{"foo", "bar"}, + err: ErrTemplateValueNotFound, + }, + { + name: "group_ids_disallowed", + input: "{{identity.entity.group_ids}}", + groupMemberships: []string{"foo", "bar"}, + err: ErrTemplateValueNotFound, + }, + + // missing selector cases + { + mode: JSONTemplating, + name: "entity id", + input: "{{identity.entity.id}}", + output: `"entityID"`, + }, + { + mode: JSONTemplating, + name: "entity name", + input: "{{identity.entity.name}}", + entityName: "entityName", + output: `"entityName"`, + }, + { + mode: JSONTemplating, + name: "entity name missing", + input: "{{identity.entity.name}}", + output: `""`, + }, + { + mode: JSONTemplating, + name: "alias name/id", + input: "{{identity.entity.aliases.foomount.id}} {{identity.entity.aliases.foomount.name}}", + aliasAccessor: "foomount", + aliasID: "aliasID", + aliasName: "aliasName", + output: `"aliasID" "aliasName"`, + }, + { + mode: JSONTemplating, + name: "one metadata key", + input: "{{identity.entity.metadata.color}}", + metadata: map[string]string{"foo": "bar", "color": "green"}, + output: `"green"`, + }, + { + mode: JSONTemplating, + name: "one metadata key not found", + input: "{{identity.entity.metadata.size}}", + metadata: map[string]string{"foo": "bar", "color": "green"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "all entity metadata", + input: "{{identity.entity.metadata}}", + metadata: map[string]string{"foo": "bar", "color": "green"}, + output: `{"color":"green","foo":"bar"}`, + }, + { + mode: JSONTemplating, + name: "null entity metadata", + input: "{{identity.entity.metadata}}", + output: `{}`, + }, + { + mode: JSONTemplating, + name: "group_names", + input: "{{identity.entity.group_names}}", + groupMemberships: []string{"foo", "bar"}, + output: `["foo","bar"]`, + }, + { + mode: JSONTemplating, + name: "group_ids", + input: "{{identity.entity.group_ids}}", + groupMemberships: 
[]string{"foo", "bar"}, + output: `["foo_0","bar_1"]`, + }, + { + mode: JSONTemplating, + name: "one alias metadata key", + input: "{{identity.entity.aliases.aws_123.metadata.color}}", + aliasAccessor: "aws_123", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `"green"`, + }, + { + mode: JSONTemplating, + name: "one alias metadata key not found", + input: "{{identity.entity.aliases.aws_123.metadata.size}}", + aliasAccessor: "aws_123", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "one alias metadata, accessor not found", + input: "{{identity.entity.aliases.aws_123.metadata.size}}", + aliasAccessor: "not_gonna_match", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "all alias metadata", + input: "{{identity.entity.aliases.aws_123.metadata}}", + aliasAccessor: "aws_123", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `{"color":"green","foo":"bar"}`, + }, + { + mode: JSONTemplating, + name: "null alias metadata", + input: "{{identity.entity.aliases.aws_123.metadata}}", + aliasAccessor: "aws_123", + output: `{}`, + }, + { + mode: JSONTemplating, + name: "all alias metadata, accessor not found", + input: "{{identity.entity.aliases.aws_123.metadata}}", + aliasAccessor: "not_gonna_match", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `{}`, + }, } for _, test := range tests { @@ -156,7 +342,7 @@ func TestPopulate_Basic(t *testing.T) { } if test.aliasAccessor != "" { entity.Aliases = []*Alias{ - &Alias{ + { MountAccessor: test.aliasAccessor, ID: test.aliasID, Name: test.aliasName, @@ -173,12 +359,24 @@ func TestPopulate_Basic(t *testing.T) { NamespaceID: namespace.RootNamespace.ID, }) } - subst, out, err := PopulateString(&PopulateStringInput{ + + if test.groupMemberships != nil { + for i, groupName := range test.groupMemberships 
{ + groups = append(groups, &Group{ + ID: fmt.Sprintf("%s_%d", groupName, i), + Name: groupName, + }) + } + } + + subst, out, err := PopulateString(PopulateStringInput{ + Mode: test.mode, ValidityCheckOnly: test.validityCheckOnly, String: test.input, Entity: entity, Groups: groups, Namespace: namespace.RootNamespace, + Now: test.now, }) if err != nil { if test.err == nil { @@ -189,10 +387,108 @@ func TestPopulate_Basic(t *testing.T) { } } if out != test.output { - t.Fatalf("%s: bad output: %s", test.name, out) + t.Fatalf("%s: bad output: %s, expected: %s", test.name, out, test.output) } if err == nil && !subst && out != test.input { t.Fatalf("%s: bad subst flag", test.name) } } } + +func TestPopulate_CurrentTime(t *testing.T) { + now := time.Now() + + // Test that an unset Now parameter results in current time + input := PopulateStringInput{ + Mode: JSONTemplating, + String: `{{time.now}}`, + } + + _, out, err := PopulateString(input) + if err != nil { + t.Fatal(err) + } + + nowPopulated, err := strconv.Atoi(out) + if err != nil { + t.Fatal(err) + } + + diff := math.Abs(float64(int64(nowPopulated) - now.Unix())) + if diff > 1 { + t.Fatalf("expected time within 1 second. 
Got diff of: %f", diff) + } +} + +func TestPopulate_FullObject(t *testing.T) { + var testEntity = &Entity{ + ID: "abc-123", + Name: "Entity Name", + Metadata: map[string]string{ + "color": "green", + "size": "small", + "non-printable": "\"\n\t", + }, + Aliases: []*Alias{ + { + MountAccessor: "aws_123", + Metadata: map[string]string{ + "service": "ec2", + "region": "west", + }, + }, + }, + } + + var testGroups = []*Group{ + {ID: "a08b0c02", Name: "g1"}, + {ID: "239bef91", Name: "g2"}, + } + + template := ` + { + "id": {{identity.entity.id}}, + "name": {{identity.entity.name}}, + "all metadata": {{identity.entity.metadata}}, + "one metadata key": {{identity.entity.metadata.color}}, + "one metadata key not found": {{identity.entity.metadata.asldfk}}, + "alias metadata": {{identity.entity.aliases.aws_123.metadata}}, + "alias not found metadata": {{identity.entity.aliases.blahblah.metadata}}, + "one alias metadata key": {{identity.entity.aliases.aws_123.metadata.service}}, + "one not found alias metadata key": {{identity.entity.aliases.blahblah.metadata.service}}, + "group names": {{identity.entity.group_names}}, + "group ids": {{identity.entity.group_ids}}, + "repeated and": {"nested element": {{identity.entity.name}}} + }` + + expected := ` + { + "id": "abc-123", + "name": "Entity Name", + "all metadata": {"color":"green","non-printable":"\"\n\t","size":"small"}, + "one metadata key": "green", + "one metadata key not found": "", + "alias metadata": {"region":"west","service":"ec2"}, + "alias not found metadata": {}, + "one alias metadata key": "ec2", + "one not found alias metadata key": "", + "group names": ["g1","g2"], + "group ids": ["a08b0c02","239bef91"], + "repeated and": {"nested element": "Entity Name"} + }` + + input := PopulateStringInput{ + Mode: JSONTemplating, + String: template, + Entity: testEntity, + Groups: testGroups, + } + _, out, err := PopulateString(input) + if err != nil { + t.Fatal(err) + } + + if out != expected { + 
t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, out) + } +} diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go index e7b2e0fe51fc..5bb7a72571ce 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker.go @@ -9,6 +9,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/compressutil" "github.com/hashicorp/vault/sdk/helper/locksutil" @@ -124,60 +125,135 @@ func (s *StoragePacker) BucketKey(itemID string) string { // DeleteItem removes the item from the respective bucket func (s *StoragePacker) DeleteItem(_ context.Context, itemID string) error { - if itemID == "" { - return fmt.Errorf("empty item ID") - } - - bucketKey := s.BucketKey(itemID) + return s.DeleteMultipleItems(context.Background(), nil, itemID) +} - // Read from storage - storageEntry, err := s.view.Get(context.Background(), bucketKey) - if err != nil { - return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) - } - if storageEntry == nil { +func (s *StoragePacker) DeleteMultipleItems(ctx context.Context, logger hclog.Logger, itemIDs ...string) error { + var err error + switch len(itemIDs) { + case 0: + // Nothing return nil - } - uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) - if err != nil { - return errwrap.Wrapf("failed to decompress packed storage value: {{err}}", err) - } - if notCompressed { - uncompressedData = storageEntry.Value + case 1: + logger = hclog.NewNullLogger() + fallthrough + + default: + lockIndexes := make(map[string]struct{}, len(s.storageLocks)) + for _, itemID := range itemIDs { + bucketKey := s.BucketKey(itemID) + if _, ok := lockIndexes[bucketKey]; !ok { + lockIndexes[bucketKey] = struct{}{} + } + } + + lockKeys := make([]string, 0, len(lockIndexes)) + for k := range lockIndexes { + lockKeys = append(lockKeys, k) + } + 
+ locks := locksutil.LocksForKeys(s.storageLocks, lockKeys) + for _, lock := range locks { + lock.Lock() + defer lock.Unlock() + } } - var bucket Bucket - err = proto.Unmarshal(uncompressedData, &bucket) - if err != nil { - return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err) + if logger == nil { + logger = hclog.NewNullLogger() } - // Look for a matching storage entry - foundIdx := -1 - for itemIdx, item := range bucket.Items { - if item.ID == itemID { - foundIdx = itemIdx - break + bucketCache := make(map[string]*Bucket, len(s.storageLocks)) + + logger.Debug("deleting multiple items from storagepacker; caching and deleting from buckets", "total_items", len(itemIDs)) + + var pctDone int + for idx, itemID := range itemIDs { + bucketKey := s.BucketKey(itemID) + + bucket, bucketFound := bucketCache[bucketKey] + if !bucketFound { + // Read from storage + storageEntry, err := s.view.Get(context.Background(), bucketKey) + if err != nil { + return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) + } + if storageEntry == nil { + return nil + } + + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return errwrap.Wrapf("failed to decompress packed storage value: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + bucket = new(Bucket) + err = proto.Unmarshal(uncompressedData, bucket) + if err != nil { + return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err) + } + } + + // Look for a matching storage entry + foundIdx := -1 + for itemIdx, item := range bucket.Items { + if item.ID == itemID { + foundIdx = itemIdx + break + } + } + + // If there is a match, remove it from the collection and persist the + // resulting collection + if foundIdx != -1 { + bucket.Items[foundIdx] = bucket.Items[len(bucket.Items)-1] + bucket.Items = bucket.Items[:len(bucket.Items)-1] + if !bucketFound { + bucketCache[bucketKey] = bucket + } + } + + 
newPctDone := idx * 100.0 / len(itemIDs) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("bucket item removal progress", "percent", pctDone, "items_removed", idx) } } - // If there is a match, remove it from the collection and persist the - // resulting collection - if foundIdx != -1 { - bucket.Items = append(bucket.Items[:foundIdx], bucket.Items[foundIdx+1:]...) + logger.Debug("persisting buckets", "total_buckets", len(bucketCache)) - // Persist bucket entry only if there is an update - err = s.putBucket(&bucket) + // Persist all buckets in the cache; these will be the ones that had + // deletions + pctDone = 0 + idx := 0 + for _, bucket := range bucketCache { + // Fail if the context is canceled, the storage calls will fail anyways + if ctx.Err() != nil { + return ctx.Err() + } + + err = s.putBucket(ctx, bucket) if err != nil { return err } + + newPctDone := idx * 100.0 / len(bucketCache) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("bucket persistence progress", "percent", pctDone, "buckets_persisted", idx) + } + + idx++ } return nil } -func (s *StoragePacker) putBucket(bucket *Bucket) error { +func (s *StoragePacker) putBucket(ctx context.Context, bucket *Bucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -203,7 +279,7 @@ func (s *StoragePacker) putBucket(bucket *Bucket) error { } // Store the compressed value - err = s.view.Put(context.Background(), &logical.StorageEntry{ + err = s.view.Put(ctx, &logical.StorageEntry{ Key: bucket.Key, Value: compressedBucket, }) @@ -298,7 +374,7 @@ func (s *StoragePacker) PutItem(_ context.Context, item *Item) error { } } - return s.putBucket(bucket) + return s.putBucket(context.Background(), bucket) } // NewStoragePacker creates a new storage packer for a given view diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go index c69e974fa31b..ef7c1453a979 100644 --- 
a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_test.go @@ -2,6 +2,7 @@ package storagepacker import ( "context" + "fmt" "testing" "github.com/golang/protobuf/proto" @@ -177,3 +178,62 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { t.Fatalf("bad: expected: %#v\nactual: %#v\n", entity, itemDecoded) } } + +func TestStoragePacker_DeleteMultiple(t *testing.T) { + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + // Persist a storage entry + for i := 0; i < 100; i++ { + item := &Item{ + ID: fmt.Sprintf("item%d", i), + } + + err = storagePacker.PutItem(ctx, item) + if err != nil { + t.Fatal(err) + } + + // Verify that it can be read + fetchedItem, err := storagePacker.GetItem(item.ID) + if err != nil { + t.Fatal(err) + } + if fetchedItem == nil { + t.Fatalf("failed to read the stored item") + } + + if item.ID != fetchedItem.ID { + t.Fatalf("bad: item ID; expected: %q\n actual: %q\n", item.ID, fetchedItem.ID) + } + } + + itemsToDelete := make([]string, 0, 50) + for i := 1; i < 100; i += 2 { + itemsToDelete = append(itemsToDelete, fmt.Sprintf("item%d", i)) + } + + err = storagePacker.DeleteMultipleItems(ctx, nil, itemsToDelete...) 
+ if err != nil { + t.Fatal(err) + } + + // Check that the deletion was successful + for i := 0; i < 100; i++ { + fetchedItem, err := storagePacker.GetItem(fmt.Sprintf("item%d", i)) + if err != nil { + t.Fatal(err) + } + + if i%2 == 0 && fetchedItem == nil { + t.Fatal("expected item not found") + } + if i%2 == 1 && fetchedItem != nil { + t.Fatalf("failed to delete item") + } + } +} diff --git a/helper/testhelpers/logical/testing.go b/helper/testhelpers/logical/testing.go index e53331838155..196964534757 100644 --- a/helper/testhelpers/logical/testing.go +++ b/helper/testhelpers/logical/testing.go @@ -295,11 +295,9 @@ func Test(tt TestT, c TestCase) { DisplayName: tokenInfo.Data["display_name"].(string), }) } - if s.RemoteAddr != "" { - req.Connection = &logical.Connection{RemoteAddr: s.RemoteAddr} - } + req.Connection = &logical.Connection{RemoteAddr: s.RemoteAddr} if s.ConnState != nil { - req.Connection = &logical.Connection{ConnState: s.ConnState} + req.Connection.ConnState = s.ConnState } if s.PreFlight != nil { diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 3a02fbcd7568..2f63614f1aba 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -5,20 +5,27 @@ import ( "encoding/base64" "errors" "fmt" + "io/ioutil" "math/rand" "net/http" + "net/url" + "os" "reflect" "sync" + "sync/atomic" "time" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault/cluster" log "github.com/hashicorp/go-hclog" + raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/xor" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/physical" @@ -265,10 +272,6 @@ func 
ConfClusterAndCore(t testing.T, conf *vault.CoreConfig, opts *vault.TestClu "approle": credAppRole.Factory, "userpass": credUserpass.Factory, } - coreConfig.LogicalBackends = map[string]logical.Factory{ - "local-kv": PassthroughWithLocalPathsFactory, - "leased-kv": vault.LeasedPassthroughBackendFactory, - } vault.AddNoopAudit(&coreConfig) cluster := vault.NewTestCluster(t, &coreConfig, opts) cluster.Start() @@ -281,13 +284,6 @@ func ConfClusterAndCore(t testing.T, conf *vault.CoreConfig, opts *vault.TestClu return cluster, core } -func GetClusterAndCore(t testing.T, logger log.Logger, handlerFunc func(*vault.HandlerProperties) http.Handler) (*vault.TestCluster, *vault.TestClusterCore) { - return ConfClusterAndCore(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ - Logger: logger, - HandlerFunc: handlerFunc, - }) -} - func GetPerfReplicatedClusters(t testing.T, conf *vault.CoreConfig, opts *vault.TestClusterOptions) *ReplicatedTestClusters { ret := &ReplicatedTestClusters{} @@ -305,13 +301,18 @@ func GetPerfReplicatedClusters(t testing.T, conf *vault.CoreConfig, opts *vault. 
// Set this lower so that state populates quickly to standby nodes cluster.HeartbeatInterval = 2 * time.Second - opts1 := *opts - opts1.Logger = logger.Named("perf-pri") - ret.PerfPrimaryCluster, _ = ConfClusterAndCore(t, conf, &opts1) + numCores := opts.NumCores + if numCores == 0 { + numCores = vault.DefaultNumCores + } + + localopts := *opts + localopts.Logger = logger.Named("perf-pri") + ret.PerfPrimaryCluster, _ = ConfClusterAndCore(t, conf, &localopts) - opts2 := *opts - opts1.Logger = logger.Named("perf-sec") - ret.PerfSecondaryCluster, _ = ConfClusterAndCore(t, conf, &opts2) + localopts.Logger = logger.Named("perf-sec") + localopts.FirstCoreNumber += numCores + ret.PerfSecondaryCluster, _ = ConfClusterAndCore(t, conf, &localopts) SetupTwoClusterPerfReplication(t, ret.PerfPrimaryCluster, ret.PerfSecondaryCluster) @@ -319,6 +320,12 @@ func GetPerfReplicatedClusters(t testing.T, conf *vault.CoreConfig, opts *vault. } func GetFourReplicatedClusters(t testing.T, handlerFunc func(*vault.HandlerProperties) http.Handler) *ReplicatedTestClusters { + return GetFourReplicatedClustersWithConf(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: handlerFunc, + }) +} + +func GetFourReplicatedClustersWithConf(t testing.T, conf *vault.CoreConfig, opts *vault.TestClusterOptions) *ReplicatedTestClusters { ret := &ReplicatedTestClusters{} logger := log.New(&log.LoggerOptions{ @@ -328,13 +335,26 @@ func GetFourReplicatedClusters(t testing.T, handlerFunc func(*vault.HandlerPrope // Set this lower so that state populates quickly to standby nodes cluster.HeartbeatInterval = 2 * time.Second - ret.PerfPrimaryCluster, _ = GetClusterAndCore(t, logger.Named("perf-pri"), handlerFunc) + numCores := opts.NumCores + if numCores == 0 { + numCores = vault.DefaultNumCores + } + + localopts := *opts + localopts.Logger = logger.Named("perf-pri") + ret.PerfPrimaryCluster, _ = ConfClusterAndCore(t, conf, &localopts) - ret.PerfSecondaryCluster, _ = GetClusterAndCore(t, 
logger.Named("perf-sec"), handlerFunc) + localopts.Logger = logger.Named("perf-sec") + localopts.FirstCoreNumber += numCores + ret.PerfSecondaryCluster, _ = ConfClusterAndCore(t, conf, &localopts) - ret.PerfPrimaryDRCluster, _ = GetClusterAndCore(t, logger.Named("perf-pri-dr"), handlerFunc) + localopts.Logger = logger.Named("perf-pri-dr") + localopts.FirstCoreNumber += numCores + ret.PerfPrimaryDRCluster, _ = ConfClusterAndCore(t, conf, &localopts) - ret.PerfSecondaryDRCluster, _ = GetClusterAndCore(t, logger.Named("perf-sec-dr"), handlerFunc) + localopts.Logger = logger.Named("perf-sec-dr") + localopts.FirstCoreNumber += numCores + ret.PerfSecondaryDRCluster, _ = ConfClusterAndCore(t, conf, &localopts) builder := &ReplicatedTestClustersBuilder{clusters: ret} builder.setupFourClusterReplication(t) @@ -459,10 +479,16 @@ func getDrToken(t testing.T, tc *vault.TestCluster, id string) string { func (r *ReplicatedTestClustersBuilder) enablePerformanceSecondary(t testing.T) { c := r.clusters.PerfSecondaryCluster.Cores[0] - _, err := c.Client.Logical().Write("sys/replication/performance/secondary/enable", map[string]interface{}{ + postData := map[string]interface{}{ "token": r.perfToken, "ca_file": r.clusters.PerfPrimaryCluster.CACertPEMFile, - }) + } + if r.clusters.PerfPrimaryCluster.ClientAuthRequired { + p := r.clusters.PerfPrimaryCluster.Cores[0] + postData["client_cert_pem"] = string(p.ServerCertPEM) + postData["client_key_pem"] = string(p.ServerKeyPEM) + } + _, err := c.Client.Logical().Write("sys/replication/performance/secondary/enable", postData) if err != nil { t.Fatal(err) } @@ -501,6 +527,21 @@ func (r *ReplicatedTestClustersBuilder) enableDrSecondary(t testing.T, tc *vault EnsureCoresUnsealed(t, tc) } +func EnsureStableActiveNode(t testing.T, cluster *vault.TestCluster) { + activeCore := DeriveActiveCore(t, cluster) + + for i := 0; i < 30; i++ { + leaderResp, err := activeCore.Client.Sys().Leader() + if err != nil { + t.Fatal(err) + } + if !leaderResp.IsSelf 
{ + t.Fatal("unstable active node") + } + time.Sleep(200 * time.Millisecond) + } +} + func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { for i := 0; i < 10; i++ { for _, core := range cluster.Cores { @@ -533,6 +574,25 @@ func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestCl return cores } +func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) { + t.Helper() + for i := 0; i < 30; i++ { + unsealed := 0 + for _, core := range cluster.Cores { + if !core.Core.Sealed() { + unsealed++ + } + } + + if unsealed >= n { + return + } + time.Sleep(time.Second) + } + + t.Fatalf("%d cores were not sealed", n) +} + func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) { t.Helper() for i := 0; i < 30; i++ { @@ -609,3 +669,131 @@ func WaitForWAL(t testing.T, c *vault.TestClusterCore, wal uint64) { time.Sleep(1 * time.Second) } } + +func RekeyCluster(t testing.T, cluster *vault.TestCluster) { + client := cluster.Cores[0].Client + + init, err := client.Sys().RekeyInit(&api.RekeyInitRequest{ + SecretShares: 5, + SecretThreshold: 3, + }) + if err != nil { + t.Fatal(err) + } + + var statusResp *api.RekeyUpdateResponse + for j := 0; j < len(cluster.BarrierKeys); j++ { + statusResp, err = client.Sys().RekeyUpdate(base64.StdEncoding.EncodeToString(cluster.BarrierKeys[j]), init.Nonce) + if err != nil { + t.Fatal(err) + } + if statusResp == nil { + t.Fatal("nil status response during unseal") + } + if statusResp.Complete { + break + } + } + + if len(statusResp.KeysB64) != 5 { + t.Fatal("wrong number of keys") + } + + newBarrierKeys := make([][]byte, 5) + for i, key := range statusResp.KeysB64 { + newBarrierKeys[i], err = base64.StdEncoding.DecodeString(key) + if err != nil { + t.Fatal(err) + } + } + + cluster.BarrierKeys = newBarrierKeys +} + +func CreateRaftBackend(t testing.T, logger hclog.Logger, nodeID string) (physical.Backend, func(), error) { + raftDir, err := ioutil.TempDir("", 
"vault-raft-") + if err != nil { + t.Fatal(err) + } + t.Logf("raft dir: %s", raftDir) + cleanupFunc := func() { + os.RemoveAll(raftDir) + } + + logger.Info("raft dir", "dir", raftDir) + + conf := map[string]string{ + "path": raftDir, + "node_id": nodeID, + "performance_multiplier": "8", + } + + backend, err := raft.NewRaftBackend(conf, logger) + if err != nil { + cleanupFunc() + t.Fatal(err) + } + + return backend, cleanupFunc, nil +} + +type TestRaftServerAddressProvider struct { + Cluster *vault.TestCluster +} + +func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) { + for _, core := range p.Cluster.Cores { + if core.NodeID == string(id) { + parsed, err := url.Parse(core.ClusterAddr()) + if err != nil { + return "", err + } + + return raftlib.ServerAddress(parsed.Host), nil + } + } + + return "", errors.New("could not find cluster addr") +} + +func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { + addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} + + leaderCore := cluster.Cores[0] + leaderAPI := leaderCore.Client.Address() + atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1) + + // Seal the leader so we can install an address provider + { + EnsureCoreSealed(t, leaderCore) + leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + cluster.UnsealCore(t, leaderCore) + vault.TestWaitActive(t, leaderCore.Core) + } + + // Join core1 + { + core := cluster.Cores[1] + core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderAPI, leaderCore.TLSConfig, false) + if err != nil { + t.Fatal(err) + } + + cluster.UnsealCore(t, core) + } + + // Join core2 + { + core := cluster.Cores[2] + core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + _, err := 
core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderAPI, leaderCore.TLSConfig, false) + if err != nil { + t.Fatal(err) + } + + cluster.UnsealCore(t, core) + } + + WaitForNCoresUnsealed(t, cluster, 3) +} diff --git a/http/handler.go b/http/handler.go index be37098bd1c0..92661ed1637d 100644 --- a/http/handler.go +++ b/http/handler.go @@ -69,6 +69,7 @@ var ( // perfStandbyAlwaysForwardPaths is used to check a requested path against // the always forward list perfStandbyAlwaysForwardPaths = pathmanager.New() + alwaysRedirectPaths = pathmanager.New() injectDataIntoTopRoutes = []string{ "/v1/sys/audit", @@ -95,6 +96,13 @@ var ( } ) +func init() { + alwaysRedirectPaths.AddPaths([]string{ + "sys/storage/raft/snapshot", + "sys/storage/raft/snapshot-force", + }) +} + // Handler returns an http.Handler for the API. This can be used on // its own to mount the Vault API within another web server. func Handler(props *vault.HandlerProperties) http.Handler { @@ -117,6 +125,7 @@ func Handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true))) mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true))) mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true))) + mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core)) for _, path := range injectDataIntoTopRoutes { mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core))) } @@ -472,7 +481,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle } path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) switch { - case !perfStandbyAlwaysForwardPaths.HasPath(path): + case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path): handler.ServeHTTP(w, r) return case strings.HasPrefix(path, "auth/token/create/"): @@ -526,6 +535,17 @@ 
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { return } + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + if alwaysRedirectPaths.HasPath(path) { + respondStandby(core, w, r.URL) + return + } + // Attempt forwarding the request. If we cannot forward -- perhaps it's // been disabled on the active node -- this will return with an // ErrCannotForward and we simply fall back @@ -596,6 +616,14 @@ func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *l } } + // If vault's core has already written to the response writer do not add any + // additional output. Headers have already been sent. If the response writer + // is set but has not been written to it likely means there was some kind of + // error + if r.ResponseWriter != nil && r.ResponseWriter.Written() { + return nil, true, false + } + if respondErrorCommon(w, r, resp, err) { return resp, false, false } diff --git a/http/logical.go b/http/logical.go index 70dc2bf162db..50529f945866 100644 --- a/http/logical.go +++ b/http/logical.go @@ -27,6 +27,8 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques var data map[string]interface{} var origBody io.ReadCloser + var requestReader io.ReadCloser + var responseWriter io.Writer // Determine the operation var op logical.Operation @@ -75,18 +77,29 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques data = getData } } + if path == "sys/storage/raft/snapshot" { + responseWriter = w + } case "POST", "PUT": op = logical.UpdateOperation // Parse the request if we can if op == logical.UpdateOperation { - origBody, err = parseRequest(core, r, w, &data) - if err == io.EOF { - data = nil - err = nil - } - if err != nil { - return nil, nil, http.StatusBadRequest, err + // If we are uploading a snapshot we don't want to parse it. 
Instead + // we will simply add the request body to the logical request object + // for later consumption. + if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" { + requestReader = r.Body + origBody = r.Body + } else { + origBody, err = parseRequest(core, r, w, &data) + if err == io.EOF { + data = nil + err = nil + } + if err != nil { + return nil, nil, http.StatusBadRequest, err + } } } @@ -136,6 +149,12 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques return nil, nil, http.StatusBadRequest, errwrap.Wrapf(fmt.Sprintf(`failed to parse %s header: {{err}}`, PolicyOverrideHeaderName), err) } + if requestReader != nil { + req.RequestReader = requestReader + } + if responseWriter != nil { + req.ResponseWriter = logical.NewHTTPResponseWriter(responseWriter) + } return req, origBody, 0, nil } @@ -287,6 +306,12 @@ func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request var httpResp *logical.HTTPResponse var ret interface{} + // If vault's core has already written to the response writer do not add any + // additional output. Headers have already been sent. + if req != nil && req.ResponseWriter != nil && req.ResponseWriter.Written() { + return + } + if resp != nil { if resp.Redirect != "" { // If we have a redirect, redirect! 
We use a 307 code diff --git a/http/sys_raft.go b/http/sys_raft.go new file mode 100644 index 000000000000..1dc4a1b46fa4 --- /dev/null +++ b/http/sys_raft.go @@ -0,0 +1,64 @@ +package http + +import ( + "context" + "crypto/tls" + "io" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/tlsutil" + "github.com/hashicorp/vault/vault" +) + +func handleSysRaftJoin(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "POST", "PUT": + handleSysRaftJoinPost(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Request) { + // Parse the request + var req JoinRequest + if _, err := parseRequest(core, r, w, &req); err != nil && err != io.EOF { + respondError(w, http.StatusBadRequest, err) + return + } + + var tlsConfig *tls.Config + var err error + if len(req.LeaderCACert) != 0 || len(req.LeaderClientCert) != 0 || len(req.LeaderClientKey) != 0 { + tlsConfig, err = tlsutil.ClientTLSConfig([]byte(req.LeaderCACert), []byte(req.LeaderClientCert), []byte(req.LeaderClientKey)) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + } + + joined, err := core.JoinRaftCluster(context.Background(), req.LeaderAPIAddr, tlsConfig, req.Retry) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + resp := JoinResponse{ + Joined: joined, + } + respondOk(w, resp) +} + +type JoinResponse struct { + Joined bool `json:"joined"` +} + +type JoinRequest struct { + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert":` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` +} diff --git a/physical/consul/consul.go b/physical/consul/consul.go index e050cf08ee43..4db2b3628754 100644 --- a/physical/consul/consul.go +++ 
b/physical/consul/consul.go @@ -237,9 +237,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe logger.Debug("config address parsed", "scheme", parts[0]) logger.Debug("config scheme parsed", "address", parts[1]) } - } else { - return nil, errors.New("address should be host[:port], not URL") - } + } // allow "unix:" or whatever else consul supports in the future } } if scheme, ok := conf["scheme"]; ok { diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index fa45f972f197..b474fb9ae5d8 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -231,6 +231,24 @@ func TestConsul_newConsulBackend(t *testing.T) { max_parallel: 4, consistencyMode: "strong", }, + { + name: "Unix socket", + consulConfig: map[string]string{ + "address": "unix:///tmp/.consul.http.sock", + }, + address: "/tmp/.consul.http.sock", + scheme: "http", // Default, not overridden? + + // Defaults + checkTimeout: 5 * time.Second, + redirectAddr: "http://127.0.0.1:8200", + path: "vault/", + service: "vault", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, { name: "Scheme in address", consulConfig: map[string]string{ diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index 9f682b193418..ea723a27c083 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -31,7 +31,7 @@ func TestMSSQLBackend(t *testing.T) { if schema == "" { schema = "test" } - + username := os.Getenv("MSSQL_USERNAME") password := os.Getenv("MSSQL_PASSWORD") @@ -83,7 +83,7 @@ func TestMSSQLBackend_schema(t *testing.T) { if schema == "" { schema = "test" } - + username := os.Getenv("MSSQL_USERNAME") password := os.Getenv("MSSQL_PASSWORD") @@ -93,7 +93,7 @@ func TestMSSQLBackend_schema(t *testing.T) { b, err := NewMSSQLBackend(map[string]string{ "server": server, "database": database, - "schema": schema, + "schema": schema, "table": table, "username": username, 
"password": password, diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go index d850ec0b27d9..dbd3a0d2ef77 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -6,20 +6,45 @@ import ( "fmt" "strconv" "strings" + "sync" "time" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/physical" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" - metrics "github.com/armon/go-metrics" + "github.com/armon/go-metrics" "github.com/lib/pq" ) +const ( + + // The lock TTL matches the default that Consul API uses, 15 seconds. + // Used as part of SQL commands to set/extend lock expiry time relative to + // database clock. + PostgreSQLLockTTLSeconds = 15 + + // The amount of time to wait between the lock renewals + PostgreSQLLockRenewInterval = 5 * time.Second + + // PostgreSQLLockRetryInterval is the amount of time to wait + // if a lock fails before trying again. + PostgreSQLLockRetryInterval = time.Second +) + // Verify PostgreSQLBackend satisfies the correct interfaces var _ physical.Backend = (*PostgreSQLBackend)(nil) +// +// HA backend was implemented based on the DynamoDB backend pattern +// With distinction using central postgres clock, hereby avoiding +// possible issues with multiple clocks +// +var _ physical.HABackend = (*PostgreSQLBackend)(nil) +var _ physical.Lock = (*PostgreSQLLock)(nil) + // PostgreSQL Backend is a physical backend that stores data // within a PostgreSQL database. type PostgreSQLBackend struct { @@ -29,8 +54,34 @@ type PostgreSQLBackend struct { get_query string delete_query string list_query string - logger log.Logger - permitPool *physical.PermitPool + + ha_table string + haGetLockValueQuery string + haUpsertLockIdentityExec string + haDeleteLockExec string + + haEnabled bool + logger log.Logger + permitPool *physical.PermitPool +} + +// PostgreSQLLock implements a lock using an PostgreSQL client. 
+type PostgreSQLLock struct { + backend *PostgreSQLBackend + value, key string + identity string + lock sync.Mutex + + renewTicker *time.Ticker + + // ttlSeconds is how long a lock is valid for + ttlSeconds int + + // renewInterval is how much time to wait between lock renewals. must be << ttl + renewInterval time.Duration + + // retryInterval is how much time to wait between attempts to grab the lock + retryInterval time.Duration } // NewPostgreSQLBackend constructs a PostgreSQL backend using the given @@ -63,6 +114,18 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B maxParInt = physical.DefaultParallelOperations } + maxIdleConnsStr, maxIdleConnsIsSet := conf["max_idle_connections"] + var maxIdleConns int + if maxIdleConnsIsSet { + maxIdleConns, err = strconv.Atoi(maxIdleConnsStr) + if err != nil { + return nil, errwrap.Wrapf("failed parsing max_idle_connections parameter: {{err}}", err) + } + if logger.IsDebug() { + logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnsStr) + } + } + // Create PostgreSQL handle for the database. 
db, err := sql.Open("postgres", connURL) if err != nil { @@ -70,17 +133,25 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B } db.SetMaxOpenConns(maxParInt) - // Determine if we should use an upsert function (versions < 9.5) - var upsert_required bool - upsert_required_query := "SELECT current_setting('server_version_num')::int < 90500" - if err := db.QueryRow(upsert_required_query).Scan(&upsert_required); err != nil { + if maxIdleConnsIsSet { + db.SetMaxIdleConns(maxIdleConns) + } + + // Determine if we should use a function to work around lack of upsert (versions < 9.5) + var upsertAvailable bool + upsertAvailableQuery := "SELECT current_setting('server_version_num')::int >= 90500" + if err := db.QueryRow(upsertAvailableQuery).Scan(&upsertAvailable); err != nil { return nil, errwrap.Wrapf("failed to check for native upsert: {{err}}", err) } + if !upsertAvailable && conf["ha_enabled"] == "true" { + return nil, fmt.Errorf("ha_enabled=true in config but PG version doesn't support HA, must be at least 9.5") + } + // Setup our put strategy based on the presence or absence of a native // upsert. var put_query string - if upsert_required { + if !upsertAvailable { put_query = "SELECT vault_kv_put($1, $2, $3, $4)" } else { put_query = "INSERT INTO " + quoted_table + " VALUES($1, $2, $3, $4)" + @@ -88,6 +159,12 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B " UPDATE SET (parent_path, path, key, value) = ($1, $2, $3, $4)" } + unquoted_ha_table, ok := conf["ha_table"] + if !ok { + unquoted_ha_table = "vault_ha_locks" + } + quoted_ha_table := pq.QuoteIdentifier(unquoted_ha_table) + // Setup the backend. 
m := &PostgreSQLBackend{ table: quoted_table, @@ -96,10 +173,25 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B get_query: "SELECT value FROM " + quoted_table + " WHERE path = $1 AND key = $2", delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2", list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" + - "UNION SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " + - quoted_table + " WHERE parent_path LIKE $1 || '%'", + " UNION ALL SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " + quoted_table + + " WHERE parent_path LIKE $1 || '%'", + haGetLockValueQuery: + // only read non expired data + " SELECT ha_value FROM " + quoted_ha_table + " WHERE NOW() <= valid_until AND ha_key = $1 ", + haUpsertLockIdentityExec: + // $1=identity $2=ha_key $3=ha_value $4=TTL in seconds + // update either steal expired lock OR update expiry for lock owned by me + " INSERT INTO " + quoted_ha_table + " as t (ha_identity, ha_key, ha_value, valid_until) VALUES ($1, $2, $3, NOW() + $4 * INTERVAL '1 seconds' ) " + + " ON CONFLICT (ha_key) DO " + + " UPDATE SET (ha_identity, ha_key, ha_value, valid_until) = ($1, $2, $3, NOW() + $4 * INTERVAL '1 seconds') " + + " WHERE (t.valid_until < NOW() AND t.ha_key = $2) OR " + + " (t.ha_identity = $1 AND t.ha_key = $2) ", + haDeleteLockExec: + // $1=ha_identity $2=ha_key + " DELETE FROM " + quoted_ha_table + " WHERE ha_identity=$1 AND ha_key=$2 ", logger: logger, permitPool: physical.NewPermitPool(maxParInt), + haEnabled: conf["ha_enabled"] == "true", } return m, nil @@ -213,3 +305,155 @@ func (m *PostgreSQLBackend) List(ctx context.Context, prefix string) ([]string, return keys, nil } + +// LockWith is used for mutual exclusion based on the given key. 
+func (p *PostgreSQLBackend) LockWith(key, value string) (physical.Lock, error) { + identity, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + return &PostgreSQLLock{ + backend: p, + key: key, + value: value, + identity: identity, + ttlSeconds: PostgreSQLLockTTLSeconds, + renewInterval: PostgreSQLLockRenewInterval, + retryInterval: PostgreSQLLockRetryInterval, + }, nil +} + +func (p *PostgreSQLBackend) HAEnabled() bool { + return p.haEnabled +} + +// Lock tries to acquire the lock by repeatedly trying to create a record in the +// PostgreSQL table. It will block until either the stop channel is closed or +// the lock could be acquired successfully. The returned channel will be closed +// once the lock in the PostgreSQL table cannot be renewed, either due to an +// error speaking to PostgreSQL or because someone else has taken it. +func (l *PostgreSQLLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + l.lock.Lock() + defer l.lock.Unlock() + + var ( + success = make(chan struct{}) + errors = make(chan error) + leader = make(chan struct{}) + ) + // try to acquire the lock asynchronously + go l.tryToLock(stopCh, success, errors) + + select { + case <-success: + // after acquiring it successfully, we must renew the lock periodically + l.renewTicker = time.NewTicker(l.renewInterval) + go l.periodicallyRenewLock(leader) + case err := <-errors: + return nil, err + case <-stopCh: + return nil, nil + } + + return leader, nil +} + +// Unlock releases the lock by deleting the lock record from the +// PostgreSQL table. +func (l *PostgreSQLLock) Unlock() error { + pg := l.backend + pg.permitPool.Acquire() + defer pg.permitPool.Release() + + if l.renewTicker != nil { + l.renewTicker.Stop() + } + + // Delete lock owned by me + _, err := pg.client.Exec(pg.haDeleteLockExec, l.identity, l.key) + return err +} + +// Value checks whether or not the lock is held by any instance of PostgreSQLLock, +// including this one, and returns the current value. 
+func (l *PostgreSQLLock) Value() (bool, string, error) { + pg := l.backend + pg.permitPool.Acquire() + defer pg.permitPool.Release() + var result string + err := pg.client.QueryRow(pg.haGetLockValueQuery, l.key).Scan(&result) + + switch err { + case nil: + return true, result, nil + case sql.ErrNoRows: + return false, "", nil + default: + return false, "", err + + } +} + +// tryToLock tries to create a new item in PostgreSQL every `retryInterval`. +// As long as the item cannot be created (because it already exists), it will +// be retried. If the operation fails due to an error, it is sent to the errors +// channel. When the lock could be acquired successfully, the success channel +// is closed. +func (l *PostgreSQLLock) tryToLock(stop <-chan struct{}, success chan struct{}, errors chan error) { + ticker := time.NewTicker(l.retryInterval) + defer ticker.Stop() + + for { + select { + case <-stop: + return + case <-ticker.C: + gotlock, err := l.writeItem() + switch { + case err != nil: + errors <- err + return + case gotlock: + close(success) + return + } + } + } +} + +func (l *PostgreSQLLock) periodicallyRenewLock(done chan struct{}) { + for range l.renewTicker.C { + gotlock, err := l.writeItem() + if err != nil || !gotlock { + close(done) + l.renewTicker.Stop() + return + } + } +} + +// Attempts to put/update the PostgreSQL item using condition expressions to +// evaluate the TTL. Returns true if the lock was obtained, false if not. +// If false error may be nil or non-nil: nil indicates simply that someone +// else has the lock, whereas non-nil means that something unexpected happened. 
+func (l *PostgreSQLLock) writeItem() (bool, error) { + pg := l.backend + pg.permitPool.Acquire() + defer pg.permitPool.Release() + + // Try steal lock or update expiry on my lock + + sqlResult, err := pg.client.Exec(pg.haUpsertLockIdentityExec, l.identity, l.key, l.value, l.ttlSeconds) + if err != nil { + return false, err + } + if sqlResult == nil { + return false, fmt.Errorf("empty SQL response received") + } + + ar, err := sqlResult.RowsAffected() + if err != nil { + return false, err + } + return ar == 1, nil +} diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go index 7855dc162dae..97d42d084276 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "testing" + "time" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/testhelpers/docker" @@ -30,56 +31,263 @@ func TestPostgreSQLBackend(t *testing.T) { table = "vault_kv_store" } + hae := os.Getenv("PGHAENABLED") + if hae == "" { + hae = "true" + } + + // Run vault tests logger.Info(fmt.Sprintf("Connection URL: %v", connURL)) - b, err := NewPostgreSQLBackend(map[string]string{ + b1, err := NewPostgreSQLBackend(map[string]string{ "connection_url": connURL, "table": table, + "ha_enabled": hae, }, logger) + if err != nil { t.Fatalf("Failed to create new backend: %v", err) } - pg := b.(*PostgreSQLBackend) - //Read postgres version to test basic connects works + b2, err := NewPostgreSQLBackend(map[string]string{ + "connection_url": connURL, + "table": table, + "ha_enabled": hae, + }, logger) + + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + pg := b1.(*PostgreSQLBackend) + + // Read postgres version to test basic connects works var pgversion string if err = pg.client.QueryRow("SELECT current_setting('server_version_num')").Scan(&pgversion); err != nil { t.Fatalf("Failed to check for Postgres version: %v", err) } logger.Info(fmt.Sprintf("Postgres Version: 
%v", pgversion)) - //Setup tables and indexes if not exists. - createTableSQL := fmt.Sprintf( - " CREATE TABLE IF NOT EXISTS %v ( "+ - " parent_path TEXT COLLATE \"C\" NOT NULL, "+ - " path TEXT COLLATE \"C\", "+ - " key TEXT COLLATE \"C\", "+ - " value BYTEA, "+ - " CONSTRAINT pkey PRIMARY KEY (path, key) "+ - " ); ", table) + setupDatabaseObjects(t, logger, pg) - _, err = pg.client.Exec(createTableSQL) + defer func() { + pg := b1.(*PostgreSQLBackend) + _, err := pg.client.Exec(fmt.Sprintf(" TRUNCATE TABLE %v ", pg.table)) + if err != nil { + t.Fatalf("Failed to truncate table: %v", err) + } + }() + + logger.Info("Running basic backend tests") + physical.ExerciseBackend(t, b1) + logger.Info("Running list prefix backend tests") + physical.ExerciseBackend_ListPrefix(t, b1) + + ha1, ok := b1.(physical.HABackend) + if !ok { + t.Fatalf("PostgreSQLDB does not implement HABackend") + } + + ha2, ok := b2.(physical.HABackend) + if !ok { + t.Fatalf("PostgreSQLDB does not implement HABackend") + } + + if ha1.HAEnabled() && ha2.HAEnabled() { + logger.Info("Running ha backend tests") + physical.ExerciseHABackend(t, ha1, ha2) + testPostgresSQLLockTTL(t, ha1) + testPostgresSQLLockRenewal(t, ha1) + } +} + +func TestPostgreSQLBackendMaxIdleConnectionsParameter(t *testing.T) { + _, err := NewPostgreSQLBackend(map[string]string{ + "connection_url": "some connection url", + "max_idle_connections": "bad param", + }, logging.NewVaultLogger(log.Debug)) + if err == nil { + t.Error("Expected invalid max_idle_connections param to return error") + } + expectedErrStr := "failed parsing max_idle_connections parameter: strconv.Atoi: parsing \"bad param\": invalid syntax" + if err.Error() != expectedErrStr { + t.Errorf("Expected: \"%s\" but found \"%s\"", expectedErrStr, err.Error()) + } +} + +// Similar to testHABackend, but using internal implementation details to +// trigger the lock failure scenario by setting the lock renew period for one +// of the locks to a higher value than the lock 
TTL. +func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) { + // Set much smaller lock times to speed up the test. + lockTTL := 3 + renewInterval := time.Second * 1 + retryInterval := time.Second * 1 + longRenewInterval := time.Duration(lockTTL*2) * time.Second + lockkey := "postgresttl" + + var leaderCh <-chan struct{} + + // Get the lock + origLock, err := ha.LockWith(lockkey, "bar") if err != nil { - t.Fatalf("Failed to create table: %v", err) + t.Fatalf("err: %v", err) } + { + // set the first lock renew period to double the expected TTL. + lock := origLock.(*PostgreSQLLock) + lock.renewInterval = longRenewInterval + lock.ttlSeconds = lockTTL - createIndexSQL := fmt.Sprintf(" CREATE INDEX IF NOT EXISTS parent_path_idx ON %v (parent_path); ", table) + // Attempt to lock + leaderCh, err = lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("failed to get leader ch") + } - _, err = pg.client.Exec(createIndexSQL) + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "bar" { + t.Fatalf("bad value: %v", val) + } + } + + // Second acquisition should succeed because the first lock should + // not renew within the 3 sec TTL. 
+ origLock2, err := ha.LockWith(lockkey, "baz") if err != nil { - t.Fatalf("Failed to create index: %v", err) + t.Fatalf("err: %v", err) } + { + lock2 := origLock2.(*PostgreSQLLock) + lock2.renewInterval = renewInterval + lock2.ttlSeconds = lockTTL + lock2.retryInterval = retryInterval - defer func() { - pg := b.(*PostgreSQLBackend) - _, err := pg.client.Exec(fmt.Sprintf(" TRUNCATE TABLE %v ", pg.table)) + // Cancel attempt in 6 sec so as not to block unit tests forever + stopCh := make(chan struct{}) + time.AfterFunc(time.Duration(lockTTL*2)*time.Second, func() { + close(stopCh) + }) + + // Attempt to lock should work + leaderCh2, err := lock2.Lock(stopCh) if err != nil { - t.Fatalf("Failed to truncate table: %v", err) + t.Fatalf("err: %v", err) } - }() + if leaderCh2 == nil { + t.Fatalf("should get leader ch") + } + defer lock2.Unlock() + + // Check the value + held, val, err := lock2.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "baz" { + t.Fatalf("bad value: %v", val) + } + } + + // The first lock should have lost the leader channel + select { + case <-time.After(longRenewInterval * 2): + t.Fatalf("original lock did not have its leader channel closed.") + case <-leaderCh: + } +} + +// Verify that once Unlock is called, we don't keep trying to renew the original +// lock. 
+func testPostgresSQLLockRenewal(t *testing.T, ha physical.HABackend) { + // Get the lock + origLock, err := ha.LockWith("pgrenewal", "bar") + if err != nil { + t.Fatalf("err: %v", err) + } + + // customize the renewal and watch intervals + lock := origLock.(*PostgreSQLLock) + // lock.renewInterval = time.Second * 1 + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("failed to get leader ch") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "bar" { + t.Fatalf("bad value: %v", val) + } + + // Release the lock, which will delete the stored item + if err := lock.Unlock(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait longer than the renewal time + time.Sleep(1500 * time.Millisecond) + + // Attempt to lock with new lock + newLock, err := ha.LockWith("pgrenewal", "baz") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Cancel attempt after lock ttl + 1s so as not to block unit tests forever + stopCh := make(chan struct{}) + timeout := time.Duration(lock.ttlSeconds)*time.Second + lock.retryInterval + time.Second + time.AfterFunc(timeout, func() { + t.Logf("giving up on lock attempt after %v", timeout) + close(stopCh) + }) + + // Attempt to lock should work + leaderCh2, err := newLock.Lock(stopCh) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh2 == nil { + t.Fatalf("should get leader ch") + } + + // Check the value + held, val, err = newLock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "baz" { + t.Fatalf("bad value: %v", val) + } - physical.ExerciseBackend(t, b) - physical.ExerciseBackend_ListPrefix(t, b) + // Cleanup + newLock.Unlock() } func prepareTestContainer(t *testing.T, logger log.Logger) (cleanup func(), retConnString string) { @@ -92,7 +300,7 @@ func 
prepareTestContainer(t *testing.T, logger log.Logger) (cleanup func(), retC if err != nil { t.Fatalf("Failed to connect to docker: %s", err) } - //using 11.1 which is currently latest, use hard version for stabillity of tests + // using 11.1 which is currently latest, use hard version for stability of tests resource, err := pool.Run("postgres", "11.1", []string{}) if err != nil { t.Fatalf("Could not start docker Postgres: %s", err) @@ -122,3 +330,42 @@ func prepareTestContainer(t *testing.T, logger log.Logger) (cleanup func(), retC return cleanup, retConnString } + +func setupDatabaseObjects(t *testing.T, logger log.Logger, pg *PostgreSQLBackend) { + var err error + // Setup tables and indexes if not exists. + createTableSQL := fmt.Sprintf( + " CREATE TABLE IF NOT EXISTS %v ( "+ + " parent_path TEXT COLLATE \"C\" NOT NULL, "+ + " path TEXT COLLATE \"C\", "+ + " key TEXT COLLATE \"C\", "+ + " value BYTEA, "+ + " CONSTRAINT pkey PRIMARY KEY (path, key) "+ + " ); ", pg.table) + + _, err = pg.client.Exec(createTableSQL) + if err != nil { + t.Fatalf("Failed to create table: %v", err) + } + + createIndexSQL := fmt.Sprintf(" CREATE INDEX IF NOT EXISTS parent_path_idx ON %v (parent_path); ", pg.table) + + _, err = pg.client.Exec(createIndexSQL) + if err != nil { + t.Fatalf("Failed to create index: %v", err) + } + + createHaTableSQL := + " CREATE TABLE IF NOT EXISTS vault_ha_locks ( " + + " ha_key TEXT COLLATE \"C\" NOT NULL, " + + " ha_identity TEXT COLLATE \"C\" NOT NULL, " + + " ha_value TEXT COLLATE \"C\", " + + " valid_until TIMESTAMP WITH TIME ZONE NOT NULL, " + + " CONSTRAINT ha_key PRIMARY KEY (ha_key) " + + " ); " + + _, err = pg.client.Exec(createHaTableSQL) + if err != nil { + t.Fatalf("Failed to create hatable: %v", err) + } +} diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go new file mode 100644 index 000000000000..d89881ef0558 --- /dev/null +++ b/physical/raft/fsm.go @@ -0,0 +1,655 @@ +package raft + +import ( + "bytes" + "context" + "fmt" + "io" + 
"math" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + metrics "github.com/armon/go-metrics" + protoio "github.com/gogo/protobuf/io" + proto "github.com/golang/protobuf/proto" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/plugin/pb" + bolt "go.etcd.io/bbolt" +) + +const ( + deleteOp uint32 = 1 << iota + putOp + restoreCallbackOp +) + +var ( + // dataBucketName is the value we use for the bucket + dataBucketName = []byte("data") + configBucketName = []byte("config") + latestIndexKey = []byte("latest_indexes") + latestConfigKey = []byte("latest_config") +) + +// Verify FSM satisfies the correct interfaces +var _ physical.Backend = (*FSM)(nil) +var _ physical.Transactional = (*FSM)(nil) +var _ raft.FSM = (*FSM)(nil) +var _ raft.ConfigurationStore = (*FSM)(nil) + +type restoreCallback func(context.Context) error + +// FSMApplyResponse is returned from an FSM apply. It indicates if the apply was +// successful or not. +type FSMApplyResponse struct { + Success bool +} + +// FSM is Vault's primary state storage. It writes updates to an bolt db file +// that lives on local disk. FSM implements raft.FSM and physical.Backend +// interfaces. +type FSM struct { + // latestIndex and latestTerm must stay at the top of this struct to be + // properly 64-bit aligned. 
+ + // latestIndex and latestTerm are the term and index of the last log we + // received + latestIndex *uint64 + latestTerm *uint64 + // latestConfig is the latest server configuration we've seen + latestConfig atomic.Value + + l sync.RWMutex + path string + logger log.Logger + permitPool *physical.PermitPool + noopRestore bool + + db *bolt.DB + + // retoreCb is called after we've restored a snapshot + restoreCb restoreCallback + + // This is just used in tests to disable to storing the latest indexes and + // configs so we can conform to the standard backend tests, which expect to + // additional state in the backend. + storeLatestState bool +} + +// NewFSM constructs a FSM using the given directory +func NewFSM(conf map[string]string, logger log.Logger) (*FSM, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + + dbPath := filepath.Join(path, "vault.db") + + boltDB, err := bolt.Open(dbPath, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + + // Initialize the latest term, index, and config values + latestTerm := new(uint64) + latestIndex := new(uint64) + latestConfig := atomic.Value{} + atomic.StoreUint64(latestTerm, 0) + atomic.StoreUint64(latestIndex, 0) + latestConfig.Store((*ConfigurationValue)(nil)) + + err = boltDB.Update(func(tx *bolt.Tx) error { + // make sure we have the necessary buckets created + _, err := tx.CreateBucketIfNotExists(dataBucketName) + if err != nil { + return fmt.Errorf("failed to create bucket: %v", err) + } + b, err := tx.CreateBucketIfNotExists(configBucketName) + if err != nil { + return fmt.Errorf("failed to create bucket: %v", err) + } + // Read in our latest index and term and populate it inmemory + val := b.Get(latestIndexKey) + if val != nil { + var latest IndexValue + err := proto.Unmarshal(val, &latest) + if err != nil { + return err + } + + atomic.StoreUint64(latestTerm, latest.Term) + atomic.StoreUint64(latestIndex, latest.Index) + } + + 
// Read in our latest config and populate it inmemory + val = b.Get(latestConfigKey) + if val != nil { + var latest ConfigurationValue + err := proto.Unmarshal(val, &latest) + if err != nil { + return err + } + + latestConfig.Store(&latest) + } + return nil + }) + if err != nil { + return nil, err + } + + storeLatestState := true + if _, ok := conf["doNotStoreLatestState"]; ok { + storeLatestState = false + } + + return &FSM{ + path: path, + logger: logger, + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + + db: boltDB, + latestTerm: latestTerm, + latestIndex: latestIndex, + latestConfig: latestConfig, + storeLatestState: storeLatestState, + }, nil +} + +// LatestState returns the latest index and configuration values we have seen on +// this FSM. +func (f *FSM) LatestState() (*IndexValue, *ConfigurationValue) { + return &IndexValue{ + Term: atomic.LoadUint64(f.latestTerm), + Index: atomic.LoadUint64(f.latestIndex), + }, f.latestConfig.Load().(*ConfigurationValue) +} + +func (f *FSM) witnessIndex(i *IndexValue) { + seen, _ := f.LatestState() + if seen.Index < i.Index { + atomic.StoreUint64(f.latestIndex, i.Index) + atomic.StoreUint64(f.latestTerm, i.Term) + } +} + +func (f *FSM) witnessSnapshot(index, term, configurationIndex uint64, configuration raft.Configuration) error { + var indexBytes []byte + latestIndex, _ := f.LatestState() + + latestIndex.Index = index + latestIndex.Term = term + + var err error + indexBytes, err = proto.Marshal(latestIndex) + if err != nil { + return err + } + + protoConfig := raftConfigurationToProtoConfiguration(configurationIndex, configuration) + configBytes, err := proto.Marshal(protoConfig) + if err != nil { + return err + } + + if f.storeLatestState { + err = f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(configBucketName) + err := b.Put(latestConfigKey, configBytes) + if err != nil { + return err + } + + err = b.Put(latestIndexKey, indexBytes) + if err != nil { + return err + } + + return nil + 
}) + if err != nil { + return err + } + } + + atomic.StoreUint64(f.latestIndex, index) + atomic.StoreUint64(f.latestTerm, term) + f.latestConfig.Store(protoConfig) + + return nil +} + +// Delete deletes the given key from the bolt file. +func (f *FSM) Delete(ctx context.Context, path string) error { + defer metrics.MeasureSince([]string{"raft", "delete"}, time.Now()) + + f.permitPool.Acquire() + defer f.permitPool.Release() + + f.l.RLock() + defer f.l.RUnlock() + + return f.db.Update(func(tx *bolt.Tx) error { + return tx.Bucket(dataBucketName).Delete([]byte(path)) + }) +} + +// Get retrieves the value at the given path from the bolt file. +func (f *FSM) Get(ctx context.Context, path string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"raft", "get"}, time.Now()) + + f.permitPool.Acquire() + defer f.permitPool.Release() + + f.l.RLock() + defer f.l.RUnlock() + + var valCopy []byte + var found bool + + err := f.db.View(func(tx *bolt.Tx) error { + + value := tx.Bucket(dataBucketName).Get([]byte(path)) + if value != nil { + found = true + valCopy = make([]byte, len(value)) + copy(valCopy, value) + } + + return nil + }) + if err != nil { + return nil, err + } + if !found { + return nil, nil + } + + return &physical.Entry{ + Key: path, + Value: valCopy, + }, nil +} + +// Put writes the given entry to the bolt file. +func (f *FSM) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"raft", "put"}, time.Now()) + + f.permitPool.Acquire() + defer f.permitPool.Release() + + f.l.RLock() + defer f.l.RUnlock() + + // Start a write transaction. + return f.db.Update(func(tx *bolt.Tx) error { + return tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value) + }) +} + +// List retrieves the set of keys with the given prefix from the bolt file. 
+func (f *FSM) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"raft", "list"}, time.Now()) + + f.permitPool.Acquire() + defer f.permitPool.Release() + + f.l.RLock() + defer f.l.RUnlock() + + var keys []string + + err := f.db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket(dataBucketName).Cursor() + + prefixBytes := []byte(prefix) + for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() { + key := string(k) + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else if i != -1 { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + return nil + }) + + return keys, err +} + +// Transaction writes all the operations in the provided transaction to the bolt +// file. +func (f *FSM) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + f.permitPool.Acquire() + defer f.permitPool.Release() + + f.l.RLock() + defer f.l.RUnlock() + + // TODO: should this be a Batch? + // Start a write transaction. + err := f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + for _, txn := range txns { + var err error + switch txn.Operation { + case physical.PutOperation: + err = b.Put([]byte(txn.Entry.Key), txn.Entry.Value) + case physical.DeleteOperation: + err = b.Delete([]byte(txn.Entry.Key)) + default: + return fmt.Errorf("%q is not a supported transaction operation", txn.Operation) + } + if err != nil { + return err + } + } + + return nil + }) + return err +} + +// Apply will apply a log value to the FSM. This is called from the raft +// library. 
+func (f *FSM) Apply(log *raft.Log) interface{} { + command := &LogData{} + err := proto.Unmarshal(log.Data, command) + if err != nil { + f.logger.Error("error proto unmarshaling log data", "error", err) + panic("error proto unmarshaling log data") + } + + f.l.RLock() + defer f.l.RUnlock() + + // Only advance latest pointer if this log has a higher index value than + // what we have seen in the past. + var logIndex []byte + latestIndex, _ := f.LatestState() + if latestIndex.Index < log.Index { + logIndex, err = proto.Marshal(&IndexValue{ + Term: log.Term, + Index: log.Index, + }) + if err != nil { + f.logger.Error("unable to marshal latest index", "error", err) + panic("unable to marshal latest index") + } + } + + err = f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + for _, op := range command.Operations { + var err error + switch op.OpType { + case putOp: + err = b.Put([]byte(op.Key), op.Value) + case deleteOp: + err = b.Delete([]byte(op.Key)) + case restoreCallbackOp: + if f.restoreCb != nil { + // Kick off the restore callback function in a go routine + go f.restoreCb(context.Background()) + } + default: + return fmt.Errorf("%q is not a supported transaction operation", op.OpType) + } + if err != nil { + return err + } + } + + // TODO: benchmark so we can know how much time this adds + if f.storeLatestState && len(logIndex) > 0 { + b := tx.Bucket(configBucketName) + err = b.Put(latestIndexKey, logIndex) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + f.logger.Error("failed to store data", "error", err) + panic("failed to store data") + } + + // If we advanced the latest value, update the in-memory representation too. 
+ if len(logIndex) > 0 { + atomic.StoreUint64(f.latestTerm, log.Term) + atomic.StoreUint64(f.latestIndex, log.Index) + } + + return &FSMApplyResponse{ + Success: true, + } +} + +type writeErrorCloser interface { + io.WriteCloser + CloseWithError(error) error +} + +// writeTo will copy the FSM's content to a remote sink. The data is written +// twice, once for use in determining various metadata attributes of the dataset +// (size, checksum, etc) and a second for the sink of the data. We also use a +// proto delimited writer so we can stream proto messages to the sink. +func (f *FSM) writeTo(ctx context.Context, metaSink writeErrorCloser, sink writeErrorCloser) { + protoWriter := protoio.NewDelimitedWriter(sink) + metadataProtoWriter := protoio.NewDelimitedWriter(metaSink) + + f.l.RLock() + defer f.l.RUnlock() + + err := f.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + + c := b.Cursor() + + // Do the first scan of the data for metadata purposes. + for k, v := c.First(); k != nil; k, v = c.Next() { + err := metadataProtoWriter.WriteMsg(&pb.StorageEntry{ + Key: string(k), + Value: v, + }) + if err != nil { + metaSink.CloseWithError(err) + return err + } + } + metaSink.Close() + + // Do the second scan for copy purposes. + for k, v := c.First(); k != nil; k, v = c.Next() { + err := protoWriter.WriteMsg(&pb.StorageEntry{ + Key: string(k), + Value: v, + }) + if err != nil { + return err + } + } + + return nil + }) + sink.CloseWithError(err) +} + +// Snapshot implements the FSM interface. It returns a noop snapshot object. +func (f *FSM) Snapshot() (raft.FSMSnapshot, error) { + return &noopSnapshotter{}, nil +} + +// SetNoopRestore is used to disable restore operations on raft startup. Because +// we are using persistent storage in our FSM we do not need to issue a restore +// on startup. 
+func (f *FSM) SetNoopRestore(enabled bool) { + f.l.Lock() + f.noopRestore = enabled + f.l.Unlock() +} + +// Restore reads data from the provided reader and writes it into the FSM. It +// first deletes the existing bucket to clear all existing data, then recreates +// it so we can copy in the snapshot. +func (f *FSM) Restore(r io.ReadCloser) error { + if f.noopRestore == true { + return nil + } + + protoReader := protoio.NewDelimitedReader(r, math.MaxInt32) + defer protoReader.Close() + + f.l.Lock() + defer f.l.Unlock() + + // Start a write transaction. + err := f.db.Update(func(tx *bolt.Tx) error { + err := tx.DeleteBucket(dataBucketName) + if err != nil { + return err + } + + b, err := tx.CreateBucket(dataBucketName) + if err != nil { + return err + } + + for { + s := new(pb.StorageEntry) + err := protoReader.ReadMsg(s) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + err = b.Put([]byte(s.Key), s.Value) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + f.logger.Error("could not restore snapshot", "error", err) + return err + } + + return nil +} + +// noopSnapshotter implements the fsm.Snapshot interface. It doesn't do anything +// since our SnapshotStore reads data out of the FSM on Open(). +type noopSnapshotter struct{} + +// Persist doesn't do anything. +func (s *noopSnapshotter) Persist(sink raft.SnapshotSink) error { + return nil +} + +// Release doesn't do anything. +func (s *noopSnapshotter) Release() {} + +// StoreConfig satisfies the raft.ConfigurationStore interface and persists the +// latest raft server configuration to the bolt file. 
+func (f *FSM) StoreConfiguration(index uint64, configuration raft.Configuration) { + f.l.RLock() + defer f.l.RUnlock() + + var indexBytes []byte + latestIndex, _ := f.LatestState() + // Only write the new index if we are advancing the pointer + if index > latestIndex.Index { + latestIndex.Index = index + + var err error + indexBytes, err = proto.Marshal(latestIndex) + if err != nil { + f.logger.Error("unable to marshal latest index", "error", err) + panic(fmt.Sprintf("unable to marshal latest index: %v", err)) + } + } + + protoConfig := raftConfigurationToProtoConfiguration(index, configuration) + configBytes, err := proto.Marshal(protoConfig) + if err != nil { + f.logger.Error("unable to marshal config", "error", err) + panic(fmt.Sprintf("unable to marshal config: %v", err)) + } + + if f.storeLatestState { + err = f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(configBucketName) + err := b.Put(latestConfigKey, configBytes) + if err != nil { + return err + } + + // TODO: benchmark so we can know how much time this adds + if len(indexBytes) > 0 { + err = b.Put(latestIndexKey, indexBytes) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + f.logger.Error("unable to store latest configuration", "error", err) + panic(fmt.Sprintf("unable to store latest configuration: %v", err)) + } + } + + f.witnessIndex(latestIndex) + f.latestConfig.Store(protoConfig) +} + +// raftConfigurationToProtoConfiguration converts a raft configuration object to +// a proto value. 
+func raftConfigurationToProtoConfiguration(index uint64, configuration raft.Configuration) *ConfigurationValue { + servers := make([]*Server, len(configuration.Servers)) + for i, s := range configuration.Servers { + servers[i] = &Server{ + Suffrage: int32(s.Suffrage), + Id: string(s.ID), + Address: string(s.Address), + } + } + return &ConfigurationValue{ + Index: index, + Servers: servers, + } +} + +// protoConfigurationToRaftConfiguration converts a proto configuration object +// to a raft object. +func protoConfigurationToRaftConfiguration(configuration *ConfigurationValue) (uint64, raft.Configuration) { + servers := make([]raft.Server, len(configuration.Servers)) + for i, s := range configuration.Servers { + servers[i] = raft.Server{ + Suffrage: raft.ServerSuffrage(s.Suffrage), + ID: raft.ServerID(s.Id), + Address: raft.ServerAddress(s.Address), + } + } + return configuration.Index, raft.Configuration{ + Servers: servers, + } +} diff --git a/physical/raft/logstore/bolt_store.go b/physical/raft/logstore/bolt_store.go new file mode 100644 index 000000000000..4b6017f59d70 --- /dev/null +++ b/physical/raft/logstore/bolt_store.go @@ -0,0 +1,271 @@ +package logstore + +import ( + "errors" + + "github.com/hashicorp/raft" + bolt "go.etcd.io/bbolt" +) + +const ( + // Permissions to use on the db file. This is only used if the + // database file does not exist and needs to be created. + dbFileMode = 0600 +) + +var ( + // Bucket names we perform transactions in + dbLogs = []byte("logs") + dbConf = []byte("conf") + + // An error indicating a given key does not exist + ErrKeyNotFound = errors.New("not found") +) + +// BoltStore provides access to BoltDB for Raft to store and retrieve +// log entries. It also provides key/value storage, and can be used as +// a LogStore and StableStore. +type BoltStore struct { + // conn is the underlying handle to the db. 
+ conn *bolt.DB + + // The path to the Bolt database file + path string +} + +// Options contains all the configuraiton used to open the BoltDB +type Options struct { + // Path is the file path to the BoltDB to use + Path string + + // BoltOptions contains any specific BoltDB options you might + // want to specify [e.g. open timeout] + BoltOptions *bolt.Options + + // NoSync causes the database to skip fsync calls after each + // write to the log. This is unsafe, so it should be used + // with caution. + NoSync bool +} + +// readOnly returns true if the contained bolt options say to open +// the DB in readOnly mode [this can be useful to tools that want +// to examine the log] +func (o *Options) readOnly() bool { + return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly +} + +// NewBoltStore takes a file path and returns a connected Raft backend. +func NewBoltStore(path string) (*BoltStore, error) { + return New(Options{Path: path}) +} + +// New uses the supplied options to open the BoltDB and prepare it for use as a raft backend. +func New(options Options) (*BoltStore, error) { + // Try to connect + handle, err := bolt.Open(options.Path, dbFileMode, options.BoltOptions) + if err != nil { + return nil, err + } + handle.NoSync = options.NoSync + + // Create the new store + store := &BoltStore{ + conn: handle, + path: options.Path, + } + + // If the store was opened read-only, don't try and create buckets + if !options.readOnly() { + // Set up our buckets + if err := store.initialize(); err != nil { + store.Close() + return nil, err + } + } + return store, nil +} + +// initialize is used to set up all of the buckets. 
+func (b *BoltStore) initialize() error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Create all the buckets + if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { + return err + } + if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { + return err + } + + return tx.Commit() +} + +// Close is used to gracefully close the DB connection. +func (b *BoltStore) Close() error { + return b.conn.Close() +} + +// FirstIndex returns the first known index from the Raft log. +func (b *BoltStore) FirstIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if first, _ := curs.First(); first == nil { + return 0, nil + } else { + return bytesToUint64(first), nil + } +} + +// LastIndex returns the last known index from the Raft log. +func (b *BoltStore) LastIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if last, _ := curs.Last(); last == nil { + return 0, nil + } else { + return bytesToUint64(last), nil + } +} + +// GetLog is used to retrieve a log from BoltDB at a given index. 
+func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error { + tx, err := b.conn.Begin(false) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbLogs) + val := bucket.Get(uint64ToBytes(idx)) + + if val == nil { + return raft.ErrLogNotFound + } + + return decodeMsgPack(val, log) +} + +// StoreLog is used to store a single raft log +func (b *BoltStore) StoreLog(log *raft.Log) error { + return b.StoreLogs([]*raft.Log{log}) +} + +// StoreLogs is used to store a set of raft logs +func (b *BoltStore) StoreLogs(logs []*raft.Log) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + for _, log := range logs { + key := uint64ToBytes(log.Index) + val, err := encodeMsgPack(log) + if err != nil { + return err + } + + bucket := tx.Bucket(dbLogs) + if err := bucket.Put(key, val.Bytes()); err != nil { + return err + } + } + + return tx.Commit() +} + +// DeleteRange is used to delete logs within a given range inclusively. +func (b *BoltStore) DeleteRange(min, max uint64) error { + minKey := uint64ToBytes(min) + + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { + // Handle out-of-range log index + if bytesToUint64(k) > max { + break + } + + // Delete in-range log index + if err := curs.Delete(); err != nil { + return err + } + } + + return tx.Commit() +} + +// Set is used to set a key/value set outside of the raft log +func (b *BoltStore) Set(k, v []byte) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbConf) + if err := bucket.Put(k, v); err != nil { + return err + } + + return tx.Commit() +} + +// Get is used to retrieve a value from the k/v store by key +func (b *BoltStore) Get(k []byte) ([]byte, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return nil, err + } + defer 
tx.Rollback() + + bucket := tx.Bucket(dbConf) + val := bucket.Get(k) + + if val == nil { + return nil, ErrKeyNotFound + } + + return append([]byte(nil), val...), nil +} + +// SetUint64 is like Set, but handles uint64 values +func (b *BoltStore) SetUint64(key []byte, val uint64) error { + return b.Set(key, uint64ToBytes(val)) +} + +// GetUint64 is like Get, but handles uint64 values +func (b *BoltStore) GetUint64(key []byte) (uint64, error) { + val, err := b.Get(key) + if err != nil { + return 0, err + } + return bytesToUint64(val), nil +} + +// Sync performs an fsync on the database file handle. This is not necessary +// under normal operation unless NoSync is enabled, in which this forces the +// database file to sync against the disk. +func (b *BoltStore) Sync() error { + return b.conn.Sync() +} diff --git a/physical/raft/logstore/util.go b/physical/raft/logstore/util.go new file mode 100644 index 000000000000..d35dde8cb20e --- /dev/null +++ b/physical/raft/logstore/util.go @@ -0,0 +1,37 @@ +package logstore + +import ( + "bytes" + "encoding/binary" + + "github.com/hashicorp/go-msgpack/codec" +) + +// Decode reverses the encode operation on a byte slice input +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// Converts bytes to an integer +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Converts a uint to a byte slice +func uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} diff --git a/physical/raft/raft.go b/physical/raft/raft.go new file mode 100644 index 000000000000..a24120aa3115 
--- /dev/null +++ b/physical/raft/raft.go @@ -0,0 +1,950 @@ +package raft + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + proto "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/raft" + snapshot "github.com/hashicorp/raft-snapshot" + raftboltdb "github.com/hashicorp/vault/physical/raft/logstore" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/seal" + + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify RaftBackend satisfies the correct interfaces +var _ physical.Backend = (*RaftBackend)(nil) +var _ physical.Transactional = (*RaftBackend)(nil) + +var ( + // raftLogCacheSize is the maximum number of logs to cache in-memory. + // This is used to reduce disk I/O for the recently committed entries. + raftLogCacheSize = 512 + + raftState = "raft/" + peersFileName = "peers.json" + snapshotsRetained = 2 + + // Set a max size of 512kb + maxCommandSizeBytes = 512 * 1024 + + // ErrCommandTooLarge is returned when the backend tries to apply a log + // greater than the max allowed size. + ErrCommandTooLarge = fmt.Errorf("%s: exceeds %d byte limit", physical.ErrValueTooLarge, maxCommandSizeBytes) + + restoreOpDelayDuration = 5 * time.Second +) + +// RaftBackend implements the backend interfaces and uses the raft protocol to +// persist writes to the FSM. +type RaftBackend struct { + logger log.Logger + conf map[string]string + l sync.RWMutex + + // fsm is the state store for vault's data + fsm *FSM + + // raft is the instance of raft we will operate on. + raft *raft.Raft + + // raftNotifyCh is used to receive updates about leadership changes + // regarding this node. + raftNotifyCh chan bool + + // streamLayer is the network layer used to connect the nodes in the raft + // cluster. 
+ streamLayer *raftLayer + + // raftTransport is the transport layer that the raft library uses for RPC + // communication. + raftTransport raft.Transport + + // snapStore is our snapshot mechanism. + snapStore raft.SnapshotStore + + // logStore is used by the raft library to store the raft logs in durable + // storage. + logStore raft.LogStore + + // stableStore is used by the raft library to store additional metadata in + // durable storage. + stableStore raft.StableStore + + // bootstrapConfig is only set when this node needs to be bootstrapped upon + // startup. + bootstrapConfig *raft.Configuration + + // dataDir is the location on the local filesystem that raft and FSM data + // will be stored. + dataDir string + + // localID is the ID for this node. This can either be configured in the + // config file, via a file on disk, or is otherwise randomly generated. + localID string + + // serverAddressProvider is used to map server IDs to addresses. + serverAddressProvider raft.ServerAddressProvider +} + +// EnsurePath is used to make sure a path exists +func EnsurePath(path string, dir bool) error { + if !dir { + path = filepath.Dir(path) + } + return os.MkdirAll(path, 0755) +} + +// NewRaftBackend constructs a RaftBackend using the given directory +func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Create the FSM. + var err error + fsm, err := NewFSM(conf, logger.Named("fsm")) + if err != nil { + return nil, fmt.Errorf("failed to create fsm: %v", err) + } + + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + + // Build an all in-memory setup for dev mode, otherwise prepare a full + // disk-based setup. + var log raft.LogStore + var stable raft.StableStore + var snap raft.SnapshotStore + var devMode bool + if devMode { + store := raft.NewInmemStore() + stable = store + log = store + snap = raft.NewInmemSnapshotStore() + } else { + // Create the base raft path. 
+ path := filepath.Join(path, raftState) + if err := EnsurePath(path, true); err != nil { + return nil, err + } + + // Create the backend raft store for logs and stable storage. + store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db")) + if err != nil { + return nil, err + } + stable = store + + // Wrap the store in a LogCache to improve performance. + cacheStore, err := raft.NewLogCache(raftLogCacheSize, store) + if err != nil { + return nil, err + } + log = cacheStore + + // Create the snapshot store. + snapshots, err := NewBoltSnapshotStore(path, snapshotsRetained, logger.Named("snapshot"), fsm) + if err != nil { + return nil, err + } + snap = snapshots + } + + var localID string + { + // Determine the local node ID + localID = conf["node_id"] + + // If not set in the config check the "node-id" file. + if len(localID) == 0 { + localIDRaw, err := ioutil.ReadFile(filepath.Join(path, "node-id")) + switch { + case err == nil: + if len(localIDRaw) > 0 { + localID = string(localIDRaw) + } + case os.IsNotExist(err): + default: + return nil, err + } + } + + // If the file didn't exist generate a UUID and persist it to tne + // "node-id" file. 
+ if len(localID) == 0 {
+ id, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0600); err != nil {
+ return nil, err
+ }
+
+ localID = id
+ }
+ }
+
+ return &RaftBackend{
+ logger: logger,
+ fsm: fsm,
+ conf: conf,
+ logStore: log,
+ stableStore: stable,
+ snapStore: snap,
+ dataDir: path,
+ localID: localID,
+ }, nil
+}
+
+// RaftServer has information about a server in the Raft configuration
+type RaftServer struct {
+ // NodeID is the name of the server
+ NodeID string `json:"node_id"`
+
+ // Address is the IP:port of the server, used for Raft communications
+ Address string `json:"address"`
+
+ // Leader is true if this server is the current cluster leader
+ Leader bool `json:"leader"`
+
+ // Protocol version is the raft protocol version used by the server
+ ProtocolVersion string `json:"protocol_version"`
+
+ // Voter is true if this server has a vote in the cluster. This might
+ // be false if the server is staging and still coming online.
+ Voter bool `json:"voter"`
+}
+
+// RaftConfigurationResponse is returned when querying for the current Raft
+// configuration.
+type RaftConfigurationResponse struct {
+ // Servers has the list of servers in the Raft configuration.
+ Servers []*RaftServer `json:"servers"`
+
+ // Index has the Raft index of this configuration.
+ Index uint64 `json:"index"`
+}
+
+// Peer defines the ID and Address for a given member of the raft cluster.
+type Peer struct {
+ ID string `json:"id"`
+ Address string `json:"address"`
+}
+
+// NodeID returns the identifier of the node
+func (b *RaftBackend) NodeID() string {
+ return b.localID
+}
+
+// Initialized tells if raft is running or not
+func (b *RaftBackend) Initialized() bool {
+ b.l.RLock()
+ init := b.raft != nil
+ b.l.RUnlock()
+ return init
+}
+
+// SetTLSKeyring is used to install a new keyring.
 If the active key has changed
+// it will also close any network connections or streams forcing a reconnect
+// with the new key.
+func (b *RaftBackend) SetTLSKeyring(keyring *RaftTLSKeyring) error {
+ b.l.RLock()
+ err := b.streamLayer.setTLSKeyring(keyring)
+ b.l.RUnlock()
+
+ return err
+}
+
+// SetServerAddressProvider sets the address provider for determining the raft
+// node addresses. This is currently only used in tests.
+func (b *RaftBackend) SetServerAddressProvider(provider raft.ServerAddressProvider) {
+ b.l.Lock()
+ b.serverAddressProvider = provider
+ b.l.Unlock()
+}
+
+// Bootstrap prepares the given peers to be part of the raft cluster
+func (b *RaftBackend) Bootstrap(ctx context.Context, peers []Peer) error {
+ b.l.Lock()
+ defer b.l.Unlock()
+
+ hasState, err := raft.HasExistingState(b.logStore, b.stableStore, b.snapStore)
+ if err != nil {
+ return err
+ }
+
+ if hasState {
+ return errors.New("error bootstrapping cluster: cluster already has state")
+ }
+
+ raftConfig := &raft.Configuration{
+ Servers: make([]raft.Server, len(peers)),
+ }
+
+ for i, p := range peers {
+ raftConfig.Servers[i] = raft.Server{
+ ID: raft.ServerID(p.ID),
+ Address: raft.ServerAddress(p.Address),
+ }
+ }
+
+ // Store the config for later use
+ b.bootstrapConfig = raftConfig
+ return nil
+}
+
+// SetRestoreCallback sets the callback to be used when a restoreCallbackOp is
+// processed through the FSM.
+func (b *RaftBackend) SetRestoreCallback(restoreCb restoreCallback) { + b.fsm.l.Lock() + b.fsm.restoreCb = restoreCb + b.fsm.l.Unlock() +} + +func (b *RaftBackend) applyConfigSettings(config *raft.Config) error { + config.Logger = b.logger + multiplierRaw, ok := b.conf["performance_multiplier"] + multiplier := 5 + if ok { + var err error + multiplier, err = strconv.Atoi(multiplierRaw) + if err != nil { + return err + } + } + config.ElectionTimeout = config.ElectionTimeout * time.Duration(multiplier) + config.HeartbeatTimeout = config.HeartbeatTimeout * time.Duration(multiplier) + config.LeaderLeaseTimeout = config.LeaderLeaseTimeout * time.Duration(multiplier) + + snapThresholdRaw, ok := b.conf["snapshot_threshold"] + if ok { + var err error + snapThreshold, err := strconv.Atoi(snapThresholdRaw) + if err != nil { + return err + } + config.SnapshotThreshold = uint64(snapThreshold) + } + + trailingLogsRaw, ok := b.conf["trailing_logs"] + if ok { + var err error + trailingLogs, err := strconv.Atoi(trailingLogsRaw) + if err != nil { + return err + } + config.TrailingLogs = uint64(trailingLogs) + } + + return nil +} + +// SetupCluster starts the raft cluster and enables the networking needed for +// the raft nodes to communicate. +func (b *RaftBackend) SetupCluster(ctx context.Context, raftTLSKeyring *RaftTLSKeyring, clusterListener cluster.ClusterHook) error { + b.logger.Trace("setting up raft cluster") + + b.l.Lock() + defer b.l.Unlock() + + // We are already unsealed + if b.raft != nil { + b.logger.Debug("raft already started, not setting up cluster") + return nil + } + + if len(b.localID) == 0 { + return errors.New("no local node id configured") + } + + // Setup the raft config + raftConfig := raft.DefaultConfig() + if err := b.applyConfigSettings(raftConfig); err != nil { + return err + } + + switch { + case raftTLSKeyring == nil && clusterListener == nil: + // If we don't have a provided network we use an in-memory one. 
+ // This allows us to bootstrap a node without bringing up a cluster + // network. This will be true during bootstrap, tests and dev modes. + _, b.raftTransport = raft.NewInmemTransportWithTimeout(raft.ServerAddress(b.localID), time.Second) + case raftTLSKeyring == nil: + return errors.New("no keyring provided") + case clusterListener == nil: + return errors.New("no cluster listener provided") + default: + // Load the base TLS config from the cluster listener. + baseTLSConfig, err := clusterListener.TLSConfig(ctx) + if err != nil { + return err + } + + // Set the local address and localID in the streaming layer and the raft config. + streamLayer, err := NewRaftLayer(b.logger.Named("stream"), raftTLSKeyring, clusterListener.Addr(), baseTLSConfig) + if err != nil { + return err + } + transConfig := &raft.NetworkTransportConfig{ + Stream: streamLayer, + MaxPool: 3, + Timeout: 10 * time.Second, + ServerAddressProvider: b.serverAddressProvider, + } + transport := raft.NewNetworkTransportWithConfig(transConfig) + + b.streamLayer = streamLayer + b.raftTransport = transport + } + + raftConfig.LocalID = raft.ServerID(b.localID) + + // Set up a channel for reliable leader notifications. + raftNotifyCh := make(chan bool, 1) + raftConfig.NotifyCh = raftNotifyCh + + // If we have a bootstrapConfig set we should bootstrap now. + if b.bootstrapConfig != nil { + bootstrapConfig := b.bootstrapConfig + // Unset the bootstrap config + b.bootstrapConfig = nil + + // Bootstrap raft with our known cluster members. + if err := raft.BootstrapCluster(raftConfig, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *bootstrapConfig); err != nil { + return err + } + // If we are the only node we should start as the leader. + if len(bootstrapConfig.Servers) == 1 { + raftConfig.StartAsLeader = true + } + } + + // Setup the Raft store. 
+ b.fsm.SetNoopRestore(true) + + raftPath := filepath.Join(b.dataDir, raftState) + peersFile := filepath.Join(raftPath, peersFileName) + _, err := os.Stat(peersFile) + if err == nil { + b.logger.Info("raft recovery initiated", "recovery_file", peersFileName) + + recoveryConfig, err := raft.ReadConfigJSON(peersFile) + if err != nil { + return errwrap.Wrapf("raft recovery failed to parse peers.json: {{err}}", err) + } + + b.logger.Info("raft recovery: found new config", "config", recoveryConfig) + err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, recoveryConfig) + if err != nil { + return errwrap.Wrapf("raft recovery failed: {{err}}", err) + } + + err = os.Remove(peersFile) + if err != nil { + return errwrap.Wrapf("raft recovery failed to delete peers.json; please delete manually: {{err}}", err) + } + b.logger.Info("raft recovery deleted peers.json") + } + + raftObj, err := raft.NewRaft(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport) + b.fsm.SetNoopRestore(false) + if err != nil { + return err + } + b.raft = raftObj + b.raftNotifyCh = raftNotifyCh + + if b.streamLayer != nil { + // Add Handler to the cluster. + clusterListener.AddHandler(consts.RaftStorageALPN, b.streamLayer) + + // Add Client to the cluster. 
+ clusterListener.AddClient(consts.RaftStorageALPN, b.streamLayer) + } + + return nil +} + +// TeardownCluster shuts down the raft cluster +func (b *RaftBackend) TeardownCluster(clusterListener cluster.ClusterHook) error { + if clusterListener != nil { + clusterListener.StopHandler(consts.RaftStorageALPN) + clusterListener.RemoveClient(consts.RaftStorageALPN) + } + + b.l.Lock() + future := b.raft.Shutdown() + b.raft = nil + b.l.Unlock() + + return future.Error() +} + +// AppliedIndex returns the latest index applied to the FSM +func (b *RaftBackend) AppliedIndex() uint64 { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return 0 + } + + return b.raft.AppliedIndex() +} + +// RemovePeer removes the given peer ID from the raft cluster. If the node is +// ourselves we will give up leadership. +func (b *RaftBackend) RemovePeer(ctx context.Context, peerID string) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + future := b.raft.RemoveServer(raft.ServerID(peerID), 0, 0) + + return future.Error() +} + +func (b *RaftBackend) GetConfiguration(ctx context.Context) (*RaftConfigurationResponse, error) { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return nil, errors.New("raft storage is not initialized") + } + + future := b.raft.GetConfiguration() + if err := future.Error(); err != nil { + return nil, err + } + + config := &RaftConfigurationResponse{ + Index: future.Index(), + } + + for _, server := range future.Configuration().Servers { + entry := &RaftServer{ + NodeID: string(server.ID), + Address: string(server.Address), + Leader: server.Address == b.raft.Leader(), + Voter: server.Suffrage == raft.Voter, + ProtocolVersion: strconv.Itoa(raft.ProtocolVersionMax), + } + config.Servers = append(config.Servers, entry) + } + + return config, nil +} + +// AddPeer adds a new server to the raft cluster +func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr 
string) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + b.logger.Debug("adding raft peer", "node_id", peerID, "cluster_addr", clusterAddr) + + future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0) + + return future.Error() +} + +// Peers returns all the servers present in the raft cluster +func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return nil, errors.New("raft storage backend is not initialized") + } + + future := b.raft.GetConfiguration() + if err := future.Error(); err != nil { + return nil, err + } + + ret := make([]Peer, len(future.Configuration().Servers)) + for i, s := range future.Configuration().Servers { + ret[i] = Peer{ + ID: string(s.ID), + Address: string(s.Address), + } + } + + return ret, nil +} + +// Snapshot takes a raft snapshot, packages it into a archive file and writes it +// to the provided writer. Seal access is used to encrypt the SHASUM file so we +// can validate the snapshot was taken using the same master keys or not. +func (b *RaftBackend) Snapshot(out io.Writer, access seal.Access) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage backend is sealed") + } + + // If we have access to the seal create a sealer object + var s snapshot.Sealer + if access != nil { + s = &sealer{ + access: access, + } + } + + snap, err := snapshot.NewWithSealer(b.logger.Named("snapshot"), b.raft, s) + if err != nil { + return err + } + defer snap.Close() + + _, err = io.Copy(out, snap) + if err != nil { + return err + } + + return nil +} + +// WriteSnapshotToTemp reads a snapshot archive off the provided reader, +// extracts the data and writes the snapshot to a temporary file. 
The seal +// access is used to decrypt the SHASUM file in the archive to ensure this +// snapshot has the same master key as the running instance. If the provided +// access is nil then it will skip that validation. +func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { + b.l.RLock() + defer b.l.RUnlock() + + var metadata raft.SnapshotMeta + if b.raft == nil { + return nil, nil, metadata, errors.New("raft storage backend is sealed") + } + + // If we have access to the seal create a sealer object + var s snapshot.Sealer + if access != nil { + s = &sealer{ + access: access, + } + } + + snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, s) + return snap, cleanup, metadata, err +} + +// RestoreSnapshot applies the provided snapshot metadata and snapshot data to +// raft. +func (b *RaftBackend) RestoreSnapshot(ctx context.Context, metadata raft.SnapshotMeta, snap io.Reader) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + if err := b.raft.Restore(&metadata, snap, 0); err != nil { + b.logger.Named("snapshot").Error("failed to restore snapshot", "error", err) + return err + } + + // Apply a log that tells the follower nodes to run the restore callback + // function. This is done after the restore call so we can be sure the + // snapshot applied to a quorum of nodes. + command := &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: restoreCallbackOp, + }, + }, + } + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + + // Do a best-effort attempt to let the standbys apply the restoreCallbackOp + // before we continue. 
+ time.Sleep(restoreOpDelayDuration) + return err +} + +// Delete inserts an entry in the log to delete the given path +func (b *RaftBackend) Delete(ctx context.Context, path string) error { + command := &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: deleteOp, + Key: path, + }, + }, + } + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + return err +} + +// Get returns the value corresponding to the given path from the fsm +func (b *RaftBackend) Get(ctx context.Context, path string) (*physical.Entry, error) { + if b.fsm == nil { + return nil, errors.New("raft: fsm not configured") + } + + return b.fsm.Get(ctx, path) +} + +// Put inserts an entry in the log for the put operation +func (b *RaftBackend) Put(ctx context.Context, entry *physical.Entry) error { + command := &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: putOp, + Key: entry.Key, + Value: entry.Value, + }, + }, + } + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + return err +} + +// List enumerates all the items under the prefix from the fsm +func (b *RaftBackend) List(ctx context.Context, prefix string) ([]string, error) { + if b.fsm == nil { + return nil, errors.New("raft: fsm not configured") + } + + return b.fsm.List(ctx, prefix) +} + +// Transaction applies all the given operations into a single log and +// applies it. 
+func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
+ command := &LogData{
+ Operations: make([]*LogOperation, len(txns)),
+ }
+ for i, txn := range txns {
+ op := &LogOperation{}
+ switch txn.Operation {
+ case physical.PutOperation:
+ op.OpType = putOp
+ op.Key = txn.Entry.Key
+ op.Value = txn.Entry.Value
+ case physical.DeleteOperation:
+ op.OpType = deleteOp
+ op.Key = txn.Entry.Key
+ default:
+ return fmt.Errorf("%q is not a supported transaction operation", txn.Operation)
+ }
+
+ command.Operations[i] = op
+ }
+
+ b.l.RLock()
+ err := b.applyLog(ctx, command)
+ b.l.RUnlock()
+ return err
+}
+
+// applyLog will take a given log command and apply it to the raft log. applyLog
+// doesn't return until the log has been applied to a quorum of servers and is
+// persisted to the local FSM. Caller should hold the backend's read lock.
+func (b *RaftBackend) applyLog(ctx context.Context, command *LogData) error {
+ if b.raft == nil {
+ return errors.New("raft storage backend is not initialized")
+ }
+
+ commandBytes, err := proto.Marshal(command)
+ if err != nil {
+ return err
+ }
+
+ // Restrict the value to maxCommandSizeBytes in length
+ if len(commandBytes) > maxCommandSizeBytes {
+ return ErrCommandTooLarge
+ }
+
+ applyFuture := b.raft.Apply(commandBytes, 0)
+ err = applyFuture.Error()
+ if err != nil {
+ return err
+ }
+
+ if resp, ok := applyFuture.Response().(*FSMApplyResponse); !ok || !resp.Success {
+ return errors.New("could not apply data")
+ }
+
+ return nil
+}
+
+// HAEnabled is the implementation of the HABackend interface
+func (b *RaftBackend) HAEnabled() bool { return true }
+
+// LockWith is the implementation of the HABackend interface
+func (b *RaftBackend) LockWith(key, value string) (physical.Lock, error) {
+ return &RaftLock{
+ key: key,
+ value: []byte(value),
+ b: b,
+ }, nil
+}
+
+// RaftLock implements the physical Lock interface and enables HA for this
+// backend.
The Lock uses the raftNotifyCh for receiving leadership edge +// triggers. Vault's active duty matches raft's leadership. +type RaftLock struct { + key string + value []byte + + b *RaftBackend +} + +// monitorLeadership waits until we receive an update on the raftNotifyCh and +// closes the leaderLost channel. +func (l *RaftLock) monitorLeadership(stopCh <-chan struct{}, leaderNotifyCh <-chan bool) <-chan struct{} { + leaderLost := make(chan struct{}) + go func() { + select { + case <-leaderNotifyCh: + close(leaderLost) + case <-stopCh: + } + }() + return leaderLost +} + +// Lock blocks until we become leader or are shutdown. It returns a channel that +// is closed when we detect a loss of leadership. +func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + l.b.l.RLock() + + // Cache the notifyCh locally + leaderNotifyCh := l.b.raftNotifyCh + + // Check to see if we are already leader. + if l.b.raft.State() == raft.Leader { + err := l.b.applyLog(context.Background(), &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: putOp, + Key: l.key, + Value: l.value, + }, + }, + }) + l.b.l.RUnlock() + if err != nil { + return nil, err + } + + return l.monitorLeadership(stopCh, leaderNotifyCh), nil + } + l.b.l.RUnlock() + + for { + select { + case isLeader := <-leaderNotifyCh: + if isLeader { + // We are leader, set the key + l.b.l.RLock() + err := l.b.applyLog(context.Background(), &LogData{ + Operations: []*LogOperation{ + &LogOperation{ + OpType: putOp, + Key: l.key, + Value: l.value, + }, + }, + }) + l.b.l.RUnlock() + if err != nil { + return nil, err + } + + return l.monitorLeadership(stopCh, leaderNotifyCh), nil + } + case <-stopCh: + return nil, nil + } + } + + return nil, nil +} + +// Unlock gives up leadership. +func (l *RaftLock) Unlock() error { + return l.b.raft.LeadershipTransfer().Error() +} + +// Value reads the value of the lock. This informs us who is currently leader. 
+func (l *RaftLock) Value() (bool, string, error) { + e, err := l.b.Get(context.Background(), l.key) + if err != nil { + return false, "", err + } + if e == nil { + return false, "", nil + } + + value := string(e.Value) + // TODO: how to tell if held? + return true, value, nil +} + +// sealer implements the snapshot.Sealer interface and is used in the snapshot +// process for encrypting/decrypting the SHASUM file in snapshot archives. +type sealer struct { + access seal.Access +} + +// Seal encrypts the data with using the seal access object. +func (s sealer) Seal(ctx context.Context, pt []byte) ([]byte, error) { + if s.access == nil { + return nil, errors.New("no seal access available") + } + eblob, err := s.access.Encrypt(ctx, pt) + if err != nil { + return nil, err + } + + return proto.Marshal(eblob) +} + +// Open decrypts the data using the seal access object. +func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) { + if s.access == nil { + return nil, errors.New("no seal access available") + } + + var eblob physical.EncryptedBlobInfo + err := proto.Unmarshal(ct, &eblob) + if err != nil { + return nil, err + } + + return s.access.Decrypt(ctx, &eblob) +} diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go new file mode 100644 index 000000000000..b5eb7130efdf --- /dev/null +++ b/physical/raft/raft_test.go @@ -0,0 +1,454 @@ +package raft + +import ( + "context" + "crypto/md5" + "encoding/base64" + fmt "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/golang/protobuf/proto" + hclog "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/physical" + bolt "go.etcd.io/bbolt" +) + +func getRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + 
t.Fatal(err) + } + t.Logf("raft dir: %s", raftDir) + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.Trace, + }) + logger.Info("raft dir", "dir", raftDir) + + conf := map[string]string{ + "path": raftDir, + "trailing_logs": "100", + } + + if noStoreState { + conf["doNotStoreLatestState"] = "" + } + + backendRaw, err := NewRaftBackend(conf, logger) + if err != nil { + t.Fatal(err) + } + backend := backendRaw.(*RaftBackend) + + if bootstrap { + err = backend.Bootstrap(context.Background(), []Peer{Peer{ID: backend.NodeID(), Address: backend.NodeID()}}) + if err != nil { + t.Fatal(err) + } + + err = backend.SetupCluster(context.Background(), nil, nil) + if err != nil { + t.Fatal(err) + } + + } + + return backend, raftDir +} + +func compareFSMs(t *testing.T, fsm1, fsm2 *FSM) { + t.Helper() + index1, config1 := fsm1.LatestState() + index2, config2 := fsm2.LatestState() + + if !proto.Equal(index1, index2) { + t.Fatalf("indexes did not match: %+v != %+v", index1, index2) + } + if !proto.Equal(config1, config2) { + t.Fatalf("configs did not match: %+v != %+v", config1, config2) + } + + compareDBs(t, fsm1.db, fsm2.db) +} + +func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB) { + db1 := make(map[string]string) + db2 := make(map[string]string) + + err := boltDB1.View(func(tx *bolt.Tx) error { + + c := tx.Cursor() + for bucketName, _ := c.First(); bucketName != nil; bucketName, _ = c.Next() { + b := tx.Bucket(bucketName) + + cBucket := b.Cursor() + + for k, v := cBucket.First(); k != nil; k, v = cBucket.Next() { + db1[string(k)] = base64.StdEncoding.EncodeToString(v) + } + } + + return nil + }) + + if err != nil { + t.Fatal(err) + } + + err = boltDB2.View(func(tx *bolt.Tx) error { + c := tx.Cursor() + for bucketName, _ := c.First(); bucketName != nil; bucketName, _ = c.Next() { + b := tx.Bucket(bucketName) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + db2[string(k)] = base64.StdEncoding.EncodeToString(v) + } + } + + 
return nil + }) + + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(db1, db2); diff != nil { + t.Fatal(diff) + } +} + +func TestRaft_Backend(t *testing.T) { + b, dir := getRaft(t, true, true) + defer os.RemoveAll(dir) + + physical.ExerciseBackend(t, b) +} + +func TestRaft_Backend_ListPrefix(t *testing.T) { + b, dir := getRaft(t, true, true) + defer os.RemoveAll(dir) + + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestRaft_TransactionalBackend(t *testing.T) { + b, dir := getRaft(t, true, true) + defer os.RemoveAll(dir) + + physical.ExerciseTransactionalBackend(t, b) +} + +func TestRaft_HABackend(t *testing.T) { + t.Skip() + raft, dir := getRaft(t, true, true) + defer os.RemoveAll(dir) + raft2, dir2 := getRaft(t, false, true) + defer os.RemoveAll(dir2) + + // Add raft2 to the cluster + addPeer(t, raft, raft2) + + physical.ExerciseHABackend(t, raft, raft2) +} + +func TestRaft_Backend_ThreeNode(t *testing.T) { + raft1, dir := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) + defer os.RemoveAll(dir) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + // Add raft3 to the cluster + addPeer(t, raft1, raft3) + + physical.ExerciseBackend(t, raft1) + + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) +} + +func TestRaft_Recovery(t *testing.T) { + // Create 4 raft nodes + raft1, dir1 := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) + raft4, dir4 := getRaft(t, false, true) + defer os.RemoveAll(dir1) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + defer os.RemoveAll(dir4) + + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) + addPeer(t, raft1, raft4) + + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) + + time.Sleep(10 * 
time.Second) + + // Bring down all nodes + raft1.TeardownCluster(nil) + raft2.TeardownCluster(nil) + raft3.TeardownCluster(nil) + raft4.TeardownCluster(nil) + + // Prepare peers.json + type RecoveryPeer struct { + ID string `json:"id"` + Address string `json:"address"` + NonVoter bool `json: non_voter` + } + + // Leave out node 1 during recovery + peersList := make([]*RecoveryPeer, 0, 3) + peersList = append(peersList, &RecoveryPeer{ + ID: raft1.NodeID(), + Address: raft1.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft2.NodeID(), + Address: raft2.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft4.NodeID(), + Address: raft4.NodeID(), + NonVoter: false, + }) + + peersJSONBytes, err := jsonutil.EncodeJSON(peersList) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0644) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0644) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0644) + if err != nil { + t.Fatal(err) + } + + // Bring up the nodes again + raft1.SetupCluster(context.Background(), nil, nil) + raft2.SetupCluster(context.Background(), nil, nil) + raft4.SetupCluster(context.Background(), nil, nil) + + peers, err := raft1.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 3 { + t.Fatalf("failed to recover the cluster") + } + + time.Sleep(10 * time.Second) + + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft4.fsm) +} + +func TestRaft_TransactionalBackend_ThreeNode(t *testing.T) { + raft1, dir := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) + defer os.RemoveAll(dir) + defer os.RemoveAll(dir2) + defer 
os.RemoveAll(dir3) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + // Add raft3 to the cluster + addPeer(t, raft1, raft3) + + physical.ExerciseTransactionalBackend(t, raft1) + + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) +} + +func TestRaft_Backend_MaxSize(t *testing.T) { + // Set the max size a little lower for the test + maxCommandSizeBytes = 10 * 1024 + + b, dir := getRaft(t, true, true) + defer os.RemoveAll(dir) + + // Test a value slightly below the max size + value := make([]byte, maxCommandSizeBytes-100) + err := b.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: value, + }) + if err != nil { + t.Fatal(err) + } + + // Test value at max size, should error + value = make([]byte, maxCommandSizeBytes) + err = b.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: value, + }) + if err != ErrCommandTooLarge { + t.Fatal(err) + } +} + +func TestRaft_Backend_Performance(t *testing.T) { + b, dir := getRaft(t, true, false) + defer os.RemoveAll(dir) + + defaultConfig := raft.DefaultConfig() + + localConfig := raft.DefaultConfig() + b.applyConfigSettings(localConfig) + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "5", + } + + localConfig = raft.DefaultConfig() + b.applyConfigSettings(localConfig) + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if 
localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "1", + } + + localConfig = raft.DefaultConfig() + b.applyConfigSettings(localConfig) + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { + t.Fatalf("bad config: %v", localConfig) + } + +} + +func BenchmarkDB_Puts(b *testing.B) { + raft, dir := getRaft(b, true, false) + defer os.RemoveAll(dir) + raft2, dir2 := getRaft(b, true, false) + defer os.RemoveAll(dir2) + + bench := func(b *testing.B, s physical.Backend, dataSize int) { + data, err := uuid.GenerateRandomBytes(dataSize) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + err := s.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } + } + + b.Run("256b", func(b *testing.B) { bench(b, raft, 256) }) + b.Run("256kb", func(b *testing.B) { bench(b, raft2, 256*1024) }) +} + +func BenchmarkDB_Snapshot(b *testing.B) { + raft, dir := getRaft(b, true, false) + defer os.RemoveAll(dir) + + data, err := uuid.GenerateRandomBytes(256 * 1024) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + for i := 0; i < 100; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + err = raft.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } + + bench := func(b *testing.B, s *FSM) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", 
md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + s.writeTo(ctx, discardCloser{Writer: ioutil.Discard}, discardCloser{Writer: ioutil.Discard}) + } + } + + b.Run("256kb", func(b *testing.B) { bench(b, raft.fsm) }) +} + +type discardCloser struct { + io.Writer +} + +func (d discardCloser) Close() error { return nil } +func (d discardCloser) CloseWithError(error) error { return nil } diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go new file mode 100644 index 000000000000..7c993a63e6f9 --- /dev/null +++ b/physical/raft/snapshot.go @@ -0,0 +1,288 @@ +package raft + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/raft" +) + +const ( + // boltSnapshotID is the stable ID for any boltDB snapshot. Keeping the ID + // stable means there is only ever one bolt snapshot in the system + boltSnapshotID = "bolt-snapshot" +) + +// BoltSnapshotStore implements the SnapshotStore interface and allows +// snapshots to be made on the local disk. The main difference between this +// store and the file store is we make the distinction between snapshots that +// have been written by the FSM and by internal Raft operations. The former are +// treated as noop snapshots on Persist and are read in full from the FSM on +// Open. The latter are treated like normal file snapshots and are able to be +// opened and applied as usual. +type BoltSnapshotStore struct { + // path is the directory in which to store file based snapshots + path string + // retain is the number of file based snapshots to keep + retain int + + // We hold a copy of the FSM so we can stream snapshots straight out of the + // database. + fsm *FSM + + // fileSnapStore is used to fall back to file snapshots when the data is + // being written from the raft library. This currently only happens on a + // follower during a snapshot install RPC. 
+ fileSnapStore *raft.FileSnapshotStore + logger log.Logger +} + +// BoltSnapshotSink implements SnapshotSink optionally choosing to write to a +// file. +type BoltSnapshotSink struct { + store *BoltSnapshotStore + logger log.Logger + meta raft.SnapshotMeta + trans raft.Transport + + fileSink raft.SnapshotSink + l sync.Mutex + closed bool +} + +// NewBoltSnapshotStore creates a new BoltSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. +func NewBoltSnapshotStore(base string, retain int, logger log.Logger, fsm *FSM) (*BoltSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logger == nil { + return nil, fmt.Errorf("no logger provided") + } + + fileStore, err := raft.NewFileSnapshotStore(base, retain, nil) + if err != nil { + return nil, err + } + + // Setup the store + store := &BoltSnapshotStore{ + logger: logger, + fsm: fsm, + fileSnapStore: fileStore, + } + + { + // TODO: I think this needs to be done before every NewRaft and + // RecoverCluster call. Not just on Factory method. + + // Here we delete all the existing file based snapshots. This is necessary + // because we do not issue a restore on NewRaft. If a previous file snapshot + // had failed to apply we will be incorrectly setting the indexes. It's + // safer to simply delete all file snapshots on startup and rely on Raft to + // reconcile the FSM state. + if err := store.ReapSnapshots(); err != nil { + return nil, err + } + } + + return store, nil +} + +// Create is used to start a new snapshot +func (f *BoltSnapshotStore) Create(version raft.SnapshotVersion, index, term uint64, + configuration raft.Configuration, configurationIndex uint64, trans raft.Transport) (raft.SnapshotSink, error) { + // We only support version 1 snapshots at this time. 
+ if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // We are processing a snapshot, fastforward the index, term, and + // configuration to the latest seen by the raft system. This could include + // log indexes for operation types that are never sent to the FSM. + if err := f.fsm.witnessSnapshot(index, term, configurationIndex, configuration); err != nil { + return nil, err + } + + // Create the sink + sink := &BoltSnapshotSink{ + store: f, + logger: f.logger, + meta: raft.SnapshotMeta{ + Version: version, + ID: boltSnapshotID, + Index: index, + Term: term, + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + trans: trans, + } + + // Done + return sink, nil +} + +// List returns available snapshots in the store. It only returns bolt +// snapshots. No snapshot will be returned if there are no indexes in the +// FSM. +func (f *BoltSnapshotStore) List() ([]*raft.SnapshotMeta, error) { + meta, err := f.getBoltSnapshotMeta() + if err != nil { + return nil, err + } + + // If we haven't seen any data yet do not return a snapshot + if meta.Index == 0 { + return nil, nil + } + + return []*raft.SnapshotMeta{meta}, nil +} + +// getBoltSnapshotMeta returns the fsm's latest state and configuration. +func (f *BoltSnapshotStore) getBoltSnapshotMeta() (*raft.SnapshotMeta, error) { + latestIndex, latestConfig := f.fsm.LatestState() + meta := &raft.SnapshotMeta{ + Version: 1, + ID: boltSnapshotID, + Index: latestIndex.Index, + Term: latestIndex.Term, + } + + if latestConfig != nil { + index, configuration := protoConfigurationToRaftConfiguration(latestConfig) + meta.Configuration = configuration + meta.ConfigurationIndex = index + } + + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. 
+func (f *BoltSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, error) { + var readCloser io.ReadCloser + var meta *raft.SnapshotMeta + switch id { + case boltSnapshotID: + + var err error + meta, err = f.getBoltSnapshotMeta() + if err != nil { + return nil, nil, err + } + // If we don't have any data return an error + if meta.Index == 0 { + return nil, nil, errors.New("no snapshot data") + } + + // Stream data out of the FSM to calculate the size + var writeCloser *io.PipeWriter + readCloser, writeCloser = io.Pipe() + metaReadCloser, metaWriteCloser := io.Pipe() + go func() { + f.fsm.writeTo(context.Background(), metaWriteCloser, writeCloser) + }() + + // Compute the size + n, err := io.Copy(ioutil.Discard, metaReadCloser) + if err != nil { + f.logger.Error("failed to read state file", "error", err) + metaReadCloser.Close() + readCloser.Close() + return nil, nil, err + } + + meta.Size = n + + default: + var err error + meta, readCloser, err = f.fileSnapStore.Open(id) + if err != nil { + return nil, nil, err + } + } + + return meta, readCloser, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. +func (f *BoltSnapshotStore) ReapSnapshots() error { + return f.fileSnapStore.ReapSnapshots() +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. +func (s *BoltSnapshotSink) ID() string { + s.l.Lock() + defer s.l.Unlock() + + if s.fileSink != nil { + return s.fileSink.ID() + } + + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the amount of context switches. +func (s *BoltSnapshotSink) Write(b []byte) (int, error) { + s.l.Lock() + defer s.l.Unlock() + + // If someone is writting to this sink then we need to create a file sink to + // capture the data. This currently only happens when a follower is being + // sent a snapshot. 
+ if s.fileSink == nil { + fileSink, err := s.store.fileSnapStore.Create(s.meta.Version, s.meta.Index, s.meta.Term, s.meta.Configuration, s.meta.ConfigurationIndex, s.trans) + if err != nil { + return 0, err + } + s.fileSink = fileSink + } + + return s.fileSink.Write(b) +} + +// Close is used to indicate a successful end. +func (s *BoltSnapshotSink) Close() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.fileSink != nil { + return s.fileSink.Close() + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. +func (s *BoltSnapshotSink) Cancel() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.fileSink != nil { + return s.fileSink.Cancel() + } + + return nil +} diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go new file mode 100644 index 000000000000..57ed3eba4b17 --- /dev/null +++ b/physical/raft/snapshot_test.go @@ -0,0 +1,418 @@ +package raft + +import ( + "bytes" + "context" + fmt "fmt" + "hash/crc64" + "io" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/physical" +) + +type idAddr struct { + id string +} + +func (a *idAddr) Network() string { return "inmem" } +func (a *idAddr) String() string { return a.id } + +func addPeer(t *testing.T, leader, follower *RaftBackend) { + t.Helper() + if err := leader.AddPeer(context.Background(), follower.NodeID(), follower.NodeID()); err != nil { + t.Fatal(err) + } + + peers, err := leader.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + + err = follower.Bootstrap(context.Background(), peers) + if err != nil { + t.Fatal(err) + } + + err = follower.SetupCluster(context.Background(), nil, nil) + if err != nil { + t.Fatal(err) + } + + leader.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(follower.NodeID()), 
follower.raftTransport) + follower.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(leader.NodeID()), leader.raftTransport) +} + +func TestRaft_Snapshot_Loading(t *testing.T) { + raft, dir := getRaft(t, true, false) + defer os.RemoveAll(dir) + + // Write some data + for i := 0; i < 1000; i++ { + err := raft.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + readCloser, writeCloser := io.Pipe() + metaReadCloser, metaWriteCloser := io.Pipe() + + go func() { + raft.fsm.writeTo(context.Background(), metaWriteCloser, writeCloser) + }() + + // Create a CRC64 hash + stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + size1, err := io.Copy(stateHash, metaReadCloser) + if err != nil { + t.Fatal(err) + } + + computed1 := stateHash.Sum(nil) + + // Create a CRC64 hash + stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + size2, err := io.Copy(stateHash, readCloser) + if err != nil { + t.Fatal(err) + } + + computed2 := stateHash.Sum(nil) + + if size1 != size2 { + t.Fatal("sizes did not match") + } + + if !bytes.Equal(computed1, computed2) { + t.Fatal("hashes did not match") + } + + snapFuture := raft.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + + meta, reader, err := snapFuture.Open() + if err != nil { + t.Fatal(err) + } + if meta.Size != size1 { + t.Fatal("meta size did not match expected") + } + + // Create a CRC64 hash + stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + size3, err := io.Copy(stateHash, reader) + if err != nil { + t.Fatal(err) + } + + computed3 := stateHash.Sum(nil) + if size1 != size3 { + t.Fatal("sizes did not match") + } + + if !bytes.Equal(computed1, computed3) { + t.Fatal("hashes did not match") + } + +} + +func TestRaft_Snapshot_Index(t *testing.T) { + raft, dir := getRaft(t, true, false) + defer 
os.RemoveAll(dir) + + err := raft.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: []byte("value"), + }) + if err != nil { + t.Fatal(err) + } + + // Get index + index, _ := raft.fsm.LatestState() + if index.Term != 1 { + t.Fatalf("unexpected term, got %d expected 1", index.Term) + } + if index.Index != 3 { + t.Fatalf("unexpected index, got %d expected 3", index.Term) + } + + // Write some data + for i := 0; i < 100; i++ { + err := raft.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Get index + index, _ = raft.fsm.LatestState() + if index.Term != 1 { + t.Fatalf("unexpected term, got %d expected 1", index.Term) + } + if index.Index != 103 { + t.Fatalf("unexpected index, got %d expected 103", index.Term) + } + + // Take a snapshot + snapFuture := raft.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + + meta, reader, err := snapFuture.Open() + if err != nil { + t.Fatal(err) + } + io.Copy(ioutil.Discard, reader) + + if meta.Index != index.Index { + t.Fatalf("indexes did not match, got %d expected %d", meta.Index, index.Index) + } + if meta.Term != index.Term { + t.Fatalf("term did not match, got %d expected %d", meta.Term, index.Term) + } + + // Write some more data + for i := 0; i < 100; i++ { + err := raft.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Open the same snapshot again + meta, reader, err = raft.snapStore.Open(meta.ID) + if err != nil { + t.Fatal(err) + } + io.Copy(ioutil.Discard, reader) + + // Make sure the meta data has updated to the new values + if meta.Index != 203 { + t.Fatalf("unexpected snapshot index %d", meta.Index) + } + if meta.Term != 1 { + t.Fatalf("unexpected snapshot term %d", meta.Term) + } +} + +func TestRaft_Snapshot_Peers(t *testing.T) { 
+ raft1, dir := getRaft(t, true, false) + raft2, dir2 := getRaft(t, false, false) + raft3, dir3 := getRaft(t, false, false) + defer os.RemoveAll(dir) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + + // Write some data + for i := 0; i < 1000; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Force a snapshot + snapFuture := raft1.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + // TODO: remove sleeps from these tests + time.Sleep(10 * time.Second) + + // Make sure the snapshot was applied correctly on the follower + compareDBs(t, raft1.fsm.db, raft2.fsm.db) + + // Write some more data + for i := 1000; i < 2000; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + snapFuture = raft1.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + + // Add raft3 to the cluster + addPeer(t, raft1, raft3) + + // TODO: remove sleeps from these tests + time.Sleep(10 * time.Second) + + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) +} + +func TestRaft_Snapshot_Restart(t *testing.T) { + raft1, dir := getRaft(t, true, false) + defer os.RemoveAll(dir) + raft2, dir2 := getRaft(t, false, false) + defer os.RemoveAll(dir2) + + // Write some data + for i := 0; i < 100; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Take a snapshot + snapFuture := raft1.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + // Advance FSM's index past 
configuration change + raft1.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: []byte("value"), + }) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + time.Sleep(2 * time.Second) + + peers, err := raft2.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 2 { + t.Fatal(peers) + } + + // Shutdown raft1 + if err := raft1.TeardownCluster(nil); err != nil { + t.Fatal(err) + } + + // Start Raft + err = raft1.SetupCluster(context.Background(), nil, nil) + if err != nil { + t.Fatal(err) + } + + peers, err = raft1.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 2 { + t.Fatal(peers) + } + + compareFSMs(t, raft1.fsm, raft2.fsm) +} + +func TestRaft_Snapshot_Take_Restore(t *testing.T) { + raft1, dir := getRaft(t, true, false) + defer os.RemoveAll(dir) + raft2, dir2 := getRaft(t, false, false) + defer os.RemoveAll(dir2) + + addPeer(t, raft1, raft2) + + // Write some data + for i := 0; i < 100; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + snap := &bytes.Buffer{} + + err := raft1.Snapshot(snap, nil) + if err != nil { + t.Fatal(err) + } + + // Write some more data + for i := 100; i < 200; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + snapFile, cleanup, metadata, err := raft1.WriteSnapshotToTemp(ioutil.NopCloser(snap), nil) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + err = raft1.RestoreSnapshot(context.Background(), metadata, snapFile) + if err != nil { + t.Fatal(err) + } + + // make sure we don't have the second batch of writes + for i := 100; i < 200; i++ { + { + value, err := raft1.Get(context.Background(), fmt.Sprintf("key-%d", i)) + if err != nil { + t.Fatal(err) + } + if value != 
nil { + t.Fatal("didn't remove data") + } + } + { + value, err := raft2.Get(context.Background(), fmt.Sprintf("key-%d", i)) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("didn't remove data") + } + } + } + + time.Sleep(10 * time.Second) + compareFSMs(t, raft1.fsm, raft2.fsm) +} diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go new file mode 100644 index 000000000000..ec04b316166b --- /dev/null +++ b/physical/raft/streamlayer.go @@ -0,0 +1,370 @@ +package raft + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "errors" + fmt "fmt" + "math/big" + mathrand "math/rand" + "net" + "sync" + "time" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault/cluster" +) + +// RaftTLSKey is a single TLS keypair in the Keyring +type RaftTLSKey struct { + // ID is a unique identifier for this Key + ID string `json:"id"` + + // KeyType defines the algorighm used to generate the private keys + KeyType string `json:"key_type"` + + // AppliedIndex is the earliest known raft index that safely contains this + // key. + AppliedIndex uint64 `json:"applied_index"` + + // CertBytes is the marshaled certificate. + CertBytes []byte `json:"cluster_cert"` + + // KeyParams is the marshaled private key. + KeyParams *certutil.ClusterKeyParams `json:"cluster_key_params"` + + // CreatedTime is the time this key was generated. This value is useful in + // determining when the next rotation should be. + CreatedTime time.Time `json:"created_time"` + + parsedCert *x509.Certificate + parsedKey *ecdsa.PrivateKey +} + +// RaftTLSKeyring is the set of keys that raft uses for network communication. 
+// Only one key is used to dial at a time but both keys will be used to accept +// connections. +type RaftTLSKeyring struct { + // Keys is the set of available key pairs + Keys []*RaftTLSKey `json:"keys"` + + // AppliedIndex is the earliest known raft index that safely contains the + // latest key in the keyring. + AppliedIndex uint64 `json:"applied_index"` + + // Term is an incrementing identifier value used to quickly determine if two + // states of the keyring are different. + Term uint64 `json:"term"` + + // ActiveKeyID is the key ID to track the active key in the keyring. Only + // the active key is used for dialing. + ActiveKeyID string `json:"active_key_id"` +} + +// GetActive returns the active key. +func (k *RaftTLSKeyring) GetActive() *RaftTLSKey { + if k.ActiveKeyID == "" { + return nil + } + + for _, key := range k.Keys { + if key.ID == k.ActiveKeyID { + return key + } + } + return nil +} + +func GenerateTLSKey() (*RaftTLSKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + host = fmt.Sprintf("raft-%s", host) + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + // 30 years of single-active uptime ought to be enough for anybody + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err) + } + + return 
&RaftTLSKey{ + ID: host, + KeyType: certutil.PrivateKeyTypeP521, + CertBytes: certBytes, + KeyParams: &certutil.ClusterKeyParams{ + Type: certutil.PrivateKeyTypeP521, + X: key.PublicKey.X, + Y: key.PublicKey.Y, + D: key.D, + }, + CreatedTime: time.Now(), + }, nil +} + +// Make sure raftLayer satisfies the raft.StreamLayer interface +var _ raft.StreamLayer = (*raftLayer)(nil) + +// Make sure raftLayer satisfies the cluster.Handler and cluster.Client +// interfaces +var _ cluster.Handler = (*raftLayer)(nil) +var _ cluster.Client = (*raftLayer)(nil) + +// RaftLayer implements the raft.StreamLayer interface, +// so that we can use a single RPC layer for Raft and Vault +type raftLayer struct { + // Addr is the listener address to return + addr net.Addr + + // connCh is used to accept connections + connCh chan net.Conn + + // Tracks if we are closed + closed bool + closeCh chan struct{} + closeLock sync.Mutex + + logger log.Logger + + dialerFunc func(string, time.Duration) (net.Conn, error) + + // TLS config + keyring *RaftTLSKeyring + baseTLSConfig *tls.Config +} + +// NewRaftLayer creates a new raftLayer object. It parses the TLS information +// from the network config. 
+func NewRaftLayer(logger log.Logger, raftTLSKeyring *RaftTLSKeyring, clusterAddr net.Addr, baseTLSConfig *tls.Config) (*raftLayer, error) { + switch { + case clusterAddr == nil: + // Clustering disabled on the server, don't try to look for params + return nil, errors.New("no raft addr found") + } + + layer := &raftLayer{ + addr: clusterAddr, + connCh: make(chan net.Conn), + closeCh: make(chan struct{}), + logger: logger, + baseTLSConfig: baseTLSConfig, + } + + if err := layer.setTLSKeyring(raftTLSKeyring); err != nil { + return nil, err + } + + return layer, nil +} + +func (l *raftLayer) setTLSKeyring(keyring *RaftTLSKeyring) error { + // Fast path a noop update + if l.keyring != nil && l.keyring.Term == keyring.Term { + return nil + } + + for _, key := range keyring.Keys { + switch { + case key.KeyParams == nil: + return errors.New("no raft cluster key params found") + + case key.KeyParams.X == nil, key.KeyParams.Y == nil, key.KeyParams.D == nil: + return errors.New("failed to parse raft cluster key") + + case key.KeyParams.Type != certutil.PrivateKeyTypeP521: + return errors.New("failed to find valid raft cluster key type") + + case len(key.CertBytes) == 0: + return errors.New("no cluster cert found") + } + + parsedCert, err := x509.ParseCertificate(key.CertBytes) + if err != nil { + return errwrap.Wrapf("error parsing raft cluster certificate: {{err}}", err) + } + + key.parsedCert = parsedCert + key.parsedKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: key.KeyParams.X, + Y: key.KeyParams.Y, + }, + D: key.KeyParams.D, + } + } + + if keyring.GetActive() == nil { + return errors.New("expected one active key to be present in the keyring") + } + + l.keyring = keyring + + return nil +} + +func (l *raftLayer) ClientLookup(ctx context.Context, requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) { + for _, subj := range requestInfo.AcceptableCAs { + for _, key := range l.keyring.Keys { + if bytes.Equal(subj, 
key.parsedCert.RawIssuer) { + localCert := make([]byte, len(key.CertBytes)) + copy(localCert, key.CertBytes) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: key.parsedKey, + Leaf: key.parsedCert, + }, nil + } + } + } + + return nil, nil +} + +func (l *raftLayer) ServerLookup(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if l.keyring == nil { + return nil, errors.New("got raft connection but no local cert") + } + + for _, key := range l.keyring.Keys { + if clientHello.ServerName == key.ID { + localCert := make([]byte, len(key.CertBytes)) + copy(localCert, key.CertBytes) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: key.parsedKey, + Leaf: key.parsedCert, + }, nil + } + } + + return nil, nil +} + +// CALookup returns the CA to use when validating this connection. +func (l *raftLayer) CALookup(context.Context) ([]*x509.Certificate, error) { + ret := make([]*x509.Certificate, len(l.keyring.Keys)) + for i, key := range l.keyring.Keys { + ret[i] = key.parsedCert + } + return ret, nil +} + +// Stop shutsdown the raft layer. +func (l *raftLayer) Stop() error { + l.Close() + return nil +} + +// Handoff is used to hand off a connection to the +// RaftLayer. 
This allows it to be Accept()'ed +func (l *raftLayer) Handoff(ctx context.Context, wg *sync.WaitGroup, quit chan struct{}, conn *tls.Conn) error { + l.closeLock.Lock() + closed := l.closed + l.closeLock.Unlock() + + if closed { + return errors.New("raft is shutdown") + } + + wg.Add(1) + go func() { + defer wg.Done() + select { + case l.connCh <- conn: + case <-l.closeCh: + case <-ctx.Done(): + case <-quit: + } + }() + + return nil +} + +// Accept is used to return connection which are +// dialed to be used with the Raft layer +func (l *raftLayer) Accept() (net.Conn, error) { + select { + case conn := <-l.connCh: + return conn, nil + case <-l.closeCh: + return nil, fmt.Errorf("Raft RPC layer closed") + } +} + +// Close is used to stop listening for Raft connections +func (l *raftLayer) Close() error { + l.closeLock.Lock() + defer l.closeLock.Unlock() + + if !l.closed { + l.closed = true + close(l.closeCh) + } + return nil +} + +// Addr is used to return the address of the listener +func (l *raftLayer) Addr() net.Addr { + return l.addr +} + +// Dial is used to create a new outgoing connection +func (l *raftLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) { + + tlsConfig := l.baseTLSConfig.Clone() + + key := l.keyring.GetActive() + if key == nil { + return nil, errors.New("no active key") + } + + tlsConfig.NextProtos = []string{consts.RaftStorageALPN} + tlsConfig.ServerName = key.parsedCert.Subject.CommonName + + l.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName) + + pool := x509.NewCertPool() + pool.AddCert(key.parsedCert) + tlsConfig.RootCAs = pool + tlsConfig.ClientCAs = pool + + dialer := &net.Dialer{ + Timeout: timeout, + } + return tls.DialWithDialer(dialer, "tcp", string(address), tlsConfig) +} diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go new file mode 100644 index 000000000000..d2b81132ee20 --- /dev/null +++ b/physical/raft/types.pb.go @@ -0,0 +1,311 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: physical/raft/types.proto + +package raft + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LogOperation struct { + // OpType is the Operation type + OpType uint32 `protobuf:"varint,1,opt,name=op_type,json=opType,proto3" json:"op_type,omitempty"` + // Flags is an opaque value, currently unused. Reserved. + Flags uint64 `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"` + // Key that is being affected + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Value is optional, corresponds to the key + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogOperation) Reset() { *m = LogOperation{} } +func (m *LogOperation) String() string { return proto.CompactTextString(m) } +func (*LogOperation) ProtoMessage() {} +func (*LogOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{0} +} + +func (m *LogOperation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogOperation.Unmarshal(m, b) +} +func (m *LogOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogOperation.Marshal(b, m, deterministic) +} +func (m *LogOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogOperation.Merge(m, src) +} +func (m *LogOperation) XXX_Size() int { + return 
xxx_messageInfo_LogOperation.Size(m) +} +func (m *LogOperation) XXX_DiscardUnknown() { + xxx_messageInfo_LogOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_LogOperation proto.InternalMessageInfo + +func (m *LogOperation) GetOpType() uint32 { + if m != nil { + return m.OpType + } + return 0 +} + +func (m *LogOperation) GetFlags() uint64 { + if m != nil { + return m.Flags + } + return 0 +} + +func (m *LogOperation) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LogOperation) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type LogData struct { + Operations []*LogOperation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogData) Reset() { *m = LogData{} } +func (m *LogData) String() string { return proto.CompactTextString(m) } +func (*LogData) ProtoMessage() {} +func (*LogData) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{1} +} + +func (m *LogData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogData.Unmarshal(m, b) +} +func (m *LogData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogData.Marshal(b, m, deterministic) +} +func (m *LogData) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogData.Merge(m, src) +} +func (m *LogData) XXX_Size() int { + return xxx_messageInfo_LogData.Size(m) +} +func (m *LogData) XXX_DiscardUnknown() { + xxx_messageInfo_LogData.DiscardUnknown(m) +} + +var xxx_messageInfo_LogData proto.InternalMessageInfo + +func (m *LogData) GetOperations() []*LogOperation { + if m != nil { + return m.Operations + } + return nil +} + +type IndexValue struct { + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexValue) Reset() { *m = IndexValue{} } +func (m *IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexValue) ProtoMessage() {} +func (*IndexValue) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{2} +} + +func (m *IndexValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexValue.Unmarshal(m, b) +} +func (m *IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexValue.Marshal(b, m, deterministic) +} +func (m *IndexValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexValue.Merge(m, src) +} +func (m *IndexValue) XXX_Size() int { + return xxx_messageInfo_IndexValue.Size(m) +} +func (m *IndexValue) XXX_DiscardUnknown() { + xxx_messageInfo_IndexValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexValue proto.InternalMessageInfo + +func (m *IndexValue) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +func (m *IndexValue) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +type Server struct { + Suffrage int32 `protobuf:"varint,1,opt,name=suffrage,proto3" json:"suffrage,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{3} +} + +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (m *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(m, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetSuffrage() int32 { + if m != nil { + return m.Suffrage + } + return 0 +} + +func (m *Server) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Server) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +type ConfigurationValue struct { + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Servers []*Server `protobuf:"bytes,2,rep,name=servers,proto3" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigurationValue) Reset() { *m = ConfigurationValue{} } +func (m *ConfigurationValue) String() string { return proto.CompactTextString(m) } +func (*ConfigurationValue) ProtoMessage() {} +func (*ConfigurationValue) Descriptor() ([]byte, []int) { + return fileDescriptor_a8b3efb4def82ab3, []int{4} +} + +func (m *ConfigurationValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigurationValue.Unmarshal(m, b) +} +func (m *ConfigurationValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigurationValue.Marshal(b, m, deterministic) +} +func (m *ConfigurationValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigurationValue.Merge(m, src) +} +func (m *ConfigurationValue) XXX_Size() int { + return xxx_messageInfo_ConfigurationValue.Size(m) +} +func (m *ConfigurationValue) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigurationValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigurationValue proto.InternalMessageInfo + +func (m 
*ConfigurationValue) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ConfigurationValue) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +func init() { + proto.RegisterType((*LogOperation)(nil), "raft.LogOperation") + proto.RegisterType((*LogData)(nil), "raft.LogData") + proto.RegisterType((*IndexValue)(nil), "raft.IndexValue") + proto.RegisterType((*Server)(nil), "raft.Server") + proto.RegisterType((*ConfigurationValue)(nil), "raft.ConfigurationValue") +} + +func init() { proto.RegisterFile("physical/raft/types.proto", fileDescriptor_a8b3efb4def82ab3) } + +var fileDescriptor_a8b3efb4def82ab3 = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x4b, 0xc3, 0x30, + 0x14, 0xc6, 0xc9, 0xd6, 0xad, 0xee, 0x39, 0x45, 0x1e, 0x82, 0xd5, 0x53, 0xe9, 0x41, 0x8a, 0x87, + 0x16, 0x26, 0x78, 0xf3, 0xa2, 0x5e, 0x84, 0xa1, 0x10, 0xc5, 0x83, 0x17, 0xc9, 0xd6, 0xb4, 0x0d, + 0x76, 0x4b, 0x48, 0xd2, 0x61, 0xff, 0x7b, 0x49, 0x63, 0xc7, 0xbc, 0xbd, 0xaf, 0xfd, 0x92, 0xef, + 0xf7, 0xe5, 0xc1, 0xa5, 0xaa, 0x3b, 0x23, 0xd6, 0xac, 0xc9, 0x35, 0x2b, 0x6d, 0x6e, 0x3b, 0xc5, + 0x4d, 0xa6, 0xb4, 0xb4, 0x12, 0x03, 0xf7, 0x25, 0xe1, 0x30, 0x5f, 0xca, 0xea, 0x55, 0x71, 0xcd, + 0xac, 0x90, 0x5b, 0xbc, 0x80, 0x50, 0xaa, 0x2f, 0xe7, 0x8b, 0x48, 0x4c, 0xd2, 0x13, 0x3a, 0x95, + 0xea, 0xbd, 0x53, 0x1c, 0xcf, 0x61, 0x52, 0x36, 0xac, 0x32, 0xd1, 0x28, 0x26, 0x69, 0x40, 0xbd, + 0xc0, 0x33, 0x18, 0x7f, 0xf3, 0x2e, 0x1a, 0xc7, 0x24, 0x9d, 0x51, 0x37, 0x3a, 0xdf, 0x8e, 0x35, + 0x2d, 0x8f, 0x82, 0x98, 0xa4, 0x73, 0xea, 0x45, 0x72, 0x0f, 0xe1, 0x52, 0x56, 0x4f, 0xcc, 0x32, + 0x5c, 0x00, 0xc8, 0x21, 0xce, 0x44, 0x24, 0x1e, 0xa7, 0xc7, 0x0b, 0xcc, 0x1c, 0x4c, 0x76, 0x48, + 0x42, 0x0f, 0x5c, 0xc9, 0x1d, 0xc0, 0xf3, 0xb6, 0xe0, 0x3f, 0x1f, 0xee, 0x32, 0x44, 0x08, 0x2c, + 0xd7, 0x9b, 0x1e, 0x30, 0xa0, 0xfd, 0xec, 0x62, 0x85, 0x73, 0x0c, 0x78, 0xbd, 0x48, 0x5e, 
0x60, + 0xfa, 0xc6, 0xf5, 0x8e, 0x6b, 0xbc, 0x82, 0x23, 0xd3, 0x96, 0xa5, 0x66, 0x95, 0x2f, 0x36, 0xa1, + 0x7b, 0x8d, 0xa7, 0x30, 0x12, 0x45, 0x7f, 0x70, 0x46, 0x47, 0xa2, 0xc0, 0x08, 0x42, 0x56, 0x14, + 0x9a, 0x1b, 0xf3, 0x57, 0x6c, 0x90, 0x09, 0x05, 0x7c, 0x94, 0xdb, 0x52, 0x54, 0xad, 0x27, 0xf3, + 0x3c, 0xfb, 0x6c, 0x72, 0x90, 0x8d, 0xd7, 0x10, 0x9a, 0x3e, 0xdb, 0x3d, 0x99, 0x2b, 0x39, 0xf7, + 0x25, 0x3d, 0x10, 0x1d, 0x7e, 0x3e, 0xdc, 0x7c, 0xa6, 0x95, 0xb0, 0x75, 0xbb, 0xca, 0xd6, 0x72, + 0x93, 0xd7, 0xcc, 0xd4, 0x62, 0x2d, 0xb5, 0xca, 0x77, 0xac, 0x6d, 0x6c, 0xfe, 0x6f, 0x7f, 0xab, + 0x69, 0xbf, 0xba, 0xdb, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x85, 0xad, 0xad, 0xd7, 0x01, + 0x00, 0x00, +} diff --git a/physical/raft/types.proto b/physical/raft/types.proto new file mode 100644 index 000000000000..bec32ad714e0 --- /dev/null +++ b/physical/raft/types.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/physical/raft"; + +package raft; + +message LogOperation { + // OpType is the Operation type + uint32 op_type = 1; + + // Flags is an opaque value, currently unused. Reserved. 
+ uint64 flags = 2; + + // Key that is being affected + string key = 3; + + // Value is optional, corresponds to the key + bytes value = 4; +} + +message LogData { + repeated LogOperation operations = 1; +} + +message IndexValue { + uint64 term = 1; + uint64 index = 2; +} + +message Server { + int32 suffrage = 1; + string id = 2; + string address = 3; +} + +message ConfigurationValue { + uint64 index = 1; + repeated Server servers = 2; +} diff --git a/plugins/database/cassandra/cassandra.go b/plugins/database/cassandra/cassandra.go index 44c889709372..886e7c8b3196 100644 --- a/plugins/database/cassandra/cassandra.go +++ b/plugins/database/cassandra/cassandra.go @@ -129,7 +129,7 @@ func (c *Cassandra) CreateUser(ctx context.Context, statements dbplugin.Statemen err = session.Query(dbutil.QueryHelper(query, map[string]string{ "username": username, "password": password, - })).Exec() + })).WithContext(ctx).Exec() if err != nil { for _, stmt := range rollbackCQL { for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { @@ -140,7 +140,7 @@ func (c *Cassandra) CreateUser(ctx context.Context, statements dbplugin.Statemen session.Query(dbutil.QueryHelper(query, map[string]string{ "username": username, - })).Exec() + })).WithContext(ctx).Exec() } } return "", "", err @@ -185,7 +185,7 @@ func (c *Cassandra) RevokeUser(ctx context.Context, statements dbplugin.Statemen err := session.Query(dbutil.QueryHelper(query, map[string]string{ "username": username, - })).Exec() + })).WithContext(ctx).Exec() result = multierror.Append(result, err) } @@ -225,7 +225,7 @@ func (c *Cassandra) RotateRootCredentials(ctx context.Context, statements []stri err := session.Query(dbutil.QueryHelper(query, map[string]string{ "username": c.Username, "password": password, - })).Exec() + })).WithContext(ctx).Exec() result = multierror.Append(result, err) } @@ -239,3 +239,12 @@ func (c *Cassandra) RotateRootCredentials(ctx context.Context, statements []stri c.rawConfig["password"] = password 
return c.rawConfig, nil } + +// GenerateCredentials returns a generated password +func (c *Cassandra) GenerateCredentials(ctx context.Context) (string, error) { + password, err := c.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/cassandra/connection_producer.go b/plugins/database/cassandra/connection_producer.go index 87579de4e49d..a34dd4e45c00 100644 --- a/plugins/database/cassandra/connection_producer.go +++ b/plugins/database/cassandra/connection_producer.go @@ -12,6 +12,7 @@ import ( "github.com/gocql/gocql" "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -136,7 +137,7 @@ func (c *cassandraConnectionProducer) Init(ctx context.Context, conf map[string] return conf, nil } -func (c *cassandraConnectionProducer) Connection(_ context.Context) (interface{}, error) { +func (c *cassandraConnectionProducer) Connection(ctx context.Context) (interface{}, error) { if !c.Initialized { return nil, connutil.ErrNotInitialized } @@ -146,7 +147,7 @@ func (c *cassandraConnectionProducer) Connection(_ context.Context) (interface{} return c.session, nil } - session, err := c.createSession() + session, err := c.createSession(ctx) if err != nil { return nil, err } @@ -171,7 +172,7 @@ func (c *cassandraConnectionProducer) Close() error { return nil } -func (c *cassandraConnectionProducer) createSession() (*gocql.Session, error) { +func (c *cassandraConnectionProducer) createSession(ctx context.Context) (*gocql.Session, error) { hosts := strings.Split(c.Hosts, ",") clusterConfig := gocql.NewCluster(hosts...) 
clusterConfig.Authenticator = gocql.PasswordAuthenticator{ @@ -255,7 +256,7 @@ func (c *cassandraConnectionProducer) createSession() (*gocql.Session, error) { } // Verify the info - err = session.Query(`LIST ALL`).Exec() + err = session.Query(`LIST ALL`).WithContext(ctx).Exec() if err != nil && len(c.Username) != 0 && strings.Contains(err.Error(), "not authorized") { rowNum := session.Query(dbutil.QueryHelper(`LIST CREATE ON ALL ROLES OF '{{username}}';`, map[string]string{ "username": c.Username, @@ -278,3 +279,13 @@ func (c *cassandraConnectionProducer) secretValues() map[string]interface{} { c.PemJSON: "[pem_json]", } } + +// SetCredentials uses provided information to set/create a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. 
+func (c *cassandraConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + return "", "", dbutil.Unimplemented() +} diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go index a2af57526785..872177a03761 100644 --- a/plugins/database/hana/hana.go +++ b/plugins/database/hana/hana.go @@ -293,3 +293,12 @@ func (h *HANA) revokeUserDefault(ctx context.Context, username string) error { func (h *HANA) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) { return nil, errors.New("root credentaion rotation is not currently implemented in this database secrets engine") } + +// GenerateCredentials returns a generated password +func (h *HANA) GenerateCredentials(ctx context.Context) (string, error) { + password, err := h.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go index 52bd8e9455ef..84f93bc81453 100644 --- a/plugins/database/influxdb/connection_producer.go +++ b/plugins/database/influxdb/connection_producer.go @@ -8,7 +8,9 @@ import ( "time" "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/hashicorp/vault/sdk/helper/tlsutil" @@ -261,3 +263,13 @@ func isUserAdmin(cli influx.Client, user string) (bool, error) { } return false, fmt.Errorf("the provided username is not a valid user in the influxdb") } + +// SetCredentials uses provided information to set/create a user in the +// database. 
Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. +func (i *influxdbConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + return "", "", dbutil.Unimplemented() +} diff --git a/plugins/database/influxdb/influxdb.go b/plugins/database/influxdb/influxdb.go index e77adf5f16db..7a1520d9d4e1 100644 --- a/plugins/database/influxdb/influxdb.go +++ b/plugins/database/influxdb/influxdb.go @@ -242,3 +242,12 @@ func (i *Influxdb) RotateRootCredentials(ctx context.Context, statements []strin i.rawConfig["password"] = password return i.rawConfig, nil } + +// GenerateCredentials returns a generated password +func (i *Influxdb) GenerateCredentials(ctx context.Context) (string, error) { + password, err := i.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go index 847e8fa0ea05..20912531ff8e 100644 --- a/plugins/database/mongodb/connection_producer.go +++ b/plugins/database/mongodb/connection_producer.go @@ -15,6 +15,7 @@ import ( "time" "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/mitchellh/mapstructure" @@ -153,6 +154,16 @@ func (c *mongoDBConnectionProducer) Close() error { return nil } +// SetCredentials uses provided information to set/create a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. 
This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. +func (c *mongoDBConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + return "", "", dbutil.Unimplemented() +} + func parseMongoURL(rawURL string) (*mgo.DialInfo, error) { url, err := url.Parse(rawURL) if err != nil { diff --git a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go index 607aff71b18c..f3aa6e216a8c 100644 --- a/plugins/database/mongodb/mongodb.go +++ b/plugins/database/mongodb/mongodb.go @@ -224,3 +224,12 @@ func (m *MongoDB) RevokeUser(ctx context.Context, statements dbplugin.Statements func (m *MongoDB) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) { return nil, errors.New("root credential rotation is not currently implemented in this database secrets engine") } + +// GenerateCredentials returns a generated password +func (m *MongoDB) GenerateCredentials(ctx context.Context) (string, error) { + password, err := m.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go index dfc34c1b43e2..b525be8cbc81 100644 --- a/plugins/database/mssql/mssql.go +++ b/plugins/database/mssql/mssql.go @@ -381,3 +381,12 @@ END const rotateRootCredentialsSQL = ` ALTER LOGIN [{{username}}] WITH PASSWORD = '{{password}}' ` + +// GenerateCredentials returns a generated password +func (m *MSSQL) GenerateCredentials(ctx context.Context) (string, error) { + password, err := m.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go index bf349aaea432..f3967558309e 100644 --- 
a/plugins/database/mysql/mysql.go +++ b/plugins/database/mysql/mysql.go @@ -315,3 +315,12 @@ func (m *MySQL) RotateRootCredentials(ctx context.Context, statements []string) m.RawConfig["password"] = password return m.RawConfig, nil } + +// GenerateCredentials returns a generated password +func (m *MySQL) GenerateCredentials(ctx context.Context) (string, error) { + password, err := m.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index 4be5418cb248..1e9c168165f6 100644 --- a/plugins/database/postgresql/postgresql.go +++ b/plugins/database/postgresql/postgresql.go @@ -26,6 +26,10 @@ ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}'; ` defaultPostgresRotateRootCredentialsSQL = ` ALTER ROLE "{{username}}" WITH PASSWORD '{{password}}'; +` + + defaultPostgresRotateCredentialsSQL = ` +ALTER ROLE "{{name}}" WITH PASSWORD '{{password}}'; ` ) @@ -88,6 +92,79 @@ func (p *PostgreSQL) getConnection(ctx context.Context) (*sql.DB, error) { return db.(*sql.DB), nil } +// SetCredentials uses provided information to set/create a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. 
+func (p *PostgreSQL) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + if len(statements.Rotation) == 0 { + return "", "", errors.New("empty rotation statements") + } + + username = staticUser.Username + password = staticUser.Password + if username == "" || password == "" { + return "", "", errors.New("must provide both username and password") + } + + // Grab the lock + p.Lock() + defer p.Unlock() + + // Get the connection + db, err := p.getConnection(ctx) + if err != nil { + return "", "", err + } + + // Check if the role exists + var exists bool + err = db.QueryRowContext(ctx, "SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return "", "", err + } + + // Vault requires the database user already exist, and that the credentials + // used to execute the rotation statements has sufficient privileges. + stmts := statements.Rotation + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return "", "", err + } + defer func() { + _ = tx.Rollback() + }() + + // Execute each query + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": staticUser.Username, + "password": password, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return "", "", err + } + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + return username, password, nil +} + func (p *PostgreSQL) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { statements = dbutil.StatementCompatibilityHelper(statements) @@ -129,7 +206,6 @@ func (p *PostgreSQL) 
CreateUser(ctx context.Context, statements dbplugin.Stateme defer func() { tx.Rollback() }() - // Return the secret // Execute each query for _, stmt := range statements.Creation { @@ -267,7 +343,7 @@ func (p *PostgreSQL) defaultRevokeUser(ctx context.Context, username string) err return err } - if exists == false { + if !exists { return nil } @@ -424,3 +500,12 @@ func (p *PostgreSQL) RotateRootCredentials(ctx context.Context, statements []str p.RawConfig["password"] = password return p.RawConfig, nil } + +// GenerateCredentials returns a generated password +func (p *PostgreSQL) GenerateCredentials(ctx context.Context) (string, error) { + password, err := p.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go index 85c632563083..3fbbb439ebbd 100644 --- a/plugins/database/postgresql/postgresql_test.go +++ b/plugins/database/postgresql/postgresql_test.go @@ -12,6 +12,8 @@ import ( "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/lib/pq" "github.com/ory/dockertest" ) @@ -317,6 +319,70 @@ func TestPostgreSQL_RevokeUser(t *testing.T) { } } +func TestPostgresSQL_SetCredentials(t *testing.T) { + cleanup, connURL := preparePostgresTestContainer(t) + defer cleanup() + + // create the database user + dbUser := "vaultstatictest" + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + db := new() + _, err := db.Init(context.Background(), connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + password, err := db.GenerateCredentials(context.Background()) + if err != nil { + t.Fatal(err) + } + + usernameConfig := dbplugin.StaticUserConfig{ + Username: dbUser, + Password: password, + } + + // Test with 
no configured Rotation Statement + username, password, err := db.SetCredentials(context.Background(), dbplugin.Statements{}, usernameConfig) + if err == nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + Rotation: []string{testPostgresStaticRoleRotate}, + } + // User should not exist, make sure we can create + username, password, err = db.SetCredentials(context.Background(), statements, usernameConfig) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // call SetCredentials again, password will change + newPassword, _ := db.GenerateCredentials(context.Background()) + usernameConfig.Password = newPassword + username, password, err = db.SetCredentials(context.Background(), statements, usernameConfig) + if err != nil { + t.Fatalf("err: %s", err) + } + + if password != newPassword { + t.Fatal("passwords should have changed") + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + func testCredsExist(t testing.TB, connURL, username, password string) error { t.Helper() // Log in with the new creds @@ -398,3 +464,63 @@ REVOKE USAGE ON SCHEMA public FROM "{{name}}"; DROP ROLE IF EXISTS "{{name}}"; ` + +const testPostgresStaticRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const testRoleStaticCreate = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}'; +` + +const testPostgresStaticRoleRotate = ` +ALTER ROLE "{{name}}" WITH PASSWORD '{{password}}'; +` + +const testPostgresStaticRoleGrant = ` +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +// This is a copy of a test helper method also found in +// builtin/logical/database/rotation_test.go , and should be moved into a shared 
+// helper file in the future. +func createTestPGUser(t *testing.T, connURL string, username, password, query string) { + t.Helper() + conn, err := pq.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + + db, err := sql.Open("postgres", conn) + defer db.Close() + if err != nil { + t.Fatal(err) + } + + // Start a transaction + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + if err != nil { + t.Fatal(err) + } + defer func() { + _ = tx.Rollback() + }() + + m := map[string]string{ + "name": username, + "password": password, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + t.Fatal(err) + } + // Commit the transaction + if err := tx.Commit(); err != nil { + t.Fatal(err) + } +} diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index 2ce7ea669a25..d367ab2e390a 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -21,7 +21,7 @@ RUN apt-get update -y && apt-get install -y -q nodejs yarn=1.12.1-1 RUN rm -rf /var/lib/apt/lists/* -ENV GOVERSION 1.12.4 +ENV GOVERSION 1.12.6 RUN mkdir /goroot && mkdir /gopath RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \ | tar xvzf - -C /goroot --strip-components=1 diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go index c2fe16a85590..c820015136cc 100644 --- a/sdk/database/dbplugin/database.pb.go +++ b/sdk/database/dbplugin/database.pb.go @@ -329,6 +329,7 @@ type Statements struct { Revocation []string `protobuf:"bytes,6,rep,name=revocation,proto3" json:"revocation,omitempty"` Rollback []string `protobuf:"bytes,7,rep,name=rollback,proto3" json:"rollback,omitempty"` Renewal []string `protobuf:"bytes,8,rep,name=renewal,proto3" json:"renewal,omitempty"` + Rotation []string `protobuf:"bytes,9,rep,name=rotation,proto3" json:"rotation,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -419,6 +420,13 @@ func (m *Statements) 
GetRenewal() []string { return nil } +func (m *Statements) GetRotation() []string { + if m != nil { + return m.Rotation + } + return nil +} + type UsernameConfig struct { DisplayName string `protobuf:"bytes,1,opt,name=DisplayName,proto3" json:"DisplayName,omitempty"` RoleName string `protobuf:"bytes,2,opt,name=RoleName,proto3" json:"RoleName,omitempty"` @@ -661,6 +669,194 @@ func (m *Empty) XXX_DiscardUnknown() { var xxx_messageInfo_Empty proto.InternalMessageInfo +type GenerateCredentialsResponse struct { + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GenerateCredentialsResponse) Reset() { *m = GenerateCredentialsResponse{} } +func (m *GenerateCredentialsResponse) String() string { return proto.CompactTextString(m) } +func (*GenerateCredentialsResponse) ProtoMessage() {} +func (*GenerateCredentialsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{13} +} + +func (m *GenerateCredentialsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GenerateCredentialsResponse.Unmarshal(m, b) +} +func (m *GenerateCredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GenerateCredentialsResponse.Marshal(b, m, deterministic) +} +func (m *GenerateCredentialsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenerateCredentialsResponse.Merge(m, src) +} +func (m *GenerateCredentialsResponse) XXX_Size() int { + return xxx_messageInfo_GenerateCredentialsResponse.Size(m) +} +func (m *GenerateCredentialsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GenerateCredentialsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GenerateCredentialsResponse proto.InternalMessageInfo + +func (m *GenerateCredentialsResponse) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type 
StaticUserConfig struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Create bool `protobuf:"varint,3,opt,name=create,proto3" json:"create,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StaticUserConfig) Reset() { *m = StaticUserConfig{} } +func (m *StaticUserConfig) String() string { return proto.CompactTextString(m) } +func (*StaticUserConfig) ProtoMessage() {} +func (*StaticUserConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{14} +} + +func (m *StaticUserConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StaticUserConfig.Unmarshal(m, b) +} +func (m *StaticUserConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StaticUserConfig.Marshal(b, m, deterministic) +} +func (m *StaticUserConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_StaticUserConfig.Merge(m, src) +} +func (m *StaticUserConfig) XXX_Size() int { + return xxx_messageInfo_StaticUserConfig.Size(m) +} +func (m *StaticUserConfig) XXX_DiscardUnknown() { + xxx_messageInfo_StaticUserConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_StaticUserConfig proto.InternalMessageInfo + +func (m *StaticUserConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *StaticUserConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *StaticUserConfig) GetCreate() bool { + if m != nil { + return m.Create + } + return false +} + +type SetCredentialsRequest struct { + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + StaticUserConfig *StaticUserConfig `protobuf:"bytes,2,opt,name=static_user_config,json=staticUserConfig,proto3" json:"static_user_config,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetCredentialsRequest) Reset() { *m = SetCredentialsRequest{} } +func (m *SetCredentialsRequest) String() string { return proto.CompactTextString(m) } +func (*SetCredentialsRequest) ProtoMessage() {} +func (*SetCredentialsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{15} +} + +func (m *SetCredentialsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetCredentialsRequest.Unmarshal(m, b) +} +func (m *SetCredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetCredentialsRequest.Marshal(b, m, deterministic) +} +func (m *SetCredentialsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetCredentialsRequest.Merge(m, src) +} +func (m *SetCredentialsRequest) XXX_Size() int { + return xxx_messageInfo_SetCredentialsRequest.Size(m) +} +func (m *SetCredentialsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetCredentialsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetCredentialsRequest proto.InternalMessageInfo + +func (m *SetCredentialsRequest) GetStatements() *Statements { + if m != nil { + return m.Statements + } + return nil +} + +func (m *SetCredentialsRequest) GetStaticUserConfig() *StaticUserConfig { + if m != nil { + return m.StaticUserConfig + } + return nil +} + +type SetCredentialsResponse struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetCredentialsResponse) Reset() { *m = SetCredentialsResponse{} } +func (m *SetCredentialsResponse) String() string { return proto.CompactTextString(m) } +func (*SetCredentialsResponse) ProtoMessage() {} +func 
(*SetCredentialsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfa445f4444c6876, []int{16} +} + +func (m *SetCredentialsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetCredentialsResponse.Unmarshal(m, b) +} +func (m *SetCredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetCredentialsResponse.Marshal(b, m, deterministic) +} +func (m *SetCredentialsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetCredentialsResponse.Merge(m, src) +} +func (m *SetCredentialsResponse) XXX_Size() int { + return xxx_messageInfo_SetCredentialsResponse.Size(m) +} +func (m *SetCredentialsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetCredentialsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetCredentialsResponse proto.InternalMessageInfo + +func (m *SetCredentialsResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *SetCredentialsResponse) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + func init() { proto.RegisterType((*InitializeRequest)(nil), "dbplugin.InitializeRequest") proto.RegisterType((*InitRequest)(nil), "dbplugin.InitRequest") @@ -675,6 +871,10 @@ func init() { proto.RegisterType((*TypeResponse)(nil), "dbplugin.TypeResponse") proto.RegisterType((*RotateRootCredentialsResponse)(nil), "dbplugin.RotateRootCredentialsResponse") proto.RegisterType((*Empty)(nil), "dbplugin.Empty") + proto.RegisterType((*GenerateCredentialsResponse)(nil), "dbplugin.GenerateCredentialsResponse") + proto.RegisterType((*StaticUserConfig)(nil), "dbplugin.StaticUserConfig") + proto.RegisterType((*SetCredentialsRequest)(nil), "dbplugin.SetCredentialsRequest") + proto.RegisterType((*SetCredentialsResponse)(nil), "dbplugin.SetCredentialsResponse") } func init() { @@ -682,52 +882,60 @@ func init() { } var fileDescriptor_cfa445f4444c6876 = []byte{ - // 716 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xd1, 0x4e, 0xdb, 0x4a, - 0x10, 0x95, 0x93, 0x00, 0xc9, 0x80, 0x80, 0xec, 0x05, 0x64, 0xf9, 0x72, 0xef, 0x45, 0xd6, 0x15, - 0xa5, 0xaa, 0x6a, 0x57, 0xd0, 0x8a, 0x8a, 0x87, 0x56, 0x25, 0x54, 0x55, 0xa5, 0x8a, 0x87, 0x05, - 0x5e, 0xaa, 0x4a, 0x68, 0xe3, 0x2c, 0x89, 0x85, 0xe3, 0x75, 0xbd, 0xeb, 0xd0, 0xf4, 0x07, 0xda, - 0xcf, 0xe8, 0xe7, 0xf4, 0xb1, 0x9f, 0x54, 0x79, 0xe3, 0xf5, 0x6e, 0xe2, 0x50, 0x1e, 0x68, 0xdf, - 0x3c, 0x3b, 0x73, 0x66, 0xce, 0x1c, 0xcf, 0xce, 0xc2, 0xff, 0xbc, 0x77, 0xed, 0xf7, 0x88, 0x20, - 0x5d, 0xc2, 0xa9, 0xdf, 0xeb, 0x26, 0x51, 0xd6, 0x0f, 0xe3, 0xf2, 0xc4, 0x4b, 0x52, 0x26, 0x18, - 0x6a, 0x2a, 0x87, 0xf3, 0x5f, 0x9f, 0xb1, 0x7e, 0x44, 0x7d, 0x79, 0xde, 0xcd, 0xae, 0x7c, 0x11, - 0x0e, 0x29, 0x17, 0x64, 0x98, 0x4c, 0x42, 0xdd, 0x0f, 0xd0, 0x7e, 0x1b, 0x87, 0x22, 0x24, 0x51, - 0xf8, 0x99, 0x62, 0xfa, 0x31, 0xa3, 0x5c, 0xa0, 0x2d, 0x58, 0x0c, 0x58, 0x7c, 0x15, 0xf6, 0x6d, - 0x6b, 0xc7, 0xda, 0x5b, 0xc1, 0x85, 0x85, 0x1e, 0x41, 0x7b, 0x44, 0xd3, 0xf0, 0x6a, 0x7c, 0x19, - 0xb0, 0x38, 0xa6, 0x81, 0x08, 0x59, 0x6c, 0xd7, 0x76, 0xac, 0xbd, 0x26, 0x5e, 0x9f, 0x38, 0x3a, - 0xe5, 0xf9, 0x51, 0xcd, 0xb6, 0x5c, 0x0c, 0xcb, 0x79, 0xf6, 0xdf, 0x99, 0xd7, 0xfd, 0x6e, 0x41, - 0xbb, 0x93, 0x52, 0x22, 0xe8, 0x05, 0xa7, 0xa9, 0x4a, 0xfd, 0x14, 0x80, 0x0b, 0x22, 0xe8, 0x90, - 0xc6, 0x82, 0xcb, 0xf4, 0xcb, 0xfb, 0x1b, 0x9e, 0xd2, 0xc1, 0x3b, 0x2b, 0x7d, 0xd8, 0x88, 0x43, - 0xaf, 0x60, 0x2d, 0xe3, 0x34, 0x8d, 0xc9, 0x90, 0x5e, 0x16, 0xcc, 0x6a, 0x12, 0x6a, 0x6b, 0xe8, - 0x45, 0x11, 0xd0, 0x91, 0x7e, 0xbc, 0x9a, 0x4d, 0xd9, 0xe8, 0x08, 0x80, 0x7e, 0x4a, 0xc2, 0x94, - 0x48, 0xd2, 0x75, 0x89, 0x76, 0xbc, 0x89, 0xec, 0x9e, 0x92, 0xdd, 0x3b, 0x57, 0xb2, 0x63, 0x23, - 0xda, 0xfd, 0x66, 0xc1, 0x3a, 0xa6, 0x31, 0xbd, 0xb9, 0x7f, 0x27, 0x0e, 0x34, 0x15, 0x31, 0xd9, - 0x42, 0x0b, 0x97, 0xf6, 0xbd, 0x28, 0x52, 0x68, 0x63, 0x3a, 0x62, 0xd7, 0xf4, 0x8f, 0x52, 0x74, - 0x5f, 0xc0, 0x36, 0x66, 0x79, 0x28, 0x66, 0x4c, 0x74, 
0x52, 0xda, 0xa3, 0x71, 0x3e, 0x93, 0x5c, - 0x55, 0xfc, 0x77, 0xa6, 0x62, 0x7d, 0xaf, 0x65, 0xe6, 0x76, 0x7f, 0xd4, 0x00, 0x74, 0x59, 0x74, - 0x00, 0x7f, 0x05, 0xf9, 0x88, 0x84, 0x2c, 0xbe, 0x9c, 0x61, 0xda, 0x3a, 0xae, 0xd9, 0x16, 0x46, - 0xca, 0x6d, 0x80, 0x0e, 0x61, 0x33, 0xa5, 0x23, 0x16, 0x54, 0x60, 0xb5, 0x12, 0xb6, 0xa1, 0x03, - 0xa6, 0xab, 0xa5, 0x2c, 0x8a, 0xba, 0x24, 0xb8, 0x36, 0x61, 0x75, 0x5d, 0x4d, 0xb9, 0x0d, 0xd0, - 0x63, 0x58, 0x4f, 0xf3, 0x5f, 0x6f, 0x22, 0x1a, 0x25, 0x62, 0x4d, 0xfa, 0xce, 0xa6, 0xc4, 0x53, - 0x94, 0xed, 0x05, 0xd9, 0x7e, 0x69, 0xe7, 0xe2, 0x68, 0x5e, 0xf6, 0xe2, 0x44, 0x1c, 0x7d, 0x92, - 0x63, 0x15, 0x01, 0x7b, 0x69, 0x82, 0x55, 0x36, 0xb2, 0x61, 0x49, 0x96, 0x22, 0x91, 0xdd, 0x94, - 0x2e, 0x65, 0xba, 0xa7, 0xb0, 0x3a, 0x3d, 0xfa, 0x68, 0x07, 0x96, 0x4f, 0x42, 0x9e, 0x44, 0x64, - 0x7c, 0x9a, 0xff, 0x43, 0xa9, 0x26, 0x36, 0x8f, 0xf2, 0x4a, 0x98, 0x45, 0xf4, 0xd4, 0xf8, 0xc5, - 0xca, 0x76, 0x77, 0x61, 0x65, 0xb2, 0x0b, 0x78, 0xc2, 0x62, 0x4e, 0x6f, 0x5b, 0x06, 0xee, 0x3b, - 0x40, 0xe6, 0xf5, 0x2e, 0xa2, 0xcd, 0xe1, 0xb1, 0x66, 0xe6, 0xdb, 0x81, 0x66, 0x42, 0x38, 0xbf, - 0x61, 0x69, 0x4f, 0x55, 0x55, 0xb6, 0xeb, 0xc2, 0xca, 0xf9, 0x38, 0xa1, 0x65, 0x1e, 0x04, 0x0d, - 0x31, 0x4e, 0x54, 0x0e, 0xf9, 0xed, 0x1e, 0xc2, 0x3f, 0xb7, 0x0c, 0xdf, 0x1d, 0x54, 0x97, 0x60, - 0xe1, 0xf5, 0x30, 0x11, 0xe3, 0xfd, 0x2f, 0x0d, 0x68, 0x9e, 0x14, 0x3b, 0x18, 0xf9, 0xd0, 0xc8, - 0x4b, 0xa2, 0x35, 0x7d, 0x23, 0x64, 0x94, 0xb3, 0xa5, 0x0f, 0xa6, 0x38, 0xbd, 0x01, 0xd0, 0x1d, - 0xa3, 0xbf, 0x75, 0x54, 0x65, 0xcd, 0x39, 0xdb, 0xf3, 0x9d, 0x45, 0xa2, 0xe7, 0xd0, 0x2a, 0xd7, - 0x09, 0x72, 0x74, 0xe8, 0xec, 0x8e, 0x71, 0x66, 0xa9, 0xe5, 0x2b, 0x42, 0x5f, 0x73, 0x93, 0x42, - 0xe5, 0xf2, 0x57, 0xb1, 0x03, 0xd8, 0x9c, 0x2b, 0x1f, 0xda, 0x35, 0xd2, 0xfc, 0xe2, 0x72, 0x3b, - 0x0f, 0xee, 0x8c, 0x2b, 0xfa, 0x7b, 0x06, 0x8d, 0x7c, 0x84, 0xd0, 0xa6, 0x06, 0x18, 0xcf, 0x8b, - 0xa9, 0xef, 0xd4, 0xa4, 0x3d, 0x84, 0x85, 0x4e, 0xc4, 0xf8, 0x9c, 0x3f, 0x52, 0xe9, 0xe5, 
0x25, - 0x80, 0x7e, 0x0e, 0x4d, 0x1d, 0x2a, 0x8f, 0x64, 0x05, 0xeb, 0xd6, 0xbf, 0xd6, 0xac, 0xe3, 0xfd, - 0xf7, 0x4f, 0xfa, 0xa1, 0x18, 0x64, 0x5d, 0x2f, 0x60, 0x43, 0x7f, 0x40, 0xf8, 0x20, 0x0c, 0x58, - 0x9a, 0xf8, 0x23, 0x92, 0x45, 0xc2, 0x9f, 0xfb, 0x7a, 0x77, 0x17, 0xe5, 0x0e, 0x3e, 0xf8, 0x19, - 0x00, 0x00, 0xff, 0xff, 0xdb, 0x96, 0x8b, 0x5c, 0xdd, 0x07, 0x00, 0x00, + // 839 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0x96, 0xf3, 0xb3, 0x9b, 0x9c, 0x5d, 0xed, 0x26, 0xd3, 0x66, 0x65, 0xb9, 0x85, 0x46, 0x23, + 0x28, 0x8b, 0x10, 0x31, 0xda, 0x82, 0x0a, 0xbd, 0x00, 0xd1, 0x14, 0x15, 0x24, 0x58, 0xa1, 0x49, + 0x7b, 0x83, 0x90, 0xa2, 0x89, 0x33, 0x9b, 0x58, 0xeb, 0x78, 0x8c, 0x67, 0x92, 0x12, 0x9e, 0x80, + 0x37, 0xe0, 0x96, 0x7b, 0x5e, 0x84, 0x87, 0xe1, 0x21, 0x90, 0xc7, 0x1e, 0x7b, 0xfc, 0xb3, 0xad, + 0xd4, 0x85, 0x3b, 0x9f, 0x39, 0xe7, 0x3b, 0xf3, 0x9d, 0x5f, 0x0f, 0xbc, 0x27, 0x96, 0xd7, 0xee, + 0x92, 0x4a, 0xba, 0xa0, 0x82, 0xb9, 0xcb, 0x45, 0x14, 0x6c, 0x57, 0x7e, 0x98, 0x9f, 0x4c, 0xa2, + 0x98, 0x4b, 0x8e, 0x7a, 0x5a, 0xe1, 0x3c, 0x58, 0x71, 0xbe, 0x0a, 0x98, 0xab, 0xce, 0x17, 0xdb, + 0x2b, 0x57, 0xfa, 0x1b, 0x26, 0x24, 0xdd, 0x44, 0xa9, 0x29, 0xfe, 0x19, 0x86, 0xdf, 0x85, 0xbe, + 0xf4, 0x69, 0xe0, 0xff, 0xc6, 0x08, 0xfb, 0x65, 0xcb, 0x84, 0x44, 0x67, 0x70, 0xe0, 0xf1, 0xf0, + 0xca, 0x5f, 0xd9, 0xd6, 0xd8, 0x3a, 0x3f, 0x26, 0x99, 0x84, 0x3e, 0x82, 0xe1, 0x8e, 0xc5, 0xfe, + 0xd5, 0x7e, 0xee, 0xf1, 0x30, 0x64, 0x9e, 0xf4, 0x79, 0x68, 0xb7, 0xc6, 0xd6, 0x79, 0x8f, 0x0c, + 0x52, 0xc5, 0x34, 0x3f, 0x7f, 0xd2, 0xb2, 0x2d, 0x4c, 0xe0, 0x28, 0xf1, 0xfe, 0x5f, 0xfa, 0xc5, + 0x7f, 0x5b, 0x30, 0x9c, 0xc6, 0x8c, 0x4a, 0xf6, 0x52, 0xb0, 0x58, 0xbb, 0xfe, 0x14, 0x40, 0x48, + 0x2a, 0xd9, 0x86, 0x85, 0x52, 0x28, 0xf7, 0x47, 0x17, 0x77, 0x27, 0x3a, 0x0f, 0x93, 0x59, 0xae, + 0x23, 0x86, 0x1d, 0xfa, 0x1a, 0x4e, 0xb7, 0x82, 0xc5, 0x21, 0xdd, 0xb0, 0x79, 0xc6, 0xac, 0xa5, + 0xa0, 
0x76, 0x01, 0x7d, 0x99, 0x19, 0x4c, 0x95, 0x9e, 0x9c, 0x6c, 0x4b, 0x32, 0x7a, 0x02, 0xc0, + 0x7e, 0x8d, 0xfc, 0x98, 0x2a, 0xd2, 0x6d, 0x85, 0x76, 0x26, 0x69, 0xda, 0x27, 0x3a, 0xed, 0x93, + 0x17, 0x3a, 0xed, 0xc4, 0xb0, 0xc6, 0x7f, 0x5a, 0x30, 0x20, 0x2c, 0x64, 0xaf, 0x6e, 0x1f, 0x89, + 0x03, 0x3d, 0x4d, 0x4c, 0x85, 0xd0, 0x27, 0xb9, 0x7c, 0x2b, 0x8a, 0x0c, 0x86, 0x84, 0xed, 0xf8, + 0x35, 0xfb, 0x5f, 0x29, 0xe2, 0x2f, 0xe1, 0x3e, 0xe1, 0x89, 0x29, 0xe1, 0x5c, 0x4e, 0x63, 0xb6, + 0x64, 0x61, 0xd2, 0x93, 0x42, 0xdf, 0xf8, 0x6e, 0xe5, 0xc6, 0xf6, 0x79, 0xdf, 0xf4, 0x8d, 0xff, + 0x69, 0x01, 0x14, 0xd7, 0xa2, 0x47, 0x70, 0xc7, 0x4b, 0x5a, 0xc4, 0xe7, 0xe1, 0xbc, 0xc2, 0xb4, + 0xff, 0xb4, 0x65, 0x5b, 0x04, 0x69, 0xb5, 0x01, 0x7a, 0x0c, 0xa3, 0x98, 0xed, 0xb8, 0x57, 0x83, + 0xb5, 0x72, 0xd8, 0xdd, 0xc2, 0xa0, 0x7c, 0x5b, 0xcc, 0x83, 0x60, 0x41, 0xbd, 0x6b, 0x13, 0xd6, + 0x2e, 0x6e, 0xd3, 0x6a, 0x03, 0xf4, 0x31, 0x0c, 0xe2, 0xa4, 0xf4, 0x26, 0xa2, 0x93, 0x23, 0x4e, + 0x95, 0x6e, 0x56, 0x4a, 0x9e, 0xa6, 0x6c, 0x77, 0x55, 0xf8, 0xb9, 0x9c, 0x24, 0xa7, 0xe0, 0x65, + 0x1f, 0xa4, 0xc9, 0x29, 0x4e, 0x12, 0xac, 0x26, 0x60, 0x1f, 0xa6, 0x58, 0x2d, 0x23, 0x1b, 0x0e, + 0xd5, 0x55, 0x34, 0xb0, 0x7b, 0x4a, 0xa5, 0xc5, 0x14, 0x25, 0x53, 0x9f, 0x7d, 0x8d, 0x4a, 0x65, + 0x7c, 0x09, 0x27, 0xe5, 0xb1, 0x40, 0x63, 0x38, 0x7a, 0xe6, 0x8b, 0x28, 0xa0, 0xfb, 0xcb, 0xa4, + 0xbe, 0x2a, 0xd3, 0xc4, 0x3c, 0x4a, 0xfc, 0x11, 0x1e, 0xb0, 0x4b, 0xa3, 0xfc, 0x5a, 0xc6, 0x0f, + 0xe1, 0x38, 0xdd, 0x13, 0x22, 0xe2, 0xa1, 0x60, 0x37, 0x2d, 0x0a, 0xfc, 0x3d, 0x20, 0x73, 0xf4, + 0x33, 0x6b, 0xb3, 0xb1, 0xac, 0x4a, 0xef, 0x3b, 0xd0, 0x8b, 0xa8, 0x10, 0xaf, 0x78, 0xbc, 0xd4, + 0xb7, 0x6a, 0x19, 0x63, 0x38, 0x7e, 0xb1, 0x8f, 0x58, 0xee, 0x07, 0x41, 0x47, 0xee, 0x23, 0xed, + 0x43, 0x7d, 0xe3, 0xc7, 0xf0, 0xce, 0x0d, 0x8d, 0xf9, 0x06, 0xaa, 0x87, 0xd0, 0xfd, 0x66, 0x13, + 0xc9, 0x3d, 0xfe, 0x02, 0xee, 0x3d, 0x67, 0x21, 0x8b, 0xa9, 0x64, 0x4d, 0x78, 0x93, 0xa0, 0x55, + 0x21, 0xb8, 0x80, 0x41, 0xd2, 0x02, 0xbe, 
0x97, 0x84, 0x9b, 0x25, 0xfa, 0x2d, 0x83, 0x55, 0x3c, + 0x55, 0xea, 0x54, 0x5f, 0xf6, 0x48, 0x26, 0xe1, 0x3f, 0x2c, 0x18, 0xcd, 0x58, 0xd3, 0xcc, 0xbd, + 0xdd, 0x94, 0x7f, 0x0b, 0x48, 0x28, 0xce, 0xf3, 0x84, 0x56, 0x79, 0xab, 0x3a, 0x65, 0xb4, 0x19, + 0x17, 0x19, 0x88, 0xca, 0x09, 0xfe, 0x11, 0xce, 0xaa, 0xc4, 0x6e, 0x57, 0xf0, 0x8b, 0xbf, 0xba, + 0xd0, 0x7b, 0x96, 0xfd, 0x2a, 0x91, 0x0b, 0x9d, 0xa4, 0xfa, 0xe8, 0xb4, 0x20, 0xa5, 0x0a, 0xe6, + 0x9c, 0x15, 0x07, 0xa5, 0xf6, 0x78, 0x0e, 0x50, 0x34, 0x1f, 0xba, 0x57, 0x58, 0xd5, 0xfe, 0x46, + 0xce, 0xfd, 0x66, 0x65, 0xe6, 0xe8, 0x73, 0xe8, 0xe7, 0x5b, 0x1f, 0x19, 0x39, 0xa9, 0xfe, 0x0a, + 0x9c, 0x2a, 0xb5, 0x64, 0x93, 0x17, 0xdb, 0xd8, 0xa4, 0x50, 0xdb, 0xd1, 0x75, 0xec, 0x1a, 0x46, + 0x8d, 0x9d, 0x8c, 0x1e, 0x1a, 0x6e, 0x5e, 0xb3, 0x83, 0x9d, 0x0f, 0xde, 0x68, 0x97, 0xc5, 0xf7, + 0x19, 0x74, 0x92, 0x69, 0x46, 0xa3, 0x02, 0x60, 0xbc, 0x02, 0xcc, 0xfc, 0x96, 0x86, 0xfe, 0x43, + 0xe8, 0x4e, 0x03, 0x2e, 0x1a, 0x2a, 0x52, 0x8b, 0x65, 0x06, 0x27, 0xe5, 0xd6, 0x40, 0x0f, 0x8c, + 0xd6, 0x6a, 0xea, 0x66, 0x67, 0x7c, 0xb3, 0x41, 0x76, 0xff, 0x0f, 0x70, 0xa7, 0x61, 0x50, 0xeb, + 0x6c, 0xde, 0x2f, 0x0e, 0x5e, 0x37, 0xd8, 0x5f, 0x01, 0x14, 0x2f, 0x2b, 0xb3, 0x56, 0xb5, 0xf7, + 0x56, 0x2d, 0x3e, 0xdc, 0xfe, 0xbd, 0x65, 0x3d, 0xbd, 0xf8, 0xe9, 0x93, 0x95, 0x2f, 0xd7, 0xdb, + 0xc5, 0xc4, 0xe3, 0x1b, 0x77, 0x4d, 0xc5, 0xda, 0xf7, 0x78, 0x1c, 0xb9, 0x3b, 0xba, 0x0d, 0xa4, + 0xdb, 0xf8, 0x10, 0x5c, 0x1c, 0xa8, 0xdf, 0xf9, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, + 0xf5, 0x87, 0x73, 0x28, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -749,6 +957,8 @@ type DatabaseClient interface { RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) + GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) } @@ -823,6 +1033,24 @@ func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.Call return out, nil } +func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) { + out := new(SetCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/SetCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) { + out := new(GenerateCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/GenerateCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Deprecated: Do not use. 
func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) @@ -842,6 +1070,8 @@ type DatabaseServer interface { RotateRootCredentials(context.Context, *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) Init(context.Context, *InitRequest) (*InitResponse, error) Close(context.Context, *Empty) (*Empty, error) + SetCredentials(context.Context, *SetCredentialsRequest) (*SetCredentialsResponse, error) + GenerateCredentials(context.Context, *Empty) (*GenerateCredentialsResponse, error) Initialize(context.Context, *InitializeRequest) (*Empty, error) } @@ -870,6 +1100,12 @@ func (*UnimplementedDatabaseServer) Init(ctx context.Context, req *InitRequest) func (*UnimplementedDatabaseServer) Close(ctx context.Context, req *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") } +func (*UnimplementedDatabaseServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetCredentials not implemented") +} +func (*UnimplementedDatabaseServer) GenerateCredentials(ctx context.Context, req *Empty) (*GenerateCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateCredentials not implemented") +} func (*UnimplementedDatabaseServer) Initialize(ctx context.Context, req *InitializeRequest) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") } @@ -1004,6 +1240,42 @@ func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(inte return interceptor(ctx, in, info, handler) } +func _Database_SetCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetCredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(DatabaseServer).SetCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/SetCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).SetCredentials(ctx, req.(*SetCredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_GenerateCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).GenerateCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/GenerateCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).GenerateCredentials(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(InitializeRequest) if err := dec(in); err != nil { @@ -1054,6 +1326,14 @@ var _Database_serviceDesc = grpc.ServiceDesc{ MethodName: "Close", Handler: _Database_Close_Handler, }, + { + MethodName: "SetCredentials", + Handler: _Database_SetCredentials_Handler, + }, + { + MethodName: "GenerateCredentials", + Handler: _Database_GenerateCredentials_Handler, + }, { MethodName: "Initialize", Handler: _Database_Initialize_Handler, diff --git a/sdk/database/dbplugin/database.proto b/sdk/database/dbplugin/database.proto index c3f4831b0c32..d8c208099b36 100644 --- a/sdk/database/dbplugin/database.proto +++ b/sdk/database/dbplugin/database.proto @@ -18,7 +18,7 @@ message InitRequest { } message CreateUserRequest { - Statements statements = 1; + Statements statements = 1; 
UsernameConfig username_config = 2; google.protobuf.Timestamp expiration = 3; } @@ -44,14 +44,15 @@ message Statements { // DEPRECATED, will be removed in 0.12 string revocation_statements = 2 [deprecated=true]; // DEPRECATED, will be removed in 0.12 - string rollback_statements = 3 [deprecated=true]; + string rollback_statements = 3 [deprecated=true]; // DEPRECATED, will be removed in 0.12 string renew_statements = 4 [deprecated=true]; repeated string creation = 5; repeated string revocation = 6; - repeated string rollback = 7; + repeated string rollback = 7; repeated string renewal = 8; + repeated string rotation = 9; } message UsernameConfig { @@ -78,6 +79,26 @@ message RotateRootCredentialsResponse { message Empty {} +message GenerateCredentialsResponse { + string password = 1; +} + +message StaticUserConfig{ + string username = 1; + string password = 2; + bool create = 3; +} + +message SetCredentialsRequest { + Statements statements = 1; + StaticUserConfig static_user_config = 2; +} + +message SetCredentialsResponse { + string username = 1; + string password = 2; +} + service Database { rpc Type(Empty) returns (TypeResponse); rpc CreateUser(CreateUserRequest) returns (CreateUserResponse); @@ -86,6 +107,8 @@ service Database { rpc RotateRootCredentials(RotateRootCredentialsRequest) returns (RotateRootCredentialsResponse); rpc Init(InitRequest) returns (InitResponse); rpc Close(Empty) returns (Empty); + rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse); + rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse); rpc Initialize(InitializeRequest) returns (Empty) { option deprecated = true; diff --git a/sdk/database/dbplugin/databasemiddleware.go b/sdk/database/dbplugin/databasemiddleware.go index ba2dd4e5c4a0..19cfa3374b62 100644 --- a/sdk/database/dbplugin/databasemiddleware.go +++ b/sdk/database/dbplugin/databasemiddleware.go @@ -86,6 +86,24 @@ func (mw *databaseTracingMiddleware) Close() (err error) { return mw.next.Close() 
} +func (mw *databaseTracingMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("generate credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("generate credentials", "status", "started") + return mw.next.GenerateCredentials(ctx) +} + +func (mw *databaseTracingMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("set credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("set credentials", "status", "started") + return mw.next.SetCredentials(ctx, statements, staticConfig) +} + // ---- Metrics Middleware Domain ---- // databaseMetricsMiddleware wraps an implementation of Databases and on @@ -201,6 +219,38 @@ func (mw *databaseMetricsMiddleware) Close() (err error) { return mw.next.Close() } +func (mw *databaseMetricsMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "GenerateCredentials"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "GenerateCredentials"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "GenerateCredentials", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "GenerateCredentials", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "GenerateCredentials"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "GenerateCredentials"}, 1) + return mw.next.GenerateCredentials(ctx) +} + +func (mw *databaseMetricsMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", 
"SetCredentials"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "SetCredentials"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "SetCredentials", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "SetCredentials", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "SetCredentials"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "SetCredentials"}, 1) + return mw.next.SetCredentials(ctx, statements, staticConfig) +} + // ---- Error Sanitizer Middleware Domain ---- // DatabaseErrorSanitizerMiddleware wraps an implementation of Databases and @@ -273,3 +323,13 @@ func (mw *DatabaseErrorSanitizerMiddleware) sanitize(err error) error { } return err } + +func (mw *DatabaseErrorSanitizerMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) { + password, err = mw.next.GenerateCredentials(ctx) + return password, mw.sanitize(err) +} + +func (mw *DatabaseErrorSanitizerMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) { + username, password, err = mw.next.SetCredentials(ctx, statements, staticConfig) + return username, password, mw.sanitize(err) +} diff --git a/sdk/database/dbplugin/grpc_transport.go b/sdk/database/dbplugin/grpc_transport.go index 1b3fe7f47383..bfd848021c2e 100644 --- a/sdk/database/dbplugin/grpc_transport.go +++ b/sdk/database/dbplugin/grpc_transport.go @@ -15,7 +15,8 @@ import ( ) var ( - ErrPluginShutdown = errors.New("plugin shutdown") + ErrPluginShutdown = errors.New("plugin shutdown") + ErrPluginStaticUnsupported = errors.New("database plugin does not support Static Accounts") ) // ---- gRPC Server domain ---- @@ -115,6 +116,30 @@ func (s *gRPCServer) Close(_ context.Context, _ *Empty) (*Empty, error) { return &Empty{}, nil } +func (s *gRPCServer) GenerateCredentials(ctx context.Context, _ *Empty) (*GenerateCredentialsResponse, error) { + p, err 
:= s.impl.GenerateCredentials(ctx) + if err != nil { + return nil, err + } + + return &GenerateCredentialsResponse{ + Password: p, + }, nil +} + +func (s *gRPCServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { + + username, password, err := s.impl.SetCredentials(ctx, *req.Statements, *req.StaticUserConfig) + if err != nil { + return nil, err + } + + return &SetCredentialsResponse{ + Username: username, + Password: password, + }, err +} + // ---- gRPC client domain ---- type gRPCClient struct { @@ -283,3 +308,51 @@ func (c *gRPCClient) Close() error { _, err := c.client.Close(c.doneCtx, &Empty{}) return err } + +func (c *gRPCClient) GenerateCredentials(ctx context.Context) (string, error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.GenerateCredentials(ctx, &Empty{}) + if err != nil { + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return "", ErrPluginStaticUnsupported + } + + if c.doneCtx.Err() != nil { + return "", ErrPluginShutdown + } + return "", err + } + + return resp.Password, nil +} +func (c *gRPCClient) SetCredentials(ctx context.Context, statements Statements, staticUser StaticUserConfig) (username, password string, err error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.SetCredentials(ctx, &SetCredentialsRequest{ + StaticUserConfig: &staticUser, + Statements: &statements, + }) + + if err != nil { + // Fall back to old call if not implemented + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return "", "", ErrPluginStaticUnsupported + } + + if c.doneCtx.Err() != nil { + return "", "", ErrPluginShutdown + } + return "", "", err + } + + return resp.Username, 
resp.Password, err +} diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go index 6d248d15af00..957cf3f489ca 100644 --- a/sdk/database/dbplugin/plugin.go +++ b/sdk/database/dbplugin/plugin.go @@ -44,6 +44,19 @@ type Database interface { // the API. RotateRootCredentials(ctx context.Context, statements []string) (config map[string]interface{}, err error) + // GenerateCredentials returns a generated password for the plugin. This is + // used in combination with SetCredentials to set a specific password for a + // database user and preserve the password in WAL entries. + GenerateCredentials(ctx context.Context) (string, error) + + // SetCredentials uses provided information to create or set the credentials + // for a database user. Unlike CreateUser, this method requires both a + // username and a password given instead of generating them. This is used for + // creating and setting the password of static accounts, as well as rolling + // back passwords in the database in the event an updated database fails to + // save in Vault's storage. + SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username string, password string, err error) + // Init is called on `$ vault write database/config/:db-name`, or when you // do a creds call after Vault's been restarted. 
The config provided won't // hold all the keys and values provided in the API call, some will be diff --git a/sdk/database/helper/connutil/sql.go b/sdk/database/helper/connutil/sql.go index 44917e0d5667..05de1b4ffa4e 100644 --- a/sdk/database/helper/connutil/sql.go +++ b/sdk/database/helper/connutil/sql.go @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/mitchellh/mapstructure" @@ -162,3 +163,13 @@ func (c *SQLConnectionProducer) Close() error { return nil } + +// SetCredentials uses provided information to set/create a user in the +// database. Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. +func (c *SQLConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + return "", "", dbutil.Unimplemented() +} diff --git a/sdk/database/helper/dbutil/dbutil.go b/sdk/database/helper/dbutil/dbutil.go index 725338112d18..84b98d188967 100644 --- a/sdk/database/helper/dbutil/dbutil.go +++ b/sdk/database/helper/dbutil/dbutil.go @@ -6,10 +6,13 @@ import ( "strings" "github.com/hashicorp/vault/sdk/database/dbplugin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( ErrEmptyCreationStatement = errors.New("empty creation statements") + ErrEmptyRotationStatement = errors.New("empty rotation statements") ) // Query templates a query for us. 
@@ -50,3 +53,8 @@ func StatementCompatibilityHelper(statements dbplugin.Statements) dbplugin.State } return statements } + +// Unimplemented returns a gRPC error with the Unimplemented code +func Unimplemented() error { + return status.Error(codes.Unimplemented, "Not yet implemented") +} diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go index 1482826d2b3f..721d23f3e776 100644 --- a/sdk/framework/backend.go +++ b/sdk/framework/backend.go @@ -2,7 +2,6 @@ package framework import ( "context" - "encoding/json" "fmt" "io/ioutil" "net/http" @@ -418,16 +417,27 @@ func (b *Backend) handleRevokeRenew(ctx context.Context, req *logical.Request) ( } } -// handleRollback invokes the PeriodicFunc set on the backend. It also does a WAL rollback operation. +// handleRollback invokes the PeriodicFunc set on the backend. It also does a +// WAL rollback operation. func (b *Backend) handleRollback(ctx context.Context, req *logical.Request) (*logical.Response, error) { // Response is not expected from the periodic operation. + var resp *logical.Response + + merr := new(multierror.Error) if b.PeriodicFunc != nil { if err := b.PeriodicFunc(ctx, req); err != nil { - return nil, err + merr = multierror.Append(merr, err) } } - return b.handleWALRollback(ctx, req) + if b.WALRollback != nil { + var err error + resp, err = b.handleWALRollback(ctx, req) + if err != nil { + merr = multierror.Append(merr, err) + } + } + return resp, merr.ErrorOrNil() } func (b *Backend) handleAuthRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) { @@ -519,19 +529,9 @@ type FieldSchema struct { // dynamic UI generation. AllowedValues []interface{} - // Display* members are available to provide hints for UI and documentation - // generators. They will be included in OpenAPI output if set. - - // DisplayName is the name of the field suitable as a label or documentation heading. - DisplayName string - - // DisplayValue is a sample value to display for this field. 
This may be used - // to indicate a default value, but it is for display only and completely separate - // from any Default member handling. - DisplayValue interface{} - - // DisplaySensitive indicates that the value should be masked by default in the UI. - DisplaySensitive bool + // DisplayAttrs provides hints for UI and documentation generators. They + // will be included in OpenAPI output if set. + DisplayAttrs *DisplayAttributes } // DefaultOrZero returns the default value if it is set, or otherwise @@ -539,35 +539,12 @@ type FieldSchema struct { func (s *FieldSchema) DefaultOrZero() interface{} { if s.Default != nil { switch s.Type { - case TypeDurationSecond: - var result int - switch inp := s.Default.(type) { - case nil: - return s.Type.Zero() - case int: - result = inp - case int64: - result = int(inp) - case float32: - result = int(inp) - case float64: - result = int(inp) - case string: - dur, err := parseutil.ParseDurationSecond(inp) - if err != nil { - return s.Type.Zero() - } - result = int(dur.Seconds()) - case json.Number: - valInt64, err := inp.Int64() - if err != nil { - return s.Type.Zero() - } - result = int(valInt64) - default: + case TypeDurationSecond, TypeSignedDurationSecond: + resultDur, err := parseutil.ParseDurationSecond(s.Default) + if err != nil { return s.Type.Zero() } - return result + return int(resultDur.Seconds()) default: return s.Default @@ -590,7 +567,7 @@ func (t FieldType) Zero() interface{} { return map[string]interface{}{} case TypeKVPairs: return map[string]string{} - case TypeDurationSecond: + case TypeDurationSecond, TypeSignedDurationSecond: return 0 case TypeSlice: return []interface{}{} diff --git a/sdk/framework/backend_test.go b/sdk/framework/backend_test.go index f32e61cc66b4..24d2a60d3440 100644 --- a/sdk/framework/backend_test.go +++ b/sdk/framework/backend_test.go @@ -564,11 +564,70 @@ func TestFieldSchemaDefaultOrZero(t *testing.T) { 60, }, + "illegal default duration string": { + &FieldSchema{Type: 
TypeDurationSecond, Default: "h1"}, + 0, + }, + + "default duration time.Duration": { + &FieldSchema{Type: TypeDurationSecond, Default: 60 * time.Second}, + 60, + }, + "default duration not set": { &FieldSchema{Type: TypeDurationSecond}, 0, }, + "default signed positive duration set": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: 60}, + 60, + }, + + "default signed positive duration int64": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: int64(60)}, + 60, + }, + + "default signed positive duration string": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: "60s"}, + 60, + }, + + "illegal default signed duration string": { + &FieldSchema{Type: TypeDurationSecond, Default: "-h1"}, + 0, + }, + + "default signed positive duration time.Duration": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: 60 * time.Second}, + 60, + }, + + "default signed negative duration set": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: -60}, + -60, + }, + + "default signed negative duration int64": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: int64(-60)}, + -60, + }, + + "default signed negative duration string": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: "-60s"}, + -60, + }, + + "default signed negative duration time.Duration": { + &FieldSchema{Type: TypeSignedDurationSecond, Default: -60 * time.Second}, + -60, + }, + + "default signed negative duration not set": { + &FieldSchema{Type: TypeSignedDurationSecond}, + 0, + }, "default header not set": { &FieldSchema{Type: TypeHeader}, http.Header{}, @@ -578,7 +637,7 @@ func TestFieldSchemaDefaultOrZero(t *testing.T) { for name, tc := range cases { actual := tc.Schema.DefaultOrZero() if !reflect.DeepEqual(actual, tc.Value) { - t.Fatalf("bad: %s\n\nExpected: %#v\nGot: %#v", + t.Errorf("bad: %s\n\nExpected: %#v\nGot: %#v", name, tc.Value, actual) } } diff --git a/sdk/framework/field_data.go b/sdk/framework/field_data.go index f55c5334fd94..46b120c43251 100644 --- 
a/sdk/framework/field_data.go +++ b/sdk/framework/field_data.go @@ -38,8 +38,8 @@ func (d *FieldData) Validate() error { } switch schema.Type { - case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, TypeLowerCaseString, - TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, + case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeSignedDurationSecond, TypeString, + TypeLowerCaseString, TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, TypeKVPairs, TypeCommaIntSlice, TypeHeader: _, _, err := d.getPrimitive(field, schema) if err != nil { @@ -131,8 +131,8 @@ func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) { } switch schema.Type { - case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, TypeLowerCaseString, - TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, + case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeSignedDurationSecond, TypeString, + TypeLowerCaseString, TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, TypeKVPairs, TypeCommaIntSlice, TypeHeader: return d.getPrimitive(k, schema) default: @@ -147,7 +147,7 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo return nil, false, nil } - switch schema.Type { + switch t := schema.Type; t { case TypeBool: var result bool if err := mapstructure.WeakDecode(raw, &result); err != nil { @@ -197,43 +197,19 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo } return result, true, nil - case TypeDurationSecond: + case TypeDurationSecond, TypeSignedDurationSecond: var result int switch inp := raw.(type) { case nil: return nil, false, nil - case int: - result = inp - case int32: - result = int(inp) - case int64: - result = int(inp) - case uint: - result = int(inp) - case uint32: - result = int(inp) - case uint64: - result = int(inp) - case float32: - result = int(inp) - case float64: - result = int(inp) - case string: + default: dur, err := 
parseutil.ParseDurationSecond(inp) if err != nil { return nil, false, err } result = int(dur.Seconds()) - case json.Number: - valInt64, err := inp.Int64() - if err != nil { - return nil, false, err - } - result = int(valInt64) - default: - return nil, false, fmt.Errorf("invalid input '%v'", raw) } - if result < 0 { + if t == TypeDurationSecond && result < 0 { return nil, false, fmt.Errorf("cannot provide negative value '%d'", result) } return result, true, nil diff --git a/sdk/framework/field_data_test.go b/sdk/framework/field_data_test.go index 458d2a42f4a2..b1ef7aa266a2 100644 --- a/sdk/framework/field_data_test.go +++ b/sdk/framework/field_data_test.go @@ -202,6 +202,127 @@ func TestFieldDataGet(t *testing.T) { 0, }, + "duration type, 0 value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": 0, + }, + "foo", + 0, + }, + + "signed duration type, positive string value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "42", + }, + "foo", + 42, + }, + + "signed duration type, positive string duration value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "42m", + }, + "foo", + 2520, + }, + + "signed duration type, positive int value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": 42, + }, + "foo", + 42, + }, + + "signed duration type, positive float value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": 42.0, + }, + "foo", + 42, + }, + + "signed duration type, negative string value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42", + }, + "foo", + -42, + }, + + "signed duration type, negative string duration 
value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42m", + }, + "foo", + -2520, + }, + + "signed duration type, negative int value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": -42, + }, + "foo", + -42, + }, + + "signed duration type, negative float value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": -42.0, + }, + "foo", + -42, + }, + + "signed duration type, nil value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": nil, + }, + "foo", + 0, + }, + + "signed duration type, 0 value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": 0, + }, + "foo", + 0, + }, + "slice type, empty slice": { map[string]*FieldSchema{ "foo": &FieldSchema{Type: TypeSlice}, @@ -628,6 +749,15 @@ func TestFieldDataGet(t *testing.T) { 0, }, + "type signed duration second, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{}, + "foo", + 0, + }, + "type slice, not supplied": { map[string]*FieldSchema{ "foo": {Type: TypeSlice}, @@ -675,21 +805,23 @@ func TestFieldDataGet(t *testing.T) { } for name, tc := range cases { - data := &FieldData{ - Raw: tc.Raw, - Schema: tc.Schema, - } - - if err := data.Validate(); err != nil { - t.Fatalf("bad: %s", err) - } - - actual := data.Get(tc.Key) - if !reflect.DeepEqual(actual, tc.Value) { - t.Fatalf( - "bad: %s\n\nExpected: %#v\nGot: %#v", - name, tc.Value, actual) - } + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + if err := data.Validate(); err != nil { + t.Fatalf("bad: %s", err) + } + + actual := data.Get(tc.Key) + 
if !reflect.DeepEqual(actual, tc.Value) { + t.Fatalf("Expected: %#v\nGot: %#v", tc.Value, actual) + } + }) } } @@ -744,18 +876,58 @@ func TestFieldDataGet_Error(t *testing.T) { }, "foo", }, + "duration type, negative string value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42", + }, + "foo", + }, + "duration type, negative string duration value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42m", + }, + "foo", + }, + "duration type, negative int value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": -42, + }, + "foo", + }, + "duration type, negative float value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": -42.0, + }, + "foo", + }, } - for _, tc := range cases { - data := &FieldData{ - Raw: tc.Raw, - Schema: tc.Schema, - } - - _, _, err := data.GetOkErr(tc.Key) - if err == nil { - t.Fatalf("error expected, none received") - } + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + got, _, err := data.GetOkErr(tc.Key) + if err == nil { + t.Fatalf("error expected, none received, got result: %#v", got) + } + }) } } diff --git a/sdk/framework/field_type.go b/sdk/framework/field_type.go index 64a6a56dc08c..2e1121ca6d29 100644 --- a/sdk/framework/field_type.go +++ b/sdk/framework/field_type.go @@ -14,6 +14,11 @@ const ( // integer or go duration format string (e.g. 24h) TypeDurationSecond + // TypeSignedDurationSecond represents a positive or negative duration + // as seconds, this can be either an integer or go duration format + // string (e.g. 24h). 
+ TypeSignedDurationSecond + // TypeSlice represents a slice of any type TypeSlice @@ -66,7 +71,7 @@ func (t FieldType) String() string { return "map" case TypeKVPairs: return "keypair" - case TypeDurationSecond: + case TypeDurationSecond, TypeSignedDurationSecond: return "duration (sec)" case TypeSlice, TypeStringSlice, TypeCommaStringSlice, TypeCommaIntSlice: return "slice" diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index 37de53fdbebb..1fa238999a63 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -98,11 +98,13 @@ type OASLicense struct { } type OASPathItem struct { - Description string `json:"description,omitempty"` - Parameters []OASParameter `json:"parameters,omitempty"` - Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"` - Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"` - CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"` + Description string `json:"description,omitempty"` + Parameters []OASParameter `json:"parameters,omitempty"` + Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"` + Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"` + CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"` + DisplayNavigation bool `json:"x-vault-displayNavigation,omitempty" mapstructure:"x-vault-displayNavigation"` + DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"` Get *OASOperation `json:"get,omitempty"` Post *OASOperation `json:"post,omitempty"` @@ -156,16 +158,18 @@ type OASSchema struct { // approach than OASParameter (unfortunately), but is how JSONSchema handles 'required'. 
Required []string `json:"required,omitempty"` - Items *OASSchema `json:"items,omitempty"` - Format string `json:"format,omitempty"` - Pattern string `json:"pattern,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` - DisplayValue interface{} `json:"x-vault-displayValue,omitempty" mapstructure:"x-vault-displayValue,omitempty"` - DisplaySensitive bool `json:"x-vault-displaySensitive,omitempty" mapstructure:"x-vault-displaySensitive,omitempty"` + Items *OASSchema `json:"items,omitempty"` + Format string `json:"format,omitempty"` + Pattern string `json:"pattern,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + //DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` + DisplayValue interface{} `json:"x-vault-displayValue,omitempty" mapstructure:"x-vault-displayValue,omitempty"` + DisplaySensitive bool `json:"x-vault-displaySensitive,omitempty" mapstructure:"x-vault-displaySensitive,omitempty"` + DisplayGroup string `json:"x-vault-displayGroup,omitempty" mapstructure:"x-vault-displayGroup,omitempty"` + DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs,omitempty"` } type OASResponse struct { @@ -230,6 +234,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, backendType logical.Back pi.Sudo = specialPathMatch(path, sudoPaths) pi.Unauthenticated = specialPathMatch(path, unauthPaths) + pi.DisplayAttrs = p.DisplayAttrs // If the newer style Operations map isn't defined, create one from the legacy fields. 
operations := p.Operations @@ -263,13 +268,11 @@ func documentPath(p *Path, specialPaths *logical.Paths, backendType logical.Back Description: cleanString(field.Description), In: location, Schema: &OASSchema{ - Type: t.baseType, - Pattern: t.pattern, - Enum: field.AllowedValues, - Default: field.Default, - DisplayName: field.DisplayName, - DisplayValue: field.DisplayValue, - DisplaySensitive: field.DisplaySensitive, + Type: t.baseType, + Pattern: t.pattern, + Enum: field.AllowedValues, + Default: field.Default, + DisplayAttrs: field.DisplayAttrs, }, Required: required, Deprecated: field.Deprecated, @@ -325,16 +328,14 @@ func documentPath(p *Path, specialPaths *logical.Paths, backendType logical.Back } p := OASSchema{ - Type: openapiField.baseType, - Description: cleanString(field.Description), - Format: openapiField.format, - Pattern: openapiField.pattern, - Enum: field.AllowedValues, - Default: field.Default, - Deprecated: field.Deprecated, - DisplayName: field.DisplayName, - DisplayValue: field.DisplayValue, - DisplaySensitive: field.DisplaySensitive, + Type: openapiField.baseType, + Description: cleanString(field.Description), + Format: openapiField.format, + Pattern: openapiField.pattern, + Enum: field.AllowedValues, + Default: field.Default, + Deprecated: field.Deprecated, + DisplayAttrs: field.DisplayAttrs, } if openapiField.baseType == "array" { p.Items = &OASSchema{ @@ -551,7 +552,7 @@ func convertType(t FieldType) schemaType { ret.format = "lowercase" case TypeInt: ret.baseType = "integer" - case TypeDurationSecond: + case TypeDurationSecond, TypeSignedDurationSecond: ret.baseType = "integer" ret.format = "seconds" case TypeBool: diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go index b9990902f6dc..69e91b7ec387 100644 --- a/sdk/framework/openapi_test.go +++ b/sdk/framework/openapi_test.go @@ -335,13 +335,16 @@ func TestOpenAPI_Paths(t *testing.T) { Description: "the name", }, "age": { - Type: TypeInt, - Description: "the age", - 
AllowedValues: []interface{}{1, 2, 3}, - Required: true, - DisplayName: "Age", - DisplayValue: 7, - DisplaySensitive: true, + Type: TypeInt, + Description: "the age", + AllowedValues: []interface{}{1, 2, 3}, + Required: true, + DisplayAttrs: &DisplayAttributes{ + Name: "Age", + Sensitive: true, + Group: "Some Group", + Value: 7, + }, }, "x-abc-token": { Type: TypeHeader, @@ -378,6 +381,9 @@ func TestOpenAPI_Paths(t *testing.T) { Unpublished: true, }, }, + DisplayAttrs: &DisplayAttributes{ + Navigation: true, + }, } sp := &logical.Paths{ diff --git a/sdk/framework/path.go b/sdk/framework/path.go index 419ce34f6ca2..a8e5f56ffffb 100644 --- a/sdk/framework/path.go +++ b/sdk/framework/path.go @@ -112,6 +112,10 @@ type Path struct { // be automatically line-wrapped at 80 characters. HelpSynopsis string HelpDescription string + + // DisplayAttrs provides hints for UI and documentation generators. They + // will be included in OpenAPI output if set. + DisplayAttrs *DisplayAttributes } // OperationHandler defines and describes a specific operation handler. @@ -148,6 +152,32 @@ type OperationProperties struct { // Deprecated indicates that this operation should be avoided. Deprecated bool + + // DisplayAttrs provides hints for UI and documentation generators. They + // will be included in OpenAPI output if set. + DisplayAttrs *DisplayAttributes +} + +type DisplayAttributes struct { + // Name is the name of the field suitable as a label or documentation heading. + Name string `json:"name,omitempty"` + + // Value is a sample value to display for this field. This may be used + // to indicate a default value, but it is for display only and completely separate + // from any Default member handling. + Value interface{} `json:"value,omitempty"` + + // Sensitive indicates that the value should be masked by default in the UI. 
+ Sensitive bool `json:"sensitive,omitempty"` + + // Navigation indicates that the path should be available as a navigation tab + Navigation bool `json:"navigation,omitempty"` + + // Group is the suggested UI group to place this field in. + Group string `json:"group,omitempty"` + + // Action is the verb to use for the operation. + Action string `json:"action,omitempty"` } // RequestExample is example of request data. @@ -227,6 +257,7 @@ func (p *Path) helpCallback(b *Backend) OperationFunc { Key: k, Type: schema.Type.String(), Description: description, + Deprecated: schema.Deprecated, } } @@ -256,6 +287,7 @@ type pathTemplateData struct { type pathTemplateFieldData struct { Key string Type string + Deprecated bool Description string URL bool } @@ -270,8 +302,11 @@ Matching Route: {{.RoutePattern}} ## PARAMETERS {{range .Fields}} {{indent 4 .Key}} ({{.Type}}) +{{if .Deprecated}} +{{printf "(DEPRECATED) %s" .Description | indent 8}} +{{else}} {{indent 8 .Description}} -{{end}}{{end}} +{{end}}{{end}}{{end}} ## DESCRIPTION {{.Description}} diff --git a/sdk/framework/path_map.go b/sdk/framework/path_map.go index bb675cee0108..8e1b91864b77 100644 --- a/sdk/framework/path_map.go +++ b/sdk/framework/path_map.go @@ -10,6 +10,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use +// with some modern Vault features, and imposes specific API designs. +// // PathMap can be used to generate a path that stores mappings in the // storage. It is a structure that also exports functions for querying the // mappings. diff --git a/sdk/framework/policy_map.go b/sdk/framework/policy_map.go index a20405561e86..7657b4b0a92d 100644 --- a/sdk/framework/policy_map.go +++ b/sdk/framework/policy_map.go @@ -8,6 +8,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use +// with some modern Vault features, and imposes specific API designs. 
+// // PolicyMap is a specialization of PathMap that expects the values to // be lists of policies. This assists in querying and loading policies // from the PathMap. diff --git a/sdk/framework/testdata/operations.json b/sdk/framework/testdata/operations.json index f889f118237e..4c140f92b732 100644 --- a/sdk/framework/testdata/operations.json +++ b/sdk/framework/testdata/operations.json @@ -14,6 +14,9 @@ "description": "Synopsis", "x-vault-createSupported": true, "x-vault-sudo": true, + "x-vault-displayAttrs": { + "navigation": true + }, "parameters": [ { "name": "format", @@ -77,9 +80,12 @@ "type": "integer", "description": "the age", "enum": [1, 2, 3], - "x-vault-displayName": "Age", - "x-vault-displayValue": 7, - "x-vault-displaySensitive": true + "x-vault-displayAttrs": { + "name": "Age", + "sensitive": true, + "group": "Some Group", + "value": 7 + } }, "name": { "type": "string", diff --git a/sdk/go.mod b/sdk/go.mod index 30f962040d08..104e5130650e 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -8,14 +8,13 @@ require ( github.com/fatih/structs v1.1.0 github.com/go-ldap/ldap v3.0.2+incompatible github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 - github.com/gogo/protobuf v1.2.1 github.com/golang/protobuf v1.3.1 github.com/golang/snappy v0.0.1 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-hclog v0.8.0 github.com/hashicorp/go-immutable-radix v1.0.0 github.com/hashicorp/go-multierror v1.0.0 - github.com/hashicorp/go-plugin v1.0.0 + github.com/hashicorp/go-plugin v1.0.1 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-version v1.1.0 diff --git a/sdk/go.sum b/sdk/go.sum index 2238d1fea1ac..b7de3f3fb5a3 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -18,8 +18,6 @@ github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvI github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-test/deep 
v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -38,8 +36,8 @@ github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxB github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.0.0 h1:/gQ1sNR8/LHpoxKRQq4PmLBuacfZb4tC93e9B30o/7c= -github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -56,8 +54,6 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d 
h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= @@ -123,7 +119,6 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index d1cc28159633..4a35f88dca5e 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -9,16 +9,24 @@ import ( "crypto/rsa" "crypto/sha1" "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" "encoding/pem" "errors" "fmt" "math/big" + "net" + "net/url" "strconv" "strings" + "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/mitchellh/mapstructure" + 
"golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" ) // GetHexFormatted returns the byte buffer formatted in hex with @@ -275,7 +283,7 @@ func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) { } } -// PasrsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs +// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs func ParsePublicKeyPEM(data []byte) (interface{}, error) { block, data := pem.Decode(data) if block != nil { @@ -299,3 +307,500 @@ func ParsePublicKeyPEM(data []byte) (interface{}, error) { return nil, errors.New("data does not contain any valid RSA or ECDSA public keys") } + +// addPolicyIdentifiers adds certificate policies extension +// +func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.PolicyIdentifiers { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) + } + } +} + +// addExtKeyUsageOids adds custom extended key usage OIDs to certificate +func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.ExtKeyUsageOIDs { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid) + } + } +} + +func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { + certTemplate := &x509.Certificate{ + DNSNames: in.DNSNames, + IPAddresses: in.IPAddresses, + EmailAddresses: in.EmailAddresses, + URIs: in.URIs, + } + if err := HandleOtherSANs(certTemplate, sans); err != nil { + return err + } + if len(certTemplate.ExtraExtensions) > 0 { + for _, v := range certTemplate.ExtraExtensions { + in.ExtraExtensions = append(in.ExtraExtensions, v) + } + } + return nil +} + +func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error { + // If other SANs is empty we return which 
causes normal Go stdlib parsing + // of the other SAN types + if len(sans) == 0 { + return nil + } + + var rawValues []asn1.RawValue + + // We need to generate an IMPLICIT sequence for compatibility with OpenSSL + // -- it's an open question what the default for RFC 5280 actually is, see + // https://github.com/openssl/openssl/issues/5091 -- so we have to use + // cryptobyte because using the asn1 package's marshaling always produces + // an EXPLICIT sequence. Note that asn1 is way too magical according to + // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into + // boringssl. + for oid, vals := range sans { + for _, val := range vals { + var b cryptobyte.Builder + oidStr, err := StringToOid(oid) + if err != nil { + return err + } + b.AddASN1ObjectIdentifier(oidStr) + b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) { + b.AddBytes([]byte(val)) + }) + }) + m, err := b.Bytes() + if err != nil { + return err + } + rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) + } + } + + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(rawValues) == 0 { + return nil + } + + // Append any existing SANs, sans marshalling + rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) 
+ + // Marshal and add to ExtraExtensions + ext := pkix.Extension{ + // This is the defined OID for subjectAltName + Id: asn1.ObjectIdentifier{2, 5, 29, 17}, + } + var err error + ext.Value, err = asn1.Marshal(rawValues) + if err != nil { + return err + } + in.ExtraExtensions = append(in.ExtraExtensions, ext) + + return nil +} + +// Note: Taken from the Go source code since it's not public, and used in the +// modified function below (which also uses these consts upstream) +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// Note: Taken from the Go source code since it's not public, plus changed to not marshal +// marshalSANs marshals a list of addresses into a the contents of an X.509 +// SubjectAlternativeName extension. +func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { + var rawValues []asn1.RawValue + for _, name := range dnsNames { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) + } + for _, email := range emailAddresses { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) + } + for _, rawIP := range ipAddresses { + // If possible, we always want to encode IPv4 addresses in 4 bytes. 
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) + } + return rawValues +} + +func StringToOid(in string) (asn1.ObjectIdentifier, error) { + split := strings.Split(in, ".") + ret := make(asn1.ObjectIdentifier, 0, len(split)) + for _, v := range split { + i, err := strconv.Atoi(v) + if err != nil { + return nil, err + } + ret = append(ret, i) + } + return asn1.ObjectIdentifier(ret), nil +} + +func ValidateKeyTypeLength(keyType string, keyBits int) error { + switch keyType { + case "rsa": + switch keyBits { + case 2048: + case 4096: + case 8192: + default: + return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits) + } + case "ec": + switch keyBits { + case 224: + case 256: + case 384: + case 521: + default: + return fmt.Errorf("unsupported bit length for EC key: %d", keyBits) + } + case "any": + default: + return fmt.Errorf("unknown key type %s", keyType) + } + + return nil +} + +// Performs the heavy lifting of creating a certificate. Returns +// a fully-filled-in ParsedCertBundle. 
+func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + var err error + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + if err := GeneratePrivateKey(data.Params.KeyType, + data.Params.KeyBits, + result); err != nil { + return nil, err + } + + subjKeyID, err := GetSubjKeyID(result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)} + } + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + IsCA: false, + SubjectKeyId: subjKeyID, + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + // Add this before calling addKeyUsages + if data.SigningBundle == nil { + certTemplate.IsCA = true + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + // This will only be filled in from the generation paths + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = 
data.Params.URLs.OCSPServers + + var certBytes []byte + if data.SigningBundle != nil { + switch data.SigningBundle.PrivateKeyType { + case RSAPrivateKey: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case ECPrivateKey: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + caCert := data.SigningBundle.Certificate + certTemplate.AuthorityKeyId = caCert.SubjectKeyId + + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey) + } else { + // Creating a self-signed root + if data.Params.MaxPathLength == 0 { + certTemplate.MaxPathLen = 0 + certTemplate.MaxPathLenZero = true + } else { + certTemplate.MaxPathLen = data.Params.MaxPathLength + } + + switch data.Params.KeyType { + case "rsa": + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case "ec": + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + certTemplate.AuthorityKeyId = subjKeyID + certTemplate.BasicConstraintsValid = true + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) + } + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + if data.SigningBundle != nil { + if len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId) { + + result.CAChain = []*CertBlock{ + &CertBlock{ + Certificate: data.SigningBundle.Certificate, + Bytes: data.SigningBundle.CertificateBytes, + }, + } + result.CAChain = append(result.CAChain, data.SigningBundle.CAChain...) 
+ } + } + + return result, nil +} + +var oidExtensionBasicConstraints = []int{2, 5, 29, 19} + +// Creates a CSR. This is currently only meant for use when +// generating an intermediate certificate. +func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) { + var err error + result := &ParsedCSRBundle{} + + if err := GeneratePrivateKey(data.Params.KeyType, + data.Params.KeyBits, + result); err != nil { + return nil, err + } + + // Like many root CAs, other information is ignored + csrTemplate := &x509.CertificateRequest{ + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + + if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + if addBasicConstraints { + type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` + } + val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1}) + if err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} + } + ext := pkix.Extension{ + Id: oidExtensionBasicConstraints, + Value: val, + Critical: true, + } + csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext) + } + + switch data.Params.KeyType { + case "rsa": + csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case "ec": + csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CSRBytes = csr + result.CSR, err = x509.ParseCertificateRequest(csr) + if err != nil { + return nil, errutil.InternalError{Err: 
fmt.Sprintf("unable to parse created certificate: %v", err)} + } + + return result, nil +} + +// Performs the heavy lifting of generating a certificate from a CSR. +// Returns a ParsedCertBundle sans private keys. +func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + switch { + case data == nil: + return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"} + case data.Params == nil: + return nil, errutil.UserError{Err: "nil parameters given to signCertificate"} + case data.SigningBundle == nil: + return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"} + case data.CSR == nil: + return nil, errutil.UserError{Err: "nil csr given to signCertificate"} + } + + err := data.CSR.CheckSignature() + if err != nil { + return nil, errutil.UserError{Err: "request signature invalid"} + } + + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(data.CSR.PublicKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} + } + subjKeyID := sha1.Sum(marshaledKey) + + caCert := data.SigningBundle.Certificate + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: data.Params.Subject, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + SubjectKeyId: subjKeyID[:], + AuthorityKeyId: caCert.SubjectKeyId, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + switch data.SigningBundle.PrivateKeyType { + case RSAPrivateKey: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case ECPrivateKey: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + } + + if data.Params.UseCSRValues { + certTemplate.Subject = data.CSR.Subject + certTemplate.Subject.ExtraNames = certTemplate.Subject.Names + + certTemplate.DNSNames = 
data.CSR.DNSNames + certTemplate.EmailAddresses = data.CSR.EmailAddresses + certTemplate.IPAddresses = data.CSR.IPAddresses + certTemplate.URIs = data.CSR.URIs + + for _, name := range data.CSR.Extensions { + if !name.Id.Equal(oidExtensionBasicConstraints) { + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) + } + } + + } else { + certTemplate.DNSNames = data.Params.DNSNames + certTemplate.EmailAddresses = data.Params.EmailAddresses + certTemplate.IPAddresses = data.Params.IPAddresses + certTemplate.URIs = data.Params.URIs + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + var certBytes []byte + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers + + if data.Params.IsCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = true + + if data.SigningBundle.Certificate.MaxPathLen == 0 && + data.SigningBundle.Certificate.MaxPathLenZero { + return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"} + } + + certTemplate.MaxPathLen = data.Params.MaxPathLength + if certTemplate.MaxPathLen == 0 { + certTemplate.MaxPathLenZero = true + } + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, data.CSR.PublicKey, 
data.SigningBundle.PrivateKey) + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + result.CAChain = data.SigningBundle.GetCAChain() + + return result, nil +} diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 62b494b19aa1..d9a77dcc902c 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -15,15 +15,23 @@ import ( "crypto/rsa" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" "encoding/pem" "fmt" "math/big" + "net" + "net/url" "strings" + "time" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/errutil" ) +const ( + PrivateKeyTypeP521 = "p521" +) + // This can be one of a few key types so the different params may or may not be filled type ClusterKeyParams struct { Type string `json:"type" structs:"type" mapstructure:"type"` @@ -598,3 +606,158 @@ type IssueData struct { IPSANs string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"` CSR string `json:"csr" structs:"csr" mapstructure:"csr"` } + +type URLEntries struct { + IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` +} + +type CAInfoBundle struct { + ParsedCertBundle + URLs *URLEntries +} + +func (b *CAInfoBundle) GetCAChain() []*CertBlock { + chain := []*CertBlock{} + + // Include issuing CA in Chain, not including Root Authority + if (len(b.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(b.Certificate.AuthorityKeyId, 
b.Certificate.SubjectKeyId)) || + (len(b.Certificate.AuthorityKeyId) == 0 && + !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { + + chain = append(chain, &CertBlock{ + Certificate: b.Certificate, + Bytes: b.CertificateBytes, + }) + if b.CAChain != nil && len(b.CAChain) > 0 { + chain = append(chain, b.CAChain...) + } + } + + return chain +} + +type CertExtKeyUsage int + +const ( + AnyExtKeyUsage CertExtKeyUsage = 1 << iota + ServerAuthExtKeyUsage + ClientAuthExtKeyUsage + CodeSigningExtKeyUsage + EmailProtectionExtKeyUsage + IpsecEndSystemExtKeyUsage + IpsecTunnelExtKeyUsage + IpsecUserExtKeyUsage + TimeStampingExtKeyUsage + OcspSigningExtKeyUsage + MicrosoftServerGatedCryptoExtKeyUsage + NetscapeServerGatedCryptoExtKeyUsage + MicrosoftCommercialCodeSigningExtKeyUsage + MicrosoftKernelCodeSigningExtKeyUsage +) + +type CreationParameters struct { + Subject pkix.Name + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + OtherSANs map[string][]string + IsCA bool + KeyType string + KeyBits int + NotAfter time.Time + KeyUsage x509.KeyUsage + ExtKeyUsage CertExtKeyUsage + ExtKeyUsageOIDs []string + PolicyIdentifiers []string + BasicConstraintsValidForNonCA bool + + // Only used when signing a CA cert + UseCSRValues bool + PermittedDNSDomains []string + + // URLs to encode into the certificate + URLs *URLEntries + + // The maximum path length to encode + MaxPathLength int + + // The duration the certificate will use NotBefore + NotBeforeDuration time.Duration +} + +type CreationBundle struct { + Params *CreationParameters + SigningBundle *CAInfoBundle + CSR *x509.CertificateRequest +} + +// addKeyUsages adds appropriate key usages to the template given the creation +// information +func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) { + if data.Params.IsCA { + certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) + return + } + + certTemplate.KeyUsage = 
data.Params.KeyUsage + + if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) + } + + if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) + } + + if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) + } + + if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) + } + + if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) + } + + if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) + } + + if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) + } + + if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) + } + + if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto) + } + + if 
data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) + } +} diff --git a/sdk/helper/keysutil/cache.go b/sdk/helper/keysutil/cache.go new file mode 100644 index 000000000000..7da9c202fa58 --- /dev/null +++ b/sdk/helper/keysutil/cache.go @@ -0,0 +1,8 @@ +package keysutil + +type Cache interface { + Delete(key interface{}) + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) + Size() int +} diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go index 95064ff7a87e..123742ccc59f 100644 --- a/sdk/helper/keysutil/lock_manager.go +++ b/sdk/helper/keysutil/lock_manager.go @@ -55,21 +55,44 @@ type PolicyRequest struct { type LockManager struct { useCache bool - // If caching is enabled, the map of name to in-memory policy cache - cache sync.Map - + cache Cache keyLocks []*locksutil.LockEntry } -func NewLockManager(cacheDisabled bool) *LockManager { +func NewLockManager(useCache bool, cacheSize int) (*LockManager, error) { + // determine the type of cache to create + var cache Cache + switch { + case !useCache: + case cacheSize < 0: + return nil, errors.New("cache size must be greater or equal to zero") + case cacheSize == 0: + cache = NewTransitSyncMap() + case cacheSize > 0: + newLRUCache, err := NewTransitLRU(cacheSize) + if err != nil { + return nil, errwrap.Wrapf("failed to create cache: {{err}}", err) + } + cache = newLRUCache + } + lm := &LockManager{ - useCache: !cacheDisabled, + useCache: useCache, + cache: cache, keyLocks: locksutil.CreateLocks(), } - return lm + + return lm, nil } -func (lm *LockManager) CacheActive() bool { +func (lm *LockManager) GetCacheSize() int { + if !lm.useCache { + 
return 0 + } + return lm.cache.Size() +} + +func (lm *LockManager) GetUseCache() bool { return lm.useCache } @@ -178,7 +201,6 @@ func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storag if lm.useCache { lm.cache.Store(name, keyData.Policy) } - return nil } @@ -186,7 +208,7 @@ func (lm *LockManager) BackupPolicy(ctx context.Context, storage logical.Storage var p *Policy var err error - // Backup writes information about when the bacup took place, so we get an + // Backup writes information about when the backup took place, so we get an // exclusive lock here lock := locksutil.LockForKey(lm.keyLocks, name) lock.Lock() diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go index 20f71c5aed4d..79b50a3c1436 100644 --- a/sdk/helper/keysutil/policy_test.go +++ b/sdk/helper/keysutil/policy_test.go @@ -52,8 +52,10 @@ func TestPolicy_KeyEntryMapUpgrade(t *testing.T) { } func Test_KeyUpgrade(t *testing.T) { - testKeyUpgradeCommon(t, NewLockManager(false)) - testKeyUpgradeCommon(t, NewLockManager(true)) + lockManagerWithCache, _ := NewLockManager(true, 0) + lockManagerWithoutCache, _ := NewLockManager(false, 0) + testKeyUpgradeCommon(t, lockManagerWithCache) + testKeyUpgradeCommon(t, lockManagerWithoutCache) } func testKeyUpgradeCommon(t *testing.T, lm *LockManager) { @@ -97,8 +99,10 @@ func testKeyUpgradeCommon(t *testing.T, lm *LockManager) { } func Test_ArchivingUpgrade(t *testing.T) { - testArchivingUpgradeCommon(t, NewLockManager(false)) - testArchivingUpgradeCommon(t, NewLockManager(true)) + lockManagerWithCache, _ := NewLockManager(true, 0) + lockManagerWithoutCache, _ := NewLockManager(false, 0) + testArchivingUpgradeCommon(t, lockManagerWithCache) + testArchivingUpgradeCommon(t, lockManagerWithoutCache) } func testArchivingUpgradeCommon(t *testing.T, lm *LockManager) { @@ -255,8 +259,10 @@ func testArchivingUpgradeCommon(t *testing.T, lm *LockManager) { } func Test_Archiving(t *testing.T) { - testArchivingCommon(t, 
NewLockManager(false)) - testArchivingCommon(t, NewLockManager(true)) + lockManagerWithCache, _ := NewLockManager(true, 0) + lockManagerWithoutCache, _ := NewLockManager(false, 0) + testArchivingUpgradeCommon(t, lockManagerWithCache) + testArchivingUpgradeCommon(t, lockManagerWithoutCache) } func testArchivingCommon(t *testing.T, lm *LockManager) { @@ -420,7 +426,7 @@ func checkKeys(t *testing.T, func Test_StorageErrorSafety(t *testing.T) { ctx := context.Background() - lm := NewLockManager(false) + lm, _ := NewLockManager(true, 0) storage := &logical.InmemStorage{} p, _, err := lm.GetPolicy(ctx, PolicyRequest{ @@ -468,7 +474,7 @@ func Test_StorageErrorSafety(t *testing.T) { func Test_BadUpgrade(t *testing.T) { ctx := context.Background() - lm := NewLockManager(false) + lm, _ := NewLockManager(true, 0) storage := &logical.InmemStorage{} p, _, err := lm.GetPolicy(ctx, PolicyRequest{ Upsert: true, @@ -533,7 +539,7 @@ func Test_BadUpgrade(t *testing.T) { func Test_BadArchive(t *testing.T) { ctx := context.Background() - lm := NewLockManager(false) + lm, _ := NewLockManager(true, 0) storage := &logical.InmemStorage{} p, _, err := lm.GetPolicy(ctx, PolicyRequest{ Upsert: true, diff --git a/sdk/helper/keysutil/transit_lru.go b/sdk/helper/keysutil/transit_lru.go new file mode 100644 index 000000000000..cd1f6dafe693 --- /dev/null +++ b/sdk/helper/keysutil/transit_lru.go @@ -0,0 +1,29 @@ +package keysutil + +import lru "github.com/hashicorp/golang-lru" + +type TransitLRU struct { + size int + lru *lru.TwoQueueCache +} + +func NewTransitLRU(size int) (*TransitLRU, error) { + lru, err := lru.New2Q(size) + return &TransitLRU{lru: lru, size: size}, err +} + +func (c *TransitLRU) Delete(key interface{}) { + c.lru.Remove(key) +} + +func (c *TransitLRU) Load(key interface{}) (value interface{}, ok bool) { + return c.lru.Get(key) +} + +func (c *TransitLRU) Store(key, value interface{}) { + c.lru.Add(key, value) +} + +func (c *TransitLRU) Size() int { + return c.size +} diff --git 
a/sdk/helper/keysutil/transit_syncmap.go b/sdk/helper/keysutil/transit_syncmap.go new file mode 100644 index 000000000000..ce9071380a99 --- /dev/null +++ b/sdk/helper/keysutil/transit_syncmap.go @@ -0,0 +1,29 @@ +package keysutil + +import ( + "sync" +) + +type TransitSyncMap struct { + syncmap sync.Map +} + +func NewTransitSyncMap() *TransitSyncMap { + return &TransitSyncMap{syncmap: sync.Map{}} +} + +func (c *TransitSyncMap) Delete(key interface{}) { + c.syncmap.Delete(key) +} + +func (c *TransitSyncMap) Load(key interface{}) (value interface{}, ok bool) { + return c.syncmap.Load(key) +} + +func (c *TransitSyncMap) Store(key, value interface{}) { + c.syncmap.Store(key, value) +} + +func (c *TransitSyncMap) Size() int { + return 0 +} diff --git a/sdk/helper/ldaputil/config.go b/sdk/helper/ldaputil/config.go index 3e8fa55370be..2c9787b04e81 100644 --- a/sdk/helper/ldaputil/config.go +++ b/sdk/helper/ldaputil/config.go @@ -22,31 +22,41 @@ func ConfigFields() map[string]*framework.FieldSchema { Type: framework.TypeString, Default: "ldap://127.0.0.1", Description: "LDAP URL to connect to (default: ldap://127.0.0.1). 
Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.", - DisplayName: "URL", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "URL", + }, }, "userdn": { Type: framework.TypeString, Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)", - DisplayName: "User DN", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User DN", + }, }, "binddn": { Type: framework.TypeString, Description: "LDAP DN for searching for the user DN (optional)", - DisplayName: "Name of Object to bind (binddn)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Name of Object to bind (binddn)", + }, }, "bindpass": { - Type: framework.TypeString, - Description: "LDAP password for searching for the user DN (optional)", - DisplaySensitive: true, + Type: framework.TypeString, + Description: "LDAP password for searching for the user DN (optional)", + DisplayAttrs: &framework.DisplayAttributes{ + Sensitive: true, + }, }, "groupdn": { Type: framework.TypeString, Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)", - DisplayName: "Group DN", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Group DN", + }, }, "groupfilter": { @@ -56,6 +66,9 @@ func ConfigFields() map[string]*framework.FieldSchema { The template can access the following context variables: UserDN, Username Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}})) Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Group Filter", + }, }, "groupattr": { @@ -65,20 +78,28 @@ Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN} in order to enumerate user group membership. Examples: "cn" or "memberOf", etc. 
Default: cn`, - DisplayName: "Group Attribute", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Group Attribute", + Value: "cn", + }, }, "upndomain": { Type: framework.TypeString, Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)", - DisplayName: "User Principal (UPN) Domain", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User Principal (UPN) Domain", + }, }, "userattr": { Type: framework.TypeString, Default: "cn", Description: "Attribute used for users (default: cn)", - DisplayName: "User Attribute", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User Attribute", + Value: "cn", + }, }, "certificate": { @@ -89,34 +110,44 @@ Default: cn`, "discoverdn": { Type: framework.TypeBool, Description: "Use anonymous bind to discover the bind DN of a user (optional)", - DisplayName: "Discover DN", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Discover DN", + }, }, "insecure_tls": { Type: framework.TypeBool, Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)", - DisplayName: "Insecure TLS", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Insecure TLS", + }, }, "starttls": { Type: framework.TypeBool, Description: "Issue a StartTLS command after establishing unencrypted connection (optional)", - DisplayName: "Issue StartTLS command after establishing an unencrypted connection", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Issue StartTLS", + }, }, "tls_min_version": { - Type: framework.TypeString, - Default: "tls12", - Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'", - DisplayName: "Minimum TLS Version", + Type: framework.TypeString, + Default: "tls12", + Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. 
Defaults to 'tls12'", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Minimum TLS Version", + }, AllowedValues: []interface{}{"tls10", "tls11", "tls12"}, }, "tls_max_version": { - Type: framework.TypeString, - Default: "tls12", - Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'", - DisplayName: "Maxumum TLS Version", + Type: framework.TypeString, + Default: "tls12", + Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Maximum TLS Version", + }, AllowedValues: []interface{}{"tls10", "tls11", "tls12"}, }, @@ -143,118 +174,121 @@ Default: cn`, * Creates and initializes a ConfigEntry object with its default values, * as specified by the passed schema. */ -func NewConfigEntry(d *framework.FieldData) (*ConfigEntry, error) { - cfg := new(ConfigEntry) +func NewConfigEntry(existing *ConfigEntry, d *framework.FieldData) (*ConfigEntry, error) { + var hadExisting bool + var cfg *ConfigEntry + + if existing != nil { + cfg = existing + hadExisting = true + } else { + cfg = new(ConfigEntry) + } - url := d.Get("url").(string) - if url != "" { - cfg.Url = strings.ToLower(url) + if _, ok := d.Raw["url"]; ok || !hadExisting { + cfg.Url = strings.ToLower(d.Get("url").(string)) } - userattr := d.Get("userattr").(string) - if userattr != "" { - cfg.UserAttr = strings.ToLower(userattr) + + if _, ok := d.Raw["userattr"]; ok || !hadExisting { + cfg.UserAttr = strings.ToLower(d.Get("userattr").(string)) } - userdn := d.Get("userdn").(string) - if userdn != "" { - cfg.UserDN = userdn + + if _, ok := d.Raw["userdn"]; ok || !hadExisting { + cfg.UserDN = d.Get("userdn").(string) } - groupdn := d.Get("groupdn").(string) - if groupdn != "" { - cfg.GroupDN = groupdn + + if _, ok := d.Raw["groupdn"]; ok || !hadExisting { + cfg.GroupDN = d.Get("groupdn").(string) } - groupfilter := d.Get("groupfilter").(string) - if 
groupfilter != "" { - // Validate the template before proceeding - _, err := template.New("queryTemplate").Parse(groupfilter) - if err != nil { - return nil, errwrap.Wrapf("invalid groupfilter: {{err}}", err) + + if _, ok := d.Raw["groupfilter"]; ok || !hadExisting { + groupfilter := d.Get("groupfilter").(string) + if groupfilter != "" { + // Validate the template before proceeding + _, err := template.New("queryTemplate").Parse(groupfilter) + if err != nil { + return nil, errwrap.Wrapf("invalid groupfilter: {{err}}", err) + } } cfg.GroupFilter = groupfilter } - groupattr := d.Get("groupattr").(string) - if groupattr != "" { - cfg.GroupAttr = groupattr + + if _, ok := d.Raw["groupattr"]; ok || !hadExisting { + cfg.GroupAttr = d.Get("groupattr").(string) } - upndomain := d.Get("upndomain").(string) - if upndomain != "" { - cfg.UPNDomain = upndomain + + if _, ok := d.Raw["upndomain"]; ok || !hadExisting { + cfg.UPNDomain = d.Get("upndomain").(string) } - certificate := d.Get("certificate").(string) - if certificate != "" { - block, _ := pem.Decode([]byte(certificate)) - if block == nil || block.Type != "CERTIFICATE" { - return nil, fmt.Errorf("failed to decode PEM block in the certificate") - } - _, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errwrap.Wrapf("failed to parse certificate: {{err}}", err) + if _, ok := d.Raw["certificate"]; ok || !hadExisting { + certificate := d.Get("certificate").(string) + if certificate != "" { + block, _ := pem.Decode([]byte(certificate)) + + if block == nil || block.Type != "CERTIFICATE" { + return nil, errors.New("failed to decode PEM block in the certificate") + } + _, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errwrap.Wrapf("failed to parse certificate: {{err}}", err) + } } + cfg.Certificate = certificate } - insecureTLS := d.Get("insecure_tls").(bool) - if insecureTLS { - cfg.InsecureTLS = insecureTLS - } - cfg.TLSMinVersion = d.Get("tls_min_version").(string) - if 
cfg.TLSMinVersion == "" { - return nil, fmt.Errorf("failed to get 'tls_min_version' value") - } - var ok bool - _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion] - if !ok { - return nil, fmt.Errorf("invalid 'tls_min_version'") + if _, ok := d.Raw["insecure_tls"]; ok || !hadExisting { + cfg.InsecureTLS = d.Get("insecure_tls").(bool) } - cfg.TLSMaxVersion = d.Get("tls_max_version").(string) - if cfg.TLSMaxVersion == "" { - return nil, fmt.Errorf("failed to get 'tls_max_version' value") + if _, ok := d.Raw["tls_min_version"]; ok || !hadExisting { + cfg.TLSMinVersion = d.Get("tls_min_version").(string) + _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion] + if !ok { + return nil, errors.New("invalid 'tls_min_version'") + } } - _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion] - if !ok { - return nil, fmt.Errorf("invalid 'tls_max_version'") + if _, ok := d.Raw["tls_max_version"]; ok || !hadExisting { + cfg.TLSMaxVersion = d.Get("tls_max_version").(string) + _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion] + if !ok { + return nil, fmt.Errorf("invalid 'tls_max_version'") + } } if cfg.TLSMaxVersion < cfg.TLSMinVersion { return nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'") } - startTLS := d.Get("starttls").(bool) - if startTLS { - cfg.StartTLS = startTLS + if _, ok := d.Raw["starttls"]; ok || !hadExisting { + cfg.StartTLS = d.Get("starttls").(bool) } - bindDN := d.Get("binddn").(string) - if bindDN != "" { - cfg.BindDN = bindDN + if _, ok := d.Raw["binddn"]; ok || !hadExisting { + cfg.BindDN = d.Get("binddn").(string) } - bindPass := d.Get("bindpass").(string) - if bindPass != "" { - cfg.BindPassword = bindPass + if _, ok := d.Raw["bindpass"]; ok || !hadExisting { + cfg.BindPassword = d.Get("bindpass").(string) } - denyNullBind := d.Get("deny_null_bind").(bool) - if denyNullBind { - cfg.DenyNullBind = denyNullBind + if _, ok := d.Raw["deny_null_bind"]; ok || !hadExisting { + cfg.DenyNullBind = d.Get("deny_null_bind").(bool) } - discoverDN := 
d.Get("discoverdn").(bool) - if discoverDN { - cfg.DiscoverDN = discoverDN + if _, ok := d.Raw["discoverdn"]; ok || !hadExisting { + cfg.DiscoverDN = d.Get("discoverdn").(bool) } - caseSensitiveNames, ok := d.GetOk("case_sensitive_names") - if ok { + if _, ok := d.Raw["case_sensitive_names"]; ok || !hadExisting { cfg.CaseSensitiveNames = new(bool) - *cfg.CaseSensitiveNames = caseSensitiveNames.(bool) + *cfg.CaseSensitiveNames = d.Get("case_sensitive_names").(bool) } - useTokenGroups := d.Get("use_token_groups").(bool) - if useTokenGroups { - cfg.UseTokenGroups = useTokenGroups + if _, ok := d.Raw["use_token_groups"]; ok || !hadExisting { + cfg.UseTokenGroups = d.Get("use_token_groups").(bool) } return cfg, nil diff --git a/sdk/helper/parseutil/parseutil.go b/sdk/helper/parseutil/parseutil.go index 6acf70d426fe..ea8289b4361e 100644 --- a/sdk/helper/parseutil/parseutil.go +++ b/sdk/helper/parseutil/parseutil.go @@ -20,11 +20,12 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) { if ok { in = jsonIn.String() } - switch in.(type) { + switch inp := in.(type) { + case nil: + // return default of zero case string: - inp := in.(string) if inp == "" { - return time.Duration(0), nil + return dur, nil } var err error // Look for a suffix otherwise its a plain second value @@ -42,17 +43,23 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) { dur = time.Duration(secs) * time.Second } case int: - dur = time.Duration(in.(int)) * time.Second + dur = time.Duration(inp) * time.Second case int32: - dur = time.Duration(in.(int32)) * time.Second + dur = time.Duration(inp) * time.Second case int64: - dur = time.Duration(in.(int64)) * time.Second + dur = time.Duration(inp) * time.Second case uint: - dur = time.Duration(in.(uint)) * time.Second + dur = time.Duration(inp) * time.Second case uint32: - dur = time.Duration(in.(uint32)) * time.Second + dur = time.Duration(inp) * time.Second case uint64: - dur = time.Duration(in.(uint64)) * time.Second + dur = 
time.Duration(inp) * time.Second + case float32: + dur = time.Duration(inp) * time.Second + case float64: + dur = time.Duration(inp) * time.Second + case time.Duration: + dur = inp default: return 0, errors.New("could not parse duration from input") } diff --git a/sdk/helper/tlsutil/tlsutil.go b/sdk/helper/tlsutil/tlsutil.go index 23fa8a6060b3..236d32ec6748 100644 --- a/sdk/helper/tlsutil/tlsutil.go +++ b/sdk/helper/tlsutil/tlsutil.go @@ -2,11 +2,15 @@ package tlsutil import ( "crypto/tls" + "crypto/x509" + "errors" "fmt" "github.com/hashicorp/vault/sdk/helper/strutil" ) +var ErrInvalidCertParams = errors.New("invalid certificate parameters") + // TLSLookup maps the tls_min_version configuration to the internal value var TLSLookup = map[string]uint16{ "tls10": tls.VersionTLS10, @@ -65,3 +69,41 @@ func GetCipherName(cipher uint16) (string, error) { } return "", fmt.Errorf("unsupported cipher %d", cipher) } + +func ClientTLSConfig(caCert []byte, clientCert []byte, clientKey []byte) (*tls.Config, error) { + var tlsConfig *tls.Config + var pool *x509.CertPool + + switch { + case len(caCert) != 0: + // Valid + case len(clientCert) != 0 && len(clientKey) != 0: + // Valid + default: + return nil, ErrInvalidCertParams + } + + if len(caCert) != 0 { + pool = x509.NewCertPool() + pool.AppendCertsFromPEM(caCert) + } + + tlsConfig = &tls.Config{ + RootCAs: pool, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, + } + + var cert tls.Certificate + var err error + if len(clientCert) != 0 && len(clientKey) != 0 { + cert, err = tls.X509KeyPair(clientCert, clientKey) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil +} diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go new file mode 100644 index 000000000000..e225f866192d --- /dev/null +++ b/sdk/helper/tokenutil/tokenutil.go @@ -0,0 +1,408 @@ +package tokenutil + 
+import ( + "errors" + "fmt" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// TokenParams contains a set of common parameters that auth plugins can use +// for setting token behavior +type TokenParams struct { + // The set of CIDRs that tokens generated using this role will be bound to + TokenBoundCIDRs []*sockaddr.SockAddrMarshaler `json:"token_bound_cidrs"` + + // If set, the token entry will have an explicit maximum TTL set, rather + // than deferring to role/mount values + TokenExplicitMaxTTL time.Duration `json:"token_explicit_max_ttl" mapstructure:"token_explicit_max_ttl"` + + // The max TTL to use for the token + TokenMaxTTL time.Duration `json:"token_max_ttl" mapstructure:"token_max_ttl"` + + // If set, core will not automatically add default to the policy list + TokenNoDefaultPolicy bool `json:"token_no_default_policy" mapstructure:"token_no_default_policy"` + + // The maximum number of times a token issued from this role may be used. + TokenNumUses int `json:"token_num_uses" mapstructure:"token_num_uses"` + + // If non-zero, tokens created using this role will be able to be renewed + // forever, but will have a fixed renewal period of this value + TokenPeriod time.Duration `json:"token_period" mapstructure:"token_period"` + + // The policies to set + TokenPolicies []string `json:"token_policies" mapstructure:"token_policies"` + + // The type of token this role should issue + TokenType logical.TokenType `json:"token_type" mapstructure:"token_type"` + + // The TTL to user for the token + TokenTTL time.Duration `json:"token_ttl" mapstructure:"token_ttl"` +} + +// AddTokenFields adds fields to an existing role. It panics if it would +// overwrite an existing field. 
+func AddTokenFields(m map[string]*framework.FieldSchema) { + AddTokenFieldsWithAllowList(m, nil) +} + +// AddTokenFields adds fields to an existing role. It panics if it would +// overwrite an existing field. Allowed can be use to restrict the set, e.g. if +// there would be conflicts. +func AddTokenFieldsWithAllowList(m map[string]*framework.FieldSchema, allowed []string) { + r := TokenFields() + for k, v := range r { + if len(allowed) > 0 && !strutil.StrListContains(allowed, k) { + continue + } + if _, has := m[k]; has { + panic(fmt.Sprintf("adding role field %s would overwrite existing field", k)) + } + m[k] = v + } +} + +// TokenFields provides a set of field schemas for the parameters +func TokenFields() map[string]*framework.FieldSchema { + return map[string]*framework.FieldSchema{ + "token_bound_cidrs": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Bound CIDRs", + }, + }, + + "token_explicit_max_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: tokenExplicitMaxTTLHelp, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Explicit Maximum TTL", + }, + }, + + "token_max_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: "The maximum lifetime of the generated token", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Maximum TTL", + }, + }, + + "token_no_default_policy": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: "If true, the 'default' policy will not automatically be added to generated tokens", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Do Not Attach 'default' Policy To Generated Tokens", + }, + }, + + "token_period": &framework.FieldSchema{ + Type: 
framework.TypeDurationSecond, + Description: tokenPeriodHelp, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Period", + }, + }, + + "token_policies": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: "Comma-separated list of policies", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Policies", + }, + }, + + "token_type": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "default-service", + Description: "The type of token to generate, service or batch", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Type", + }, + }, + + "token_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: "The initial ttl of the token to generate", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Initial TTL", + }, + }, + + "token_num_uses": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: "The maximum number of times a token may be used, a value of zero means unlimited", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Maximum Uses of Generated Tokens", + }, + }, + } +} + +// ParseTokenFields provides common field parsing functionality into a TokenFields struct +func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldData) error { + if boundCIDRsRaw, ok := d.GetOk("token_bound_cidrs"); ok { + boundCIDRs, err := parseutil.ParseAddrs(boundCIDRsRaw.([]string)) + if err != nil { + return err + } + t.TokenBoundCIDRs = boundCIDRs + } + + if explicitMaxTTLRaw, ok := d.GetOk("token_explicit_max_ttl"); ok { + t.TokenExplicitMaxTTL = time.Duration(explicitMaxTTLRaw.(int)) * time.Second + } + + if maxTTLRaw, ok := d.GetOk("token_max_ttl"); ok { + t.TokenMaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second + } + if t.TokenMaxTTL < 0 { + return errors.New("'token_max_ttl' cannot be negative") + } + + if noDefaultRaw, ok := d.GetOk("token_no_default_policy"); ok { + 
t.TokenNoDefaultPolicy = noDefaultRaw.(bool) + } + + if periodRaw, ok := d.GetOk("token_period"); ok { + t.TokenPeriod = time.Duration(periodRaw.(int)) * time.Second + } + if t.TokenPeriod < 0 { + return errors.New("'token_period' cannot be negative") + } + + if policiesRaw, ok := d.GetOk("token_policies"); ok { + t.TokenPolicies = policiesRaw.([]string) + } + + if tokenTypeRaw, ok := d.GetOk("token_type"); ok { + var tokenType logical.TokenType + tokenTypeStr := tokenTypeRaw.(string) + switch tokenTypeStr { + case "", "default": + tokenType = logical.TokenTypeDefault + case "service": + tokenType = logical.TokenTypeService + case "batch": + tokenType = logical.TokenTypeBatch + default: + return fmt.Errorf("invalid 'token_type' value %q", tokenTypeStr) + } + t.TokenType = tokenType + } + + if t.TokenType == logical.TokenTypeBatch || t.TokenType == logical.TokenTypeDefaultBatch { + if t.TokenPeriod != 0 { + return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate periodic tokens") + } + if t.TokenNumUses != 0 { + return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate tokens with limited use count") + } + } + + if ttlRaw, ok := d.GetOk("token_ttl"); ok { + t.TokenTTL = time.Duration(ttlRaw.(int)) * time.Second + } + if t.TokenTTL < 0 { + return errors.New("'token_ttl' cannot be negative") + } + if t.TokenTTL > 0 && t.TokenMaxTTL > 0 && t.TokenTTL > t.TokenMaxTTL { + return errors.New("'token_ttl' cannot be greater than 'token_max_ttl'") + } + + if tokenNumUses, ok := d.GetOk("token_num_uses"); ok { + t.TokenNumUses = tokenNumUses.(int) + } + if t.TokenNumUses < 0 { + return errors.New("'token_num_uses' cannot be negative") + } + + return nil +} + +// PopulateTokenData adds information from TokenParams into the map +func (t *TokenParams) PopulateTokenData(m map[string]interface{}) { + m["token_bound_cidrs"] = t.TokenBoundCIDRs + m["token_explicit_max_ttl"] = int64(t.TokenExplicitMaxTTL.Seconds()) + 
m["token_max_ttl"] = int64(t.TokenMaxTTL.Seconds()) + m["token_no_default_policy"] = t.TokenNoDefaultPolicy + m["token_period"] = int64(t.TokenPeriod.Seconds()) + m["token_policies"] = t.TokenPolicies + m["token_type"] = t.TokenType.String() + m["token_ttl"] = int64(t.TokenTTL.Seconds()) + m["token_num_uses"] = t.TokenNumUses + + if len(t.TokenPolicies) == 0 { + m["token_policies"] = []string{} + } + + if len(t.TokenBoundCIDRs) == 0 { + m["token_bound_cidrs"] = []string{} + } +} + +// PopulateTokenAuth populates Auth with parameters +func (t *TokenParams) PopulateTokenAuth(auth *logical.Auth) { + auth.BoundCIDRs = t.TokenBoundCIDRs + auth.ExplicitMaxTTL = t.TokenExplicitMaxTTL + auth.MaxTTL = t.TokenMaxTTL + auth.NoDefaultPolicy = t.TokenNoDefaultPolicy + auth.Period = t.TokenPeriod + auth.Policies = t.TokenPolicies + auth.Renewable = true + auth.TokenType = t.TokenType + auth.TTL = t.TokenTTL + auth.NumUses = t.TokenNumUses +} + +func DeprecationText(param string) string { + return fmt.Sprintf("Use %q instead. 
If this and %q are both specified, only %q will be used.", param, param, param) +} + +func upgradeDurationValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *time.Duration) error { + _, ok := d.GetOk(newKey) + if !ok { + raw, ok := d.GetOk(oldKey) + if ok { + *oldVal = time.Duration(raw.(int)) * time.Second + *newVal = *oldVal + } + } else { + _, ok = d.GetOk(oldKey) + if ok { + *oldVal = *newVal + } else { + *oldVal = 0 + } + } + + return nil +} + +func upgradeIntValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *int) error { + _, ok := d.GetOk(newKey) + if !ok { + raw, ok := d.GetOk(oldKey) + if ok { + *oldVal = raw.(int) + *newVal = *oldVal + } + } else { + _, ok = d.GetOk(oldKey) + if ok { + *oldVal = *newVal + } else { + *oldVal = 0 + } + } + + return nil +} + +func upgradeStringSliceValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *[]string) error { + _, ok := d.GetOk(newKey) + if !ok { + raw, ok := d.GetOk(oldKey) + if ok { + // Special case: if we're looking at "token_policies" parse the policies + if newKey == "token_policies" { + *oldVal = policyutil.ParsePolicies(raw) + } else { + *oldVal = raw.([]string) + } + *newVal = *oldVal + } + } else { + _, ok = d.GetOk(oldKey) + if ok { + *oldVal = *newVal + } else { + *oldVal = nil + } + } + + return nil +} + +func upgradeSockAddrSliceValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *[]*sockaddr.SockAddrMarshaler) error { + _, ok := d.GetOk(newKey) + if !ok { + raw, ok := d.GetOk(oldKey) + if ok { + boundCIDRs, err := parseutil.ParseAddrs(raw) + if err != nil { + return err + } + *oldVal = boundCIDRs + *newVal = *oldVal + } + } else { + _, ok = d.GetOk(oldKey) + if ok { + *oldVal = *newVal + } else { + *oldVal = nil + } + } + + return nil +} + +// UpgradeValue takes in old/new data keys and old/new values and calls out to +// a helper function to perform upgrades in a standardized way. 
It reqiures +// pointers in all cases so that we can set directly into the target struct. +func UpgradeValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal interface{}) error { + switch typedOldVal := oldVal.(type) { + case *time.Duration: + typedNewVal, ok := newVal.(*time.Duration) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeDurationValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + case *int: + typedNewVal, ok := newVal.(*int) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeIntValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + case *[]string: + typedNewVal, ok := newVal.(*[]string) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeStringSliceValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + case *[]*sockaddr.SockAddrMarshaler: + typedNewVal, ok := newVal.(*[]*sockaddr.SockAddrMarshaler) + if !ok { + return errors.New("mismatch in value types in tokenutil.UpgradeValue") + } + return upgradeSockAddrSliceValue(d, oldKey, newKey, typedOldVal, typedNewVal) + + default: + return errors.New("unhandled type in tokenutil.UpgradeValue") + } +} + +const ( + tokenPeriodHelp = `If set, tokens created via this role +will have no max lifetime; instead, their +renewal period will be fixed to this value. +This takes an integer number of seconds, +or a string duration (e.g. "24h").` + tokenExplicitMaxTTLHelp = `If set, tokens created via this role +carry an explicit maximum TTL. 
During renewal, +the current maximum TTL values of the role +and the mount are not checked for changes, +and any updates to these values will have +no effect on the token being renewed.` +) diff --git a/sdk/logical/audit.go b/sdk/logical/audit.go new file mode 100644 index 000000000000..8ba70f37e01a --- /dev/null +++ b/sdk/logical/audit.go @@ -0,0 +1,19 @@ +package logical + +type LogInput struct { + Type string + Auth *Auth + Request *Request + Response *Response + OuterErr error + NonHMACReqDataKeys []string + NonHMACRespDataKeys []string +} + +type MarshalOptions struct { + ValueHasher func(string) string +} + +type OptMarshaler interface { + MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) +} diff --git a/sdk/logical/auth.go b/sdk/logical/auth.go index 89aa9165909a..2bfb6e0015a1 100644 --- a/sdk/logical/auth.go +++ b/sdk/logical/auth.go @@ -38,6 +38,11 @@ type Auth struct { // different namespaces indexed by respective namespace identifiers ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"` + // Indicates that the default policy should not be added by core when + // creating a token. The default policy will still be added if it's + // explicitly defined. + NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"` + // Metadata is used to attach arbitrary string-type metadata to // an authenticated user. This metadata will be outputted into the // audit log. diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go index 473da1c66f63..f8844e275f88 100644 --- a/sdk/logical/identity.pb.go +++ b/sdk/logical/identity.pb.go @@ -32,6 +32,8 @@ type Entity struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` + // Disabled is true if the entity is disabled. 
+ Disabled bool `sentinel:"" protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` } func (m *Entity) Reset() { *m = Entity{} } @@ -87,6 +89,13 @@ func (m *Entity) GetMetadata() map[string]string { return nil } +func (m *Entity) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + type Alias struct { // MountType is the backend mount's type to which this identity belongs MountType string `sentinel:"" protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` @@ -165,24 +174,25 @@ func init() { func init() { proto.RegisterFile("sdk/logical/identity.proto", fileDescriptor_4a34d35719c603a1) } var fileDescriptor_4a34d35719c603a1 = []byte{ - // 291 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x91, 0x4f, 0x6b, 0x83, 0x40, - 0x10, 0xc5, 0x51, 0xf3, 0xa7, 0x99, 0x12, 0x29, 0x4b, 0x0f, 0x12, 0x1a, 0x08, 0x81, 0x16, 0x4f, - 0x0a, 0xed, 0x25, 0x6d, 0x4f, 0x29, 0xc9, 0x21, 0x87, 0x5e, 0xa4, 0xa7, 0x5e, 0xca, 0x44, 0x97, - 0xb8, 0x44, 0x5d, 0x71, 0xc7, 0x80, 0x5f, 0xb2, 0xe7, 0x7e, 0x9c, 0x92, 0x75, 0x23, 0x09, 0x3d, - 0xf7, 0x36, 0xfe, 0xde, 0x38, 0xfb, 0xe6, 0x0d, 0x4c, 0x54, 0xb2, 0x0f, 0x33, 0xb9, 0x13, 0x31, - 0x66, 0xa1, 0x48, 0x78, 0x41, 0x82, 0x9a, 0xa0, 0xac, 0x24, 0x49, 0x36, 0x34, 0x7c, 0xfe, 0x6d, - 0xc1, 0x60, 0xad, 0x15, 0xe6, 0x82, 0xbd, 0x59, 0x79, 0xd6, 0xcc, 0xf2, 0x47, 0x91, 0xbd, 0x59, - 0x31, 0x06, 0xbd, 0x02, 0x73, 0xee, 0xd9, 0x9a, 0xe8, 0x9a, 0xf9, 0x30, 0xc4, 0x4c, 0xa0, 0xe2, - 0xca, 0x73, 0x66, 0x8e, 0x7f, 0xfd, 0xe8, 0x06, 0x66, 0x52, 0xb0, 0x3c, 0xf2, 0xe8, 0x24, 0xb3, - 0x67, 0xb8, 0xca, 0x39, 0x61, 0x82, 0x84, 0x5e, 0x4f, 0xb7, 0x4e, 0xbb, 0xd6, 0xf6, 0xc1, 0xe0, - 0xdd, 0xe8, 0xeb, 0x82, 0xaa, 0x26, 0xea, 0xda, 0x27, 0xaf, 0x30, 0xbe, 0x90, 0xd8, 0x0d, 0x38, - 0x7b, 0xde, 0x18, 0x6b, 0xc7, 0x92, 0xdd, 0x42, 0xff, 0x80, 0x59, 0x7d, 0x32, 0xd7, 0x7e, 0xbc, - 0xd8, 0x0b, 0x6b, 0xfe, 0x63, 0x41, 0x5f, 0x5b, 0x61, 
0x53, 0x80, 0x5c, 0xd6, 0x05, 0x7d, 0x51, - 0x53, 0x72, 0xf3, 0xf3, 0x48, 0x93, 0x8f, 0xa6, 0xe4, 0xec, 0x1e, 0xdc, 0x56, 0xc6, 0x38, 0xe6, - 0x4a, 0xc9, 0xca, 0xcc, 0x1a, 0x6b, 0xba, 0x34, 0xb0, 0x4b, 0xc1, 0x39, 0x4b, 0x61, 0xf1, 0x67, - 0xb7, 0xbb, 0xcb, 0x18, 0xfe, 0x65, 0xb5, 0x37, 0xff, 0xf3, 0x61, 0x27, 0x28, 0xad, 0xb7, 0x41, - 0x2c, 0xf3, 0x30, 0x45, 0x95, 0x8a, 0x58, 0x56, 0x65, 0x78, 0xc0, 0x3a, 0xa3, 0xf0, 0xec, 0xda, - 0xdb, 0x81, 0xbe, 0xf2, 0xd3, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x36, 0xa9, 0x44, 0x63, 0x03, - 0x02, 0x00, 0x00, + // 310 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xbf, 0x6a, 0xc3, 0x30, + 0x10, 0xc6, 0x91, 0x9d, 0xbf, 0x57, 0x12, 0x8a, 0xe8, 0x60, 0x42, 0x03, 0x21, 0xd0, 0xe2, 0xc9, + 0x86, 0x76, 0x49, 0xdb, 0x29, 0x25, 0x19, 0x32, 0x74, 0x31, 0x9d, 0xba, 0x94, 0x8b, 0x2d, 0x62, + 0x11, 0xd9, 0x32, 0x96, 0x1c, 0xf0, 0x9b, 0xf6, 0x19, 0xfa, 0x14, 0x25, 0xb2, 0x62, 0x12, 0x4a, + 0xa7, 0x6e, 0xba, 0xdf, 0x77, 0x77, 0xba, 0xfb, 0x38, 0x98, 0xa8, 0x64, 0x1f, 0x0a, 0xb9, 0xe3, + 0x31, 0x8a, 0x90, 0x27, 0x2c, 0xd7, 0x5c, 0xd7, 0x41, 0x51, 0x4a, 0x2d, 0x69, 0xdf, 0xf2, 0xf9, + 0x37, 0x81, 0xde, 0xda, 0x28, 0x74, 0x0c, 0xce, 0x66, 0xe5, 0x91, 0x19, 0xf1, 0x87, 0x91, 0xb3, + 0x59, 0x51, 0x0a, 0x9d, 0x1c, 0x33, 0xe6, 0x39, 0x86, 0x98, 0x37, 0xf5, 0xa1, 0x8f, 0x82, 0xa3, + 0x62, 0xca, 0x73, 0x67, 0xae, 0x7f, 0xf5, 0x30, 0x0e, 0x6c, 0xa7, 0x60, 0x79, 0xe4, 0xd1, 0x49, + 0xa6, 0x4f, 0x30, 0xc8, 0x98, 0xc6, 0x04, 0x35, 0x7a, 0x1d, 0x93, 0x3a, 0x6d, 0x53, 0x9b, 0x0f, + 0x83, 0x37, 0xab, 0xaf, 0x73, 0x5d, 0xd6, 0x51, 0x9b, 0x4e, 0x27, 0x30, 0x48, 0xb8, 0xc2, 0xad, + 0x60, 0x89, 0xd7, 0x9d, 0x11, 0x7f, 0x10, 0xb5, 0xf1, 0xe4, 0x05, 0x46, 0x17, 0x65, 0xf4, 0x1a, + 0xdc, 0x3d, 0xab, 0xed, 0xd8, 0xc7, 0x27, 0xbd, 0x81, 0xee, 0x01, 0x45, 0x75, 0x1a, 0xbc, 0x09, + 0x9e, 0x9d, 0x05, 0x99, 0x7f, 0x11, 0xe8, 0x9a, 0x31, 0xe9, 0x14, 0x20, 0x93, 0x55, 0xae, 0x3f, + 0x75, 0x5d, 0x30, 0x5b, 
0x3c, 0x34, 0xe4, 0xbd, 0x2e, 0x18, 0xbd, 0x83, 0x71, 0x23, 0x63, 0x1c, + 0x33, 0xa5, 0x64, 0x69, 0x7b, 0x8d, 0x0c, 0x5d, 0x5a, 0xd8, 0x3a, 0xe4, 0x9e, 0x39, 0xb4, 0xf8, + 0xb5, 0xf7, 0xed, 0xa5, 0x45, 0x7f, 0xad, 0xfd, 0xaf, 0xd5, 0x5e, 0xfd, 0x8f, 0xfb, 0x1d, 0xd7, + 0x69, 0xb5, 0x0d, 0x62, 0x99, 0x85, 0x29, 0xaa, 0x94, 0xc7, 0xb2, 0x2c, 0xc2, 0x03, 0x56, 0x42, + 0x87, 0x67, 0x97, 0xb0, 0xed, 0x99, 0x0b, 0x78, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xa9, + 0x8a, 0x39, 0x1f, 0x02, 0x00, 0x00, } diff --git a/sdk/logical/identity.proto b/sdk/logical/identity.proto index 6e060538dfc9..65e27435f08d 100644 --- a/sdk/logical/identity.proto +++ b/sdk/logical/identity.proto @@ -16,6 +16,9 @@ message Entity { // Metadata represents the custom data tied to this entity map metadata = 4; + + // Disabled is true if the entity is disabled. + bool disabled = 5; } message Alias { diff --git a/sdk/logical/logical.go b/sdk/logical/logical.go index a3456e9671ae..cc3f6ae47533 100644 --- a/sdk/logical/logical.go +++ b/sdk/logical/logical.go @@ -124,3 +124,8 @@ type Paths struct { // unless it ends with '/' in which case it will be treated as a prefix. SealWrapStorage []string } + +type Auditor interface { + AuditRequest(ctx context.Context, input *LogInput) error + AuditResponse(ctx context.Context, input *LogInput) error +} diff --git a/sdk/logical/request.go b/sdk/logical/request.go index 87326e236b75..e030d7ac28d5 100644 --- a/sdk/logical/request.go +++ b/sdk/logical/request.go @@ -2,6 +2,7 @@ package logical import ( "fmt" + "io" "strings" "time" ) @@ -171,6 +172,14 @@ type Request struct { // ClientTokenSource tells us where the client token was sourced from, so // we can delete it before sending off to plugins ClientTokenSource ClientTokenSource + + // RequestReader if set can be used to read the full request body from the + // http request that generated this logical.Request object. 
+ RequestReader io.ReadCloser `json:"-" sentinel:""` + + // ResponseWriter if set can be used to stream a response value to the http + // request that generated this logical.Request object. + ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""` } // Get returns a data field and guards for nil Data diff --git a/sdk/logical/response.go b/sdk/logical/response.go index cb6e08b40f76..7f8dbab851ba 100644 --- a/sdk/logical/response.go +++ b/sdk/logical/response.go @@ -4,6 +4,8 @@ import ( "encoding/json" "errors" "fmt" + "io" + "sync/atomic" "github.com/hashicorp/vault/sdk/helper/wrapping" ) @@ -177,3 +179,31 @@ func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, e return ret, nil } + +// HTTPResponseWriter is optionally added to a request object and can be used to +// write directly to the HTTP response writter. +type HTTPResponseWriter struct { + writer io.Writer + written *uint32 +} + +// NewHTTPResponseWriter creates a new HTTPRepoinseWriter object that wraps the +// provided io.Writer. +func NewHTTPResponseWriter(w io.Writer) *HTTPResponseWriter { + return &HTTPResponseWriter{ + writer: w, + written: new(uint32), + } +} + +// Write will write the bytes to the underlying io.Writer. +func (rw *HTTPResponseWriter) Write(bytes []byte) (int, error) { + atomic.StoreUint32(rw.written, 1) + + return rw.writer.Write(bytes) +} + +// Written tells us if the writer has been written to yet. 
+func (rw *HTTPResponseWriter) Written() bool { + return atomic.LoadUint32(rw.written) == 1 +} diff --git a/sdk/logical/storage.go b/sdk/logical/storage.go index 687788270836..477d65a0936f 100644 --- a/sdk/logical/storage.go +++ b/sdk/logical/storage.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/jsonutil" ) @@ -108,21 +109,46 @@ func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix strin // ClearView is used to delete all the keys in a view func ClearView(ctx context.Context, view ClearableView) error { + return ClearViewWithLogging(ctx, view, nil) +} + +func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error { if view == nil { return nil } + if logger == nil { + logger = hclog.NewNullLogger() + } + // Collect all the keys keys, err := CollectKeys(ctx, view) if err != nil { return err } + logger.Debug("clearing view", "total_keys", len(keys)) + // Delete all the keys - for _, key := range keys { + var pctDone int + for idx, key := range keys { + // Rather than keep trying to do stuff with a canceled context, bail; + // storage will fail anyways + if ctx.Err() != nil { + return ctx.Err() + } if err := view.Delete(ctx, key); err != nil { return err } + + newPctDone := idx * 100.0 / len(keys) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx) + } } + + logger.Debug("view cleared") + return nil } diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go index 82f995176b0c..52fc2bd6aced 100644 --- a/sdk/logical/system_view.go +++ b/sdk/logical/system_view.go @@ -70,6 +70,11 @@ type SystemView interface { PluginEnv(context.Context) (*PluginEnvironment, error) } +type ExtendedSystemView interface { + Auditor() Auditor + ForwardGenericRequest(context.Context, *Request) (*Response, error) +} + type StaticSystemView 
struct { DefaultLeaseTTLVal time.Duration MaxLeaseTTLVal time.Duration @@ -86,6 +91,24 @@ type StaticSystemView struct { PluginEnvironment *PluginEnvironment } +type noopAuditor struct{} + +func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error { + return nil +} + +func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error { + return nil +} + +func (d StaticSystemView) Auditor() Auditor { + return noopAuditor{} +} + +func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) { + return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView") +} + func (d StaticSystemView) DefaultLeaseTTL() time.Duration { return d.DefaultLeaseTTLVal } diff --git a/sdk/logical/testing.go b/sdk/logical/testing.go index c5a7d9ce348b..da435c90a173 100644 --- a/sdk/logical/testing.go +++ b/sdk/logical/testing.go @@ -14,10 +14,11 @@ import ( // TestRequest is a helper to create a purely in-memory Request struct. 
func TestRequest(t testing.T, op Operation, path string) *Request { return &Request{ - Operation: op, - Path: path, - Data: make(map[string]interface{}), - Storage: new(InmemStorage), + Operation: op, + Path: path, + Data: make(map[string]interface{}), + Storage: new(InmemStorage), + Connection: &Connection{}, } } diff --git a/sdk/physical/cache.go b/sdk/physical/cache.go index b79aeee9effb..619b10a6e924 100644 --- a/sdk/physical/cache.go +++ b/sdk/physical/cache.go @@ -23,6 +23,7 @@ var cacheExceptionsPaths = []string{ "index-dr/pages/", "sys/expire/", "core/poison-pill", + "core/raft/tls", } // Cache is used to wrap an underlying physical backend diff --git a/sdk/physical/error.go b/sdk/physical/error.go index d4c6f80e110c..8091f178bc87 100644 --- a/sdk/physical/error.go +++ b/sdk/physical/error.go @@ -4,6 +4,7 @@ import ( "context" "errors" "math/rand" + "sync" "time" log "github.com/hashicorp/go-hclog" @@ -18,6 +19,7 @@ const ( type ErrorInjector struct { backend Backend errorPercent int + randomLock *sync.Mutex random *rand.Rand } @@ -42,6 +44,7 @@ func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInje return &ErrorInjector{ backend: b, errorPercent: errorPercent, + randomLock: new(sync.Mutex), random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), } } @@ -59,7 +62,9 @@ func (e *ErrorInjector) SetErrorPercentage(p int) { } func (e *ErrorInjector) addError() error { + e.randomLock.Lock() roll := e.random.Intn(100) + e.randomLock.Unlock() if roll < e.errorPercent { return errors.New("random error") } diff --git a/sdk/physical/latency.go b/sdk/physical/latency.go index 930492204999..51bb560c2f76 100644 --- a/sdk/physical/latency.go +++ b/sdk/physical/latency.go @@ -3,6 +3,7 @@ package physical import ( "context" "math/rand" + "sync" "time" log "github.com/hashicorp/go-hclog" @@ -19,6 +20,7 @@ type LatencyInjector struct { backend Backend latency time.Duration jitterPercent int + randomLock *sync.Mutex random *rand.Rand } @@ 
-45,6 +47,7 @@ func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log backend: b, latency: latency, jitterPercent: jitter, + randomLock: new(sync.Mutex), random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), } } @@ -68,7 +71,9 @@ func (l *LatencyInjector) addLatency() { if l.jitterPercent > 0 { min := 100 - l.jitterPercent max := 100 + l.jitterPercent + l.randomLock.Lock() percent = l.random.Intn(max-min) + min + l.randomLock.Unlock() } latencyDuration := time.Duration(int(l.latency) * percent / 100) time.Sleep(latencyDuration) diff --git a/sdk/plugin/grpc_system_test.go b/sdk/plugin/grpc_system_test.go index 1bbd824783eb..e53a51752742 100644 --- a/sdk/plugin/grpc_system_test.go +++ b/sdk/plugin/grpc_system_test.go @@ -8,7 +8,7 @@ import ( "reflect" - "github.com/gogo/protobuf/proto" + "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -184,6 +184,7 @@ func TestSystem_GRPC_entityInfo(t *testing.T) { }, }, }, + Disabled: true, } client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go index f2ab2b6b865f..bfa10aaf699e 100644 --- a/sdk/plugin/pb/backend.pb.go +++ b/sdk/plugin/pb/backend.pb.go @@ -528,7 +528,9 @@ type Auth struct { // TTL is a hard limit and cannot be exceeded, also counts for periodic tokens. 
ExplicitMaxTTL int64 `sentinel:"" protobuf:"varint,16,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"` // TokenType is the type of token being requested - TokenType uint32 `sentinel:"" protobuf:"varint,17,opt,name=token_type,json=tokenType,proto3" json:"token_type,omitempty"` + TokenType uint32 `sentinel:"" protobuf:"varint,17,opt,name=token_type,json=tokenType,proto3" json:"token_type,omitempty"` + // Whether the default policy should be added automatically by core + NoDefaultPolicy bool `sentinel:"" protobuf:"varint,18,opt,name=no_default_policy,json=noDefaultPolicy,proto3" json:"no_default_policy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -678,6 +680,13 @@ func (m *Auth) GetTokenType() uint32 { return 0 } +func (m *Auth) GetNoDefaultPolicy() bool { + if m != nil { + return m.NoDefaultPolicy + } + return false +} + type TokenEntry struct { ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Accessor string `sentinel:"" protobuf:"bytes,2,opt,name=accessor,proto3" json:"accessor,omitempty"` @@ -2715,164 +2724,165 @@ func init() { func init() { proto.RegisterFile("sdk/plugin/pb/backend.proto", fileDescriptor_4dbf1dfe0c11846b) } var fileDescriptor_4dbf1dfe0c11846b = []byte{ - // 2499 bytes of a gzipped FileDescriptorProto + // 2519 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xdb, 0x72, 0x1b, 0xc7, 0xd1, 0x2e, 0x00, 0xc4, 0xa9, 0x71, 0x22, 0x46, 0xb4, 0xfe, 0x15, 0x24, 0xff, 0x82, 0xd7, 0x91, - 0x0c, 0x2b, 0x36, 0x68, 0x51, 0x71, 0x2c, 0x27, 0x65, 0xa7, 0x68, 0x8a, 0x96, 0x19, 0x93, 0x36, - 0x6b, 0x09, 0xc5, 0x39, 0x55, 0xc1, 0x83, 0xdd, 0x21, 0xb8, 0xc5, 0xc5, 0xee, 0x66, 0x76, 0x96, - 0x22, 0xae, 0xf2, 0x16, 0x79, 0x8d, 0xdc, 0xa6, 0x72, 0x93, 0xbb, 0x94, 0x2b, 0xf7, 0x79, 0x8d, - 0x3c, 0x43, 0x6a, 0x7a, 0x66, 0x4f, 0x00, 0x68, 0xc9, 0x55, 0xce, 0xdd, 
0x4c, 0x77, 0xcf, 0xa9, - 0xe7, 0xeb, 0xaf, 0x7b, 0x76, 0xe1, 0x6e, 0xe4, 0x5c, 0xee, 0x86, 0x5e, 0x3c, 0x77, 0xfd, 0xdd, - 0x70, 0xb6, 0x3b, 0xa3, 0xf6, 0x25, 0xf3, 0x9d, 0x71, 0xc8, 0x03, 0x11, 0x90, 0x72, 0x38, 0x1b, - 0xdc, 0x9f, 0x07, 0xc1, 0xdc, 0x63, 0xbb, 0x28, 0x99, 0xc5, 0xe7, 0xbb, 0xc2, 0x5d, 0xb0, 0x48, - 0xd0, 0x45, 0xa8, 0x8c, 0x06, 0x03, 0x39, 0x83, 0x17, 0xcc, 0x5d, 0x9b, 0x7a, 0xbb, 0xae, 0xc3, - 0x7c, 0xe1, 0x8a, 0xa5, 0xd6, 0x19, 0x79, 0x9d, 0x5a, 0x45, 0x69, 0xcc, 0x3a, 0x54, 0x0f, 0x17, - 0xa1, 0x58, 0x9a, 0x43, 0xa8, 0x7d, 0xc1, 0xa8, 0xc3, 0x38, 0xb9, 0x0d, 0xb5, 0x0b, 0x6c, 0x19, - 0xa5, 0x61, 0x65, 0xd4, 0xb4, 0x74, 0xcf, 0xfc, 0x03, 0xc0, 0xa9, 0x1c, 0x73, 0xc8, 0x79, 0xc0, - 0xc9, 0x1d, 0x68, 0x30, 0xce, 0xa7, 0x62, 0x19, 0x32, 0xa3, 0x34, 0x2c, 0x8d, 0x3a, 0x56, 0x9d, - 0x71, 0x3e, 0x59, 0x86, 0x8c, 0xfc, 0x1f, 0xc8, 0xe6, 0x74, 0x11, 0xcd, 0x8d, 0xf2, 0xb0, 0x24, - 0x67, 0x60, 0x9c, 0x9f, 0x44, 0xf3, 0x64, 0x8c, 0x1d, 0x38, 0xcc, 0xa8, 0x0c, 0x4b, 0xa3, 0x0a, - 0x8e, 0x39, 0x08, 0x1c, 0x66, 0xfe, 0xa5, 0x04, 0xd5, 0x53, 0x2a, 0x2e, 0x22, 0x42, 0x60, 0x8b, - 0x07, 0x81, 0xd0, 0x8b, 0x63, 0x9b, 0x8c, 0xa0, 0x17, 0xfb, 0x34, 0x16, 0x17, 0xf2, 0x54, 0x36, - 0x15, 0xcc, 0x31, 0xca, 0xa8, 0x5e, 0x15, 0x93, 0xb7, 0xa1, 0xe3, 0x05, 0x36, 0xf5, 0xa6, 0x91, - 0x08, 0x38, 0x9d, 0xcb, 0x75, 0xa4, 0x5d, 0x1b, 0x85, 0x67, 0x4a, 0x46, 0x1e, 0x41, 0x3f, 0x62, - 0xd4, 0x9b, 0xbe, 0xe4, 0x34, 0x4c, 0x0d, 0xb7, 0xd4, 0x84, 0x52, 0xf1, 0x0d, 0xa7, 0xa1, 0xb6, - 0x35, 0xff, 0x51, 0x83, 0xba, 0xc5, 0xfe, 0x14, 0xb3, 0x48, 0x90, 0x2e, 0x94, 0x5d, 0x07, 0x4f, - 0xdb, 0xb4, 0xca, 0xae, 0x43, 0xc6, 0x40, 0x2c, 0x16, 0x7a, 0x72, 0x69, 0x37, 0xf0, 0x0f, 0xbc, - 0x38, 0x12, 0x8c, 0xeb, 0x33, 0x6f, 0xd0, 0x90, 0x7b, 0xd0, 0x0c, 0x42, 0xc6, 0x51, 0x86, 0x0e, - 0x68, 0x5a, 0x99, 0x40, 0x1e, 0x3c, 0xa4, 0xe2, 0xc2, 0xd8, 0x42, 0x05, 0xb6, 0xa5, 0xcc, 0xa1, - 0x82, 0x1a, 0x55, 0x25, 0x93, 0x6d, 0x62, 0x42, 0x2d, 0x62, 0x36, 0x67, 0xc2, 0xa8, 0x0d, 0x4b, - 0xa3, 0xd6, 
0x1e, 0x8c, 0xc3, 0xd9, 0xf8, 0x0c, 0x25, 0x96, 0xd6, 0x90, 0x7b, 0xb0, 0x25, 0xfd, - 0x62, 0xd4, 0xd1, 0xa2, 0x21, 0x2d, 0xf6, 0x63, 0x71, 0x61, 0xa1, 0x94, 0xec, 0x41, 0x5d, 0xdd, - 0x69, 0x64, 0x34, 0x86, 0x95, 0x51, 0x6b, 0xcf, 0x90, 0x06, 0xfa, 0x94, 0x63, 0x05, 0x83, 0xe8, - 0xd0, 0x17, 0x7c, 0x69, 0x25, 0x86, 0xe4, 0x2d, 0x68, 0xdb, 0x9e, 0xcb, 0x7c, 0x31, 0x15, 0xc1, - 0x25, 0xf3, 0x8d, 0x26, 0xee, 0xa8, 0xa5, 0x64, 0x13, 0x29, 0x22, 0x7b, 0xf0, 0x46, 0xde, 0x64, - 0x4a, 0x6d, 0x9b, 0x45, 0x51, 0xc0, 0x0d, 0x40, 0xdb, 0x5b, 0x39, 0xdb, 0x7d, 0xad, 0x92, 0xd3, - 0x3a, 0x6e, 0x14, 0x7a, 0x74, 0x39, 0xf5, 0xe9, 0x82, 0x19, 0x2d, 0x35, 0xad, 0x96, 0x7d, 0x45, - 0x17, 0x8c, 0xdc, 0x87, 0xd6, 0x22, 0x88, 0x7d, 0x31, 0x0d, 0x03, 0xd7, 0x17, 0x46, 0x1b, 0x2d, - 0x00, 0x45, 0xa7, 0x52, 0x42, 0xde, 0x04, 0xd5, 0x53, 0x60, 0xec, 0x28, 0xbf, 0xa2, 0x04, 0xe1, - 0xf8, 0x00, 0xba, 0x4a, 0x9d, 0xee, 0xa7, 0x8b, 0x26, 0x1d, 0x94, 0xa6, 0x3b, 0xf9, 0x00, 0x9a, - 0x88, 0x07, 0xd7, 0x3f, 0x0f, 0x8c, 0x1e, 0xfa, 0xed, 0x56, 0xce, 0x2d, 0x12, 0x13, 0x47, 0xfe, - 0x79, 0x60, 0x35, 0x5e, 0xea, 0x16, 0xf9, 0x04, 0xee, 0x16, 0xce, 0xcb, 0xd9, 0x82, 0xba, 0xbe, - 0xeb, 0xcf, 0xa7, 0x71, 0xc4, 0x22, 0x63, 0x1b, 0x11, 0x6e, 0xe4, 0x4e, 0x6d, 0x25, 0x06, 0x2f, - 0x22, 0x16, 0x91, 0xbb, 0xd0, 0x54, 0x41, 0x3a, 0x75, 0x1d, 0xa3, 0x8f, 0x5b, 0x6a, 0x28, 0xc1, - 0x91, 0x43, 0xde, 0x81, 0x5e, 0x18, 0x78, 0xae, 0xbd, 0x9c, 0x06, 0x57, 0x8c, 0x73, 0xd7, 0x61, - 0x06, 0x19, 0x96, 0x46, 0x0d, 0xab, 0xab, 0xc4, 0x5f, 0x6b, 0xe9, 0xa6, 0xd0, 0xb8, 0x85, 0x86, - 0x6b, 0xa1, 0x31, 0x06, 0xb0, 0x03, 0xdf, 0x67, 0x36, 0xc2, 0x6f, 0x07, 0x4f, 0xd8, 0x95, 0x27, - 0x3c, 0x48, 0xa5, 0x56, 0xce, 0x62, 0xf0, 0x39, 0xb4, 0xf3, 0x50, 0x20, 0xdb, 0x50, 0xb9, 0x64, - 0x4b, 0x0d, 0x7f, 0xd9, 0x24, 0x43, 0xa8, 0x5e, 0x51, 0x2f, 0x66, 0x08, 0x79, 0x0d, 0x44, 0x35, - 0xc4, 0x52, 0x8a, 0x5f, 0x94, 0x9f, 0x96, 0xcc, 0xbf, 0x57, 0x61, 0x4b, 0x82, 0x8f, 0x7c, 0x08, - 0x1d, 0x8f, 0xd1, 0x88, 0x4d, 0x83, 0x50, 0x2e, 
0x10, 0xe1, 0x54, 0xad, 0xbd, 0x6d, 0x39, 0xec, - 0x58, 0x2a, 0xbe, 0x56, 0x72, 0xab, 0xed, 0xe5, 0x7a, 0x32, 0xa4, 0x5d, 0x5f, 0x30, 0xee, 0x53, - 0x6f, 0x8a, 0xc1, 0xa0, 0x02, 0xac, 0x9d, 0x08, 0x9f, 0xc9, 0xa0, 0x58, 0xc5, 0x51, 0x65, 0x1d, - 0x47, 0x03, 0x68, 0xa0, 0xef, 0x5c, 0x16, 0xe9, 0x60, 0x4f, 0xfb, 0x64, 0x0f, 0x1a, 0x0b, 0x26, - 0xa8, 0x8e, 0x35, 0x19, 0x12, 0xb7, 0x93, 0x98, 0x19, 0x9f, 0x68, 0x85, 0x0a, 0x88, 0xd4, 0x6e, - 0x2d, 0x22, 0x6a, 0xeb, 0x11, 0x31, 0x80, 0x46, 0x0a, 0xba, 0xba, 0xba, 0xe1, 0xa4, 0x2f, 0x69, - 0x36, 0x64, 0xdc, 0x0d, 0x1c, 0xa3, 0x81, 0x40, 0xd1, 0x3d, 0x49, 0x92, 0x7e, 0xbc, 0x50, 0x10, - 0x6a, 0x2a, 0x92, 0xf4, 0xe3, 0xc5, 0x3a, 0x62, 0x60, 0x05, 0x31, 0x3f, 0x81, 0x2a, 0xf5, 0x5c, - 0x1a, 0x61, 0x08, 0xc9, 0x9b, 0xd5, 0x7c, 0x3f, 0xde, 0x97, 0x52, 0x4b, 0x29, 0xc9, 0x13, 0xe8, - 0xcc, 0x79, 0x10, 0x87, 0x53, 0xec, 0xb2, 0xc8, 0x68, 0xe3, 0x69, 0x57, 0xad, 0xdb, 0x68, 0xb4, - 0xaf, 0x6c, 0x64, 0x04, 0xce, 0x82, 0xd8, 0x77, 0xa6, 0xb6, 0xeb, 0xf0, 0xc8, 0xe8, 0xa0, 0xf3, - 0x00, 0x45, 0x07, 0x52, 0x22, 0x43, 0x4c, 0x85, 0x40, 0xea, 0xe0, 0x2e, 0xda, 0x74, 0x50, 0x7a, - 0x9a, 0x78, 0xf9, 0xa7, 0xd0, 0x4f, 0x12, 0x53, 0x66, 0xd9, 0x43, 0xcb, 0xed, 0x44, 0x91, 0x1a, - 0x8f, 0x60, 0x9b, 0x5d, 0x4b, 0x0a, 0x75, 0xc5, 0x74, 0x41, 0xaf, 0xa7, 0x42, 0x78, 0x3a, 0xa4, - 0xba, 0x89, 0xfc, 0x84, 0x5e, 0x4f, 0x84, 0x27, 0xe3, 0x5f, 0xad, 0x8e, 0xf1, 0xdf, 0xc7, 0x64, - 0xd4, 0x44, 0x89, 0x8c, 0xff, 0xc1, 0x2f, 0xa1, 0x53, 0xb8, 0xc2, 0x0d, 0x40, 0xde, 0xc9, 0x03, - 0xb9, 0x99, 0x07, 0xef, 0xbf, 0xb6, 0x00, 0xf0, 0x2e, 0xd5, 0xd0, 0xd5, 0x0c, 0x90, 0xbf, 0xe0, - 0xf2, 0x86, 0x0b, 0xa6, 0x9c, 0xf9, 0x42, 0x83, 0x51, 0xf7, 0xbe, 0x17, 0x87, 0x49, 0x0e, 0xa8, - 0xe6, 0x72, 0xc0, 0x7b, 0xb0, 0x25, 0x31, 0x67, 0xd4, 0x32, 0xaa, 0xce, 0x76, 0x84, 0xe8, 0x54, - 0xc8, 0x44, 0xab, 0xb5, 0x40, 0xa8, 0xaf, 0x07, 0x42, 0x1e, 0x61, 0x8d, 0x22, 0xc2, 0xde, 0x86, - 0x8e, 0xcd, 0x19, 0xe6, 0xa3, 0xa9, 0x2c, 0x30, 0x34, 0x02, 0xdb, 0x89, 0x70, 0xe2, 
0x2e, 0x98, - 0xf4, 0x9f, 0xbc, 0x0c, 0x40, 0x95, 0x6c, 0x6e, 0xbc, 0xab, 0xd6, 0xc6, 0xbb, 0xc2, 0xec, 0xee, - 0x31, 0xcd, 0xe2, 0xd8, 0xce, 0x45, 0x42, 0xa7, 0x10, 0x09, 0x05, 0xb8, 0x77, 0x57, 0xe0, 0xbe, - 0x82, 0xc9, 0xde, 0x1a, 0x26, 0xdf, 0x82, 0xb6, 0x74, 0x40, 0x14, 0x52, 0x9b, 0xc9, 0x09, 0xb6, - 0x95, 0x23, 0x52, 0xd9, 0x91, 0x83, 0x11, 0x1c, 0xcf, 0x66, 0xcb, 0x8b, 0xc0, 0x63, 0x19, 0x09, - 0xb7, 0x52, 0xd9, 0x91, 0x23, 0xf7, 0x8b, 0xa8, 0x22, 0x88, 0x2a, 0x6c, 0x0f, 0x3e, 0x82, 0x66, - 0xea, 0xf5, 0x1f, 0x04, 0xa6, 0xbf, 0x96, 0xa0, 0x9d, 0x27, 0x3a, 0x39, 0x78, 0x32, 0x39, 0xc6, - 0xc1, 0x15, 0x4b, 0x36, 0x65, 0x89, 0xc0, 0x99, 0xcf, 0x5e, 0xd2, 0x99, 0xa7, 0x26, 0x68, 0x58, - 0x99, 0x40, 0x6a, 0x5d, 0xdf, 0xe6, 0x6c, 0x91, 0xa0, 0xaa, 0x62, 0x65, 0x02, 0xf2, 0x31, 0x80, - 0x1b, 0x45, 0x31, 0x53, 0x37, 0xb7, 0x85, 0x34, 0x30, 0x18, 0xab, 0xba, 0x71, 0x9c, 0xd4, 0x8d, - 0xe3, 0x49, 0x52, 0x37, 0x5a, 0x4d, 0xb4, 0xc6, 0x2b, 0xbd, 0x0d, 0x35, 0x79, 0x41, 0x93, 0x63, - 0x44, 0x5e, 0xc5, 0xd2, 0x3d, 0xf3, 0xcf, 0x50, 0x53, 0x95, 0xc5, 0xff, 0x94, 0xbc, 0xef, 0x40, - 0x43, 0xcd, 0xed, 0x3a, 0x3a, 0x56, 0xea, 0xd8, 0x3f, 0x72, 0xcc, 0xef, 0xca, 0xd0, 0xb0, 0x58, - 0x14, 0x06, 0x7e, 0xc4, 0x72, 0x95, 0x4f, 0xe9, 0x95, 0x95, 0x4f, 0x79, 0x63, 0xe5, 0x93, 0xd4, - 0x53, 0x95, 0x5c, 0x3d, 0x35, 0x80, 0x06, 0x67, 0x8e, 0xcb, 0x99, 0x2d, 0x74, 0xed, 0x95, 0xf6, - 0xa5, 0xee, 0x25, 0xe5, 0x32, 0x65, 0x47, 0x98, 0x17, 0x9a, 0x56, 0xda, 0x27, 0x8f, 0xf3, 0x05, - 0x83, 0x2a, 0xc5, 0x76, 0x54, 0xc1, 0xa0, 0xb6, 0xbb, 0xa1, 0x62, 0x78, 0x92, 0x15, 0x5e, 0x75, - 0x8c, 0xe6, 0x3b, 0xf9, 0x01, 0x9b, 0x2b, 0xaf, 0x1f, 0x2d, 0x0f, 0x7f, 0x57, 0x86, 0xed, 0xd5, - 0xbd, 0x6d, 0x40, 0xe0, 0x0e, 0x54, 0x55, 0x3e, 0xd3, 0xf0, 0x15, 0x6b, 0x99, 0xac, 0xb2, 0x42, - 0x74, 0xbf, 0x5a, 0x25, 0x8d, 0x57, 0x43, 0xaf, 0x48, 0x28, 0xef, 0xc2, 0xb6, 0x74, 0x51, 0xc8, - 0x9c, 0xac, 0x46, 0x53, 0x0c, 0xd8, 0xd3, 0xf2, 0xb4, 0x4a, 0x7b, 0x04, 0xfd, 0xc4, 0x34, 0xe3, - 0x86, 0x5a, 0xc1, 0xf6, 
0x30, 0xa1, 0x88, 0xdb, 0x50, 0x3b, 0x0f, 0xf8, 0x82, 0x0a, 0x4d, 0x82, - 0xba, 0x57, 0x20, 0x39, 0x64, 0xdb, 0x86, 0xc2, 0x64, 0x22, 0x94, 0xef, 0x10, 0x49, 0x3e, 0xe9, - 0x1b, 0x01, 0x59, 0xb0, 0x61, 0x35, 0x92, 0xb7, 0x81, 0xf9, 0x5b, 0xe8, 0xad, 0x94, 0x85, 0x1b, - 0x1c, 0x99, 0x2d, 0x5f, 0x2e, 0x2c, 0x5f, 0x98, 0xb9, 0xb2, 0x32, 0xf3, 0xef, 0xa0, 0xff, 0x05, - 0xf5, 0x1d, 0x8f, 0xe9, 0xf9, 0xf7, 0xf9, 0x3c, 0x92, 0x09, 0x4e, 0xbf, 0x52, 0xa6, 0x3a, 0xfb, - 0x74, 0xac, 0xa6, 0x96, 0x1c, 0x39, 0xe4, 0x01, 0xd4, 0xb9, 0xb2, 0xd6, 0x00, 0x68, 0xe5, 0xea, - 0x56, 0x2b, 0xd1, 0x99, 0xdf, 0x02, 0x29, 0x4c, 0x2d, 0x1f, 0x28, 0x4b, 0x32, 0x92, 0xe8, 0x57, - 0xa0, 0xd0, 0x51, 0xd5, 0xce, 0x63, 0xd2, 0x4a, 0xb5, 0x64, 0x08, 0x15, 0xc6, 0xb9, 0x5e, 0x02, - 0x0b, 0xc7, 0xec, 0x39, 0x68, 0x49, 0x95, 0xf9, 0x33, 0xe8, 0x9f, 0x85, 0xcc, 0x76, 0xa9, 0x87, - 0x4f, 0x39, 0xb5, 0xc0, 0x7d, 0xa8, 0x4a, 0x27, 0x27, 0x84, 0xd1, 0xc4, 0x81, 0xa8, 0x56, 0x72, - 0xf3, 0x5b, 0x30, 0xd4, 0xbe, 0x0e, 0xaf, 0xdd, 0x48, 0x30, 0xdf, 0x66, 0x07, 0x17, 0xcc, 0xbe, - 0xfc, 0x11, 0x4f, 0x7e, 0x05, 0x77, 0x36, 0xad, 0x90, 0xec, 0xaf, 0x65, 0xcb, 0xde, 0xf4, 0x5c, - 0xe6, 0x0e, 0x5c, 0xa3, 0x61, 0x01, 0x8a, 0x3e, 0x97, 0x12, 0x79, 0x8f, 0x4c, 0x8e, 0x8b, 0x34, - 0x1f, 0xeb, 0x5e, 0xe2, 0x8f, 0xca, 0xcd, 0xfe, 0xf8, 0x5b, 0x09, 0x9a, 0x67, 0x4c, 0xc4, 0x21, - 0x9e, 0xe5, 0x2e, 0x34, 0x67, 0x3c, 0xb8, 0x64, 0x3c, 0x3b, 0x4a, 0x43, 0x09, 0x8e, 0x1c, 0xf2, - 0x18, 0x6a, 0x07, 0x81, 0x7f, 0xee, 0xce, 0xf1, 0x61, 0xab, 0x89, 0x21, 0x1d, 0x3b, 0x56, 0x3a, - 0x45, 0x0c, 0xda, 0x90, 0x0c, 0xa1, 0xa5, 0x3f, 0x13, 0xbc, 0x78, 0x71, 0xf4, 0x2c, 0xa9, 0x78, - 0x73, 0xa2, 0xc1, 0xc7, 0xd0, 0xca, 0x0d, 0xfc, 0x41, 0xa9, 0xea, 0xff, 0x01, 0x70, 0x75, 0xe5, - 0xa3, 0x6d, 0x75, 0x54, 0x3d, 0x52, 0x1e, 0xed, 0x3e, 0x34, 0x65, 0x71, 0xa5, 0xd4, 0x49, 0x92, - 0x2c, 0x65, 0x49, 0xd2, 0x7c, 0x00, 0xfd, 0x23, 0xff, 0x8a, 0x7a, 0xae, 0x43, 0x05, 0xfb, 0x92, - 0x2d, 0xd1, 0x05, 0x6b, 0x3b, 0x30, 0xcf, 0xa0, 0xad, 0x5f, 
0xda, 0xaf, 0xb5, 0xc7, 0xb6, 0xde, - 0xe3, 0xf7, 0x07, 0xd1, 0xbb, 0xd0, 0xd3, 0x93, 0x1e, 0xbb, 0x3a, 0x84, 0x64, 0x8d, 0xc1, 0xd9, - 0xb9, 0x7b, 0xad, 0xa7, 0xd6, 0x3d, 0xf3, 0x29, 0x6c, 0xe7, 0x4c, 0xd3, 0xe3, 0x5c, 0xb2, 0x65, - 0x94, 0x7c, 0x81, 0x90, 0xed, 0xc4, 0x03, 0xe5, 0xcc, 0x03, 0x26, 0x74, 0xf5, 0xc8, 0xe7, 0x4c, - 0xdc, 0x70, 0xba, 0x2f, 0xd3, 0x8d, 0x3c, 0x67, 0x7a, 0xf2, 0x87, 0x50, 0x65, 0xf2, 0xa4, 0xf9, - 0xfc, 0x99, 0xf7, 0x80, 0xa5, 0xd4, 0x1b, 0x16, 0x7c, 0x9a, 0x2e, 0x78, 0x1a, 0xab, 0x05, 0x5f, - 0x73, 0x2e, 0xf3, 0xed, 0x74, 0x1b, 0xa7, 0xb1, 0xb8, 0xe9, 0x46, 0x1f, 0x40, 0x5f, 0x1b, 0x3d, - 0x63, 0x1e, 0x13, 0xec, 0x86, 0x23, 0x3d, 0x04, 0x52, 0x30, 0xbb, 0x69, 0xba, 0x7b, 0xd0, 0x98, - 0x4c, 0x8e, 0x53, 0x6d, 0x91, 0x1b, 0xcd, 0x4f, 0xa0, 0x7f, 0x16, 0x3b, 0xc1, 0x29, 0x77, 0xaf, - 0x5c, 0x8f, 0xcd, 0xd5, 0x62, 0x49, 0xf1, 0x5b, 0xca, 0x15, 0xbf, 0x1b, 0xb3, 0x91, 0x39, 0x02, - 0x52, 0x18, 0x9e, 0xde, 0x5b, 0x14, 0x3b, 0x81, 0x0e, 0x61, 0x6c, 0x9b, 0x23, 0x68, 0x4f, 0xa8, - 0x2c, 0x36, 0x1c, 0x65, 0x63, 0x40, 0x5d, 0xa8, 0xbe, 0x36, 0x4b, 0xba, 0xe6, 0x1e, 0xec, 0x1c, - 0x50, 0xfb, 0xc2, 0xf5, 0xe7, 0xcf, 0xdc, 0x48, 0x56, 0x5b, 0x7a, 0xc4, 0x00, 0x1a, 0x8e, 0x16, - 0xe8, 0x21, 0x69, 0xdf, 0x7c, 0x1f, 0xde, 0xc8, 0x7d, 0xe6, 0x39, 0x13, 0x34, 0xf1, 0xc7, 0x0e, - 0x54, 0x23, 0xd9, 0xc3, 0x11, 0x55, 0x4b, 0x75, 0xcc, 0xaf, 0x60, 0x27, 0x9f, 0x80, 0x65, 0xed, - 0x93, 0x1c, 0x1c, 0xab, 0x92, 0x52, 0xae, 0x2a, 0xd1, 0x3e, 0x2b, 0x67, 0xf9, 0x64, 0x1b, 0x2a, - 0xbf, 0xfe, 0x66, 0xa2, 0xc1, 0x2e, 0x9b, 0xe6, 0x1f, 0xe5, 0xf2, 0xc5, 0xf9, 0xd4, 0xf2, 0x85, - 0xd2, 0xa4, 0xf4, 0x5a, 0xa5, 0xc9, 0x3a, 0xde, 0xde, 0x87, 0xfe, 0x89, 0x17, 0xd8, 0x97, 0x87, - 0x7e, 0xce, 0x1b, 0x06, 0xd4, 0x99, 0x9f, 0x77, 0x46, 0xd2, 0x35, 0xdf, 0x81, 0xde, 0x71, 0x60, - 0x53, 0xef, 0x24, 0x88, 0x7d, 0x91, 0x7a, 0x01, 0xbf, 0xbb, 0x69, 0x53, 0xd5, 0x31, 0xdf, 0x87, - 0xae, 0x4e, 0xd1, 0xfe, 0x79, 0x90, 0x30, 0x63, 0x96, 0xcc, 0x4b, 0xc5, 0x42, 0xdf, 0x3c, 0x86, - 
0x5e, 0x66, 0xae, 0xe6, 0x7d, 0x07, 0x6a, 0x4a, 0xad, 0xcf, 0xd6, 0x4b, 0x5f, 0xaf, 0xca, 0xd2, - 0xd2, 0xea, 0x0d, 0x87, 0x5a, 0x40, 0xf7, 0x14, 0xbf, 0x7f, 0x1e, 0xfa, 0x57, 0x6a, 0xb2, 0x23, - 0x20, 0xea, 0x8b, 0xe8, 0x94, 0xf9, 0x57, 0x2e, 0x0f, 0x7c, 0x2c, 0xae, 0x4b, 0xba, 0x84, 0x49, - 0x26, 0x4e, 0x07, 0x25, 0x16, 0x56, 0x3f, 0x5c, 0x15, 0x6d, 0xf4, 0x21, 0x64, 0x5f, 0x57, 0x64, - 0xaa, 0xe1, 0x6c, 0x11, 0x08, 0x36, 0xa5, 0x8e, 0x93, 0x44, 0x0b, 0x28, 0xd1, 0xbe, 0xe3, 0xf0, - 0xbd, 0xff, 0x94, 0xa1, 0xfe, 0x99, 0x22, 0x70, 0xf2, 0x29, 0x74, 0x0a, 0xe9, 0x9a, 0xbc, 0x81, - 0x65, 0xdd, 0x6a, 0x71, 0x30, 0xb8, 0xbd, 0x26, 0x56, 0xe7, 0xfa, 0x00, 0xda, 0xf9, 0x64, 0x4c, - 0x30, 0xf1, 0xe2, 0xb7, 0xde, 0x01, 0xce, 0xb4, 0x9e, 0xa9, 0xcf, 0x60, 0x67, 0x53, 0x9a, 0x24, - 0xf7, 0xb2, 0x15, 0xd6, 0x53, 0xf4, 0xe0, 0xcd, 0x9b, 0xb4, 0x49, 0x7a, 0xad, 0x1f, 0x78, 0x8c, - 0xfa, 0x71, 0x98, 0xdf, 0x41, 0xd6, 0x24, 0x8f, 0xa1, 0x53, 0x48, 0x14, 0xea, 0x9c, 0x6b, 0xb9, - 0x23, 0x3f, 0xe4, 0x21, 0x54, 0x31, 0x39, 0x91, 0x4e, 0x21, 0x4b, 0x0e, 0xba, 0x69, 0x57, 0xad, - 0x3d, 0x84, 0x2d, 0xfc, 0x02, 0x98, 0x5b, 0x18, 0x47, 0xa4, 0x99, 0x6b, 0xef, 0xdf, 0x25, 0xa8, - 0x27, 0x5f, 0x85, 0x1f, 0xc3, 0x96, 0xcc, 0x01, 0xe4, 0x56, 0x8e, 0x46, 0x93, 0xfc, 0x31, 0xd8, - 0x59, 0x11, 0xaa, 0x05, 0xc6, 0x50, 0x79, 0xce, 0x04, 0x21, 0x39, 0xa5, 0x4e, 0x06, 0x83, 0x5b, - 0x45, 0x59, 0x6a, 0x7f, 0x1a, 0x17, 0xed, 0x35, 0x97, 0x17, 0xec, 0x53, 0x96, 0xfe, 0x08, 0x6a, - 0x8a, 0x65, 0x95, 0x53, 0xd6, 0xf8, 0x59, 0x5d, 0xfe, 0x3a, 0x1f, 0xef, 0xfd, 0x73, 0x0b, 0xe0, - 0x6c, 0x19, 0x09, 0xb6, 0xf8, 0x8d, 0xcb, 0x5e, 0x92, 0x47, 0xd0, 0x7b, 0xc6, 0xce, 0x69, 0xec, - 0x09, 0x7c, 0xaa, 0x49, 0x36, 0xc9, 0xf9, 0x04, 0x0b, 0xbe, 0x94, 0xac, 0x1f, 0x42, 0xeb, 0x84, - 0x5e, 0xbf, 0xda, 0xee, 0x53, 0xe8, 0x14, 0x38, 0x58, 0x6f, 0x71, 0x95, 0xd5, 0xf5, 0x16, 0xd7, - 0xd9, 0xfa, 0x21, 0xd4, 0x35, 0x33, 0xe7, 0xd7, 0xc0, 0x1c, 0x56, 0x60, 0xec, 0x9f, 0x43, 0x6f, - 0x85, 0x97, 0xf3, 0xf6, 0xf8, 0x39, 
0x64, 0x23, 0x6f, 0x3f, 0x95, 0xaf, 0x9d, 0x22, 0x37, 0xe7, - 0x07, 0xea, 0x97, 0xd7, 0x26, 0xf2, 0x7e, 0x5e, 0x7c, 0x27, 0xe1, 0x13, 0xd5, 0x58, 0xa5, 0xcf, - 0x84, 0xbc, 0x07, 0x77, 0x36, 0x69, 0xd2, 0x10, 0xcc, 0x33, 0xe8, 0x5a, 0x08, 0xae, 0xd3, 0xeb, - 0x7b, 0x00, 0x19, 0x89, 0xe6, 0xed, 0x11, 0x1e, 0xab, 0xfc, 0xfa, 0x21, 0x40, 0x46, 0x8d, 0x0a, - 0x55, 0x45, 0x66, 0x55, 0xc3, 0x56, 0xe9, 0xf3, 0x11, 0x34, 0x53, 0x3a, 0xcb, 0xaf, 0x81, 0x13, - 0x14, 0xd9, 0xf1, 0xb3, 0x47, 0xbf, 0x1f, 0xcd, 0x5d, 0x71, 0x11, 0xcf, 0xc6, 0x76, 0xb0, 0xd8, - 0xbd, 0xa0, 0xd1, 0x85, 0x6b, 0x07, 0x3c, 0xdc, 0xbd, 0x92, 0x60, 0xda, 0x2d, 0xfc, 0xb4, 0x9a, - 0xd5, 0xf0, 0xa1, 0xf7, 0xe4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x0c, 0x01, 0xf3, 0xcc, - 0x1a, 0x00, 0x00, + 0x0c, 0x33, 0x36, 0x68, 0xd1, 0x71, 0x2c, 0x27, 0x65, 0xa7, 0x68, 0x8a, 0x96, 0x19, 0x93, 0x36, + 0x6b, 0x09, 0xc7, 0x39, 0x55, 0xc1, 0x83, 0xdd, 0x21, 0xb8, 0xc5, 0xc5, 0xee, 0x66, 0x76, 0x96, + 0x22, 0xae, 0xf2, 0x16, 0x79, 0x8d, 0xdc, 0xe6, 0x2e, 0x77, 0x29, 0x57, 0xee, 0xf3, 0x0a, 0xb9, + 0xcc, 0x33, 0xa4, 0xa6, 0x67, 0xf6, 0x04, 0x80, 0x96, 0x5c, 0xe5, 0xdc, 0xcd, 0x74, 0xf7, 0x9c, + 0x7a, 0xbe, 0xfe, 0xba, 0x67, 0x17, 0xee, 0x47, 0xce, 0xd5, 0x5e, 0xe8, 0xc5, 0x73, 0xd7, 0xdf, + 0x0b, 0x67, 0x7b, 0x33, 0x6a, 0x5f, 0x31, 0xdf, 0x19, 0x87, 0x3c, 0x10, 0x01, 0x29, 0x87, 0xb3, + 0xc1, 0xc3, 0x79, 0x10, 0xcc, 0x3d, 0xb6, 0x87, 0x92, 0x59, 0x7c, 0xb1, 0x27, 0xdc, 0x05, 0x8b, + 0x04, 0x5d, 0x84, 0xca, 0x68, 0x30, 0x90, 0x33, 0x78, 0xc1, 0xdc, 0xb5, 0xa9, 0xb7, 0xe7, 0x3a, + 0xcc, 0x17, 0xae, 0x58, 0x6a, 0x9d, 0x91, 0xd7, 0xa9, 0x55, 0x94, 0xc6, 0xac, 0x43, 0xf5, 0x68, + 0x11, 0x8a, 0xa5, 0x39, 0x84, 0xda, 0xe7, 0x8c, 0x3a, 0x8c, 0x93, 0xbb, 0x50, 0xbb, 0xc4, 0x96, + 0x51, 0x1a, 0x56, 0x46, 0x4d, 0x4b, 0xf7, 0xcc, 0x3f, 0x00, 0x9c, 0xc9, 0x31, 0x47, 0x9c, 0x07, + 0x9c, 0xdc, 0x83, 0x06, 0xe3, 0x7c, 0x2a, 0x96, 0x21, 0x33, 0x4a, 0xc3, 0xd2, 0xa8, 0x63, 0xd5, + 0x19, 0xe7, 0x93, 0x65, 0xc8, 0xc8, 0xff, 0x81, 0x6c, 
0x4e, 0x17, 0xd1, 0xdc, 0x28, 0x0f, 0x4b, + 0x72, 0x06, 0xc6, 0xf9, 0x69, 0x34, 0x4f, 0xc6, 0xd8, 0x81, 0xc3, 0x8c, 0xca, 0xb0, 0x34, 0xaa, + 0xe0, 0x98, 0xc3, 0xc0, 0x61, 0xe6, 0x5f, 0x4a, 0x50, 0x3d, 0xa3, 0xe2, 0x32, 0x22, 0x04, 0xb6, + 0x78, 0x10, 0x08, 0xbd, 0x38, 0xb6, 0xc9, 0x08, 0x7a, 0xb1, 0x4f, 0x63, 0x71, 0x29, 0x4f, 0x65, + 0x53, 0xc1, 0x1c, 0xa3, 0x8c, 0xea, 0x55, 0x31, 0x79, 0x13, 0x3a, 0x5e, 0x60, 0x53, 0x6f, 0x1a, + 0x89, 0x80, 0xd3, 0xb9, 0x5c, 0x47, 0xda, 0xb5, 0x51, 0x78, 0xae, 0x64, 0x64, 0x17, 0xfa, 0x11, + 0xa3, 0xde, 0xf4, 0x05, 0xa7, 0x61, 0x6a, 0xb8, 0xa5, 0x26, 0x94, 0x8a, 0x6f, 0x38, 0x0d, 0xb5, + 0xad, 0xf9, 0xf7, 0x1a, 0xd4, 0x2d, 0xf6, 0xa7, 0x98, 0x45, 0x82, 0x74, 0xa1, 0xec, 0x3a, 0x78, + 0xda, 0xa6, 0x55, 0x76, 0x1d, 0x32, 0x06, 0x62, 0xb1, 0xd0, 0x93, 0x4b, 0xbb, 0x81, 0x7f, 0xe8, + 0xc5, 0x91, 0x60, 0x5c, 0x9f, 0x79, 0x83, 0x86, 0x3c, 0x80, 0x66, 0x10, 0x32, 0x8e, 0x32, 0x74, + 0x40, 0xd3, 0xca, 0x04, 0xf2, 0xe0, 0x21, 0x15, 0x97, 0xc6, 0x16, 0x2a, 0xb0, 0x2d, 0x65, 0x0e, + 0x15, 0xd4, 0xa8, 0x2a, 0x99, 0x6c, 0x13, 0x13, 0x6a, 0x11, 0xb3, 0x39, 0x13, 0x46, 0x6d, 0x58, + 0x1a, 0xb5, 0xf6, 0x61, 0x1c, 0xce, 0xc6, 0xe7, 0x28, 0xb1, 0xb4, 0x86, 0x3c, 0x80, 0x2d, 0xe9, + 0x17, 0xa3, 0x8e, 0x16, 0x0d, 0x69, 0x71, 0x10, 0x8b, 0x4b, 0x0b, 0xa5, 0x64, 0x1f, 0xea, 0xea, + 0x4e, 0x23, 0xa3, 0x31, 0xac, 0x8c, 0x5a, 0xfb, 0x86, 0x34, 0xd0, 0xa7, 0x1c, 0x2b, 0x18, 0x44, + 0x47, 0xbe, 0xe0, 0x4b, 0x2b, 0x31, 0x24, 0x6f, 0x40, 0xdb, 0xf6, 0x5c, 0xe6, 0x8b, 0xa9, 0x08, + 0xae, 0x98, 0x6f, 0x34, 0x71, 0x47, 0x2d, 0x25, 0x9b, 0x48, 0x11, 0xd9, 0x87, 0xd7, 0xf2, 0x26, + 0x53, 0x6a, 0xdb, 0x2c, 0x8a, 0x02, 0x6e, 0x00, 0xda, 0xde, 0xc9, 0xd9, 0x1e, 0x68, 0x95, 0x9c, + 0xd6, 0x71, 0xa3, 0xd0, 0xa3, 0xcb, 0xa9, 0x4f, 0x17, 0xcc, 0x68, 0xa9, 0x69, 0xb5, 0xec, 0x4b, + 0xba, 0x60, 0xe4, 0x21, 0xb4, 0x16, 0x41, 0xec, 0x8b, 0x69, 0x18, 0xb8, 0xbe, 0x30, 0xda, 0x68, + 0x01, 0x28, 0x3a, 0x93, 0x12, 0xf2, 0x3a, 0xa8, 0x9e, 0x02, 0x63, 0x47, 0xf9, 0x15, 0x25, 
0x08, + 0xc7, 0x47, 0xd0, 0x55, 0xea, 0x74, 0x3f, 0x5d, 0x34, 0xe9, 0xa0, 0x34, 0xdd, 0xc9, 0x7b, 0xd0, + 0x44, 0x3c, 0xb8, 0xfe, 0x45, 0x60, 0xf4, 0xd0, 0x6f, 0x77, 0x72, 0x6e, 0x91, 0x98, 0x38, 0xf6, + 0x2f, 0x02, 0xab, 0xf1, 0x42, 0xb7, 0xc8, 0xc7, 0x70, 0xbf, 0x70, 0x5e, 0xce, 0x16, 0xd4, 0xf5, + 0x5d, 0x7f, 0x3e, 0x8d, 0x23, 0x16, 0x19, 0xdb, 0x88, 0x70, 0x23, 0x77, 0x6a, 0x2b, 0x31, 0xf8, + 0x3a, 0x62, 0x11, 0xb9, 0x0f, 0x4d, 0x15, 0xa4, 0x53, 0xd7, 0x31, 0xfa, 0xb8, 0xa5, 0x86, 0x12, + 0x1c, 0x3b, 0xe4, 0x2d, 0xe8, 0x85, 0x81, 0xe7, 0xda, 0xcb, 0x69, 0x70, 0xcd, 0x38, 0x77, 0x1d, + 0x66, 0x90, 0x61, 0x69, 0xd4, 0xb0, 0xba, 0x4a, 0xfc, 0x95, 0x96, 0x6e, 0x0a, 0x8d, 0x3b, 0x68, + 0xb8, 0x16, 0x1a, 0x63, 0x00, 0x3b, 0xf0, 0x7d, 0x66, 0x23, 0xfc, 0x76, 0xf0, 0x84, 0x5d, 0x79, + 0xc2, 0xc3, 0x54, 0x6a, 0xe5, 0x2c, 0x06, 0x9f, 0x41, 0x3b, 0x0f, 0x05, 0xb2, 0x0d, 0x95, 0x2b, + 0xb6, 0xd4, 0xf0, 0x97, 0x4d, 0x32, 0x84, 0xea, 0x35, 0xf5, 0x62, 0x86, 0x90, 0xd7, 0x40, 0x54, + 0x43, 0x2c, 0xa5, 0xf8, 0x45, 0xf9, 0x69, 0xc9, 0xfc, 0x77, 0x15, 0xb6, 0x24, 0xf8, 0xc8, 0x07, + 0xd0, 0xf1, 0x18, 0x8d, 0xd8, 0x34, 0x08, 0xe5, 0x02, 0x11, 0x4e, 0xd5, 0xda, 0xdf, 0x96, 0xc3, + 0x4e, 0xa4, 0xe2, 0x2b, 0x25, 0xb7, 0xda, 0x5e, 0xae, 0x27, 0x43, 0xda, 0xf5, 0x05, 0xe3, 0x3e, + 0xf5, 0xa6, 0x18, 0x0c, 0x2a, 0xc0, 0xda, 0x89, 0xf0, 0x99, 0x0c, 0x8a, 0x55, 0x1c, 0x55, 0xd6, + 0x71, 0x34, 0x80, 0x06, 0xfa, 0xce, 0x65, 0x91, 0x0e, 0xf6, 0xb4, 0x4f, 0xf6, 0xa1, 0xb1, 0x60, + 0x82, 0xea, 0x58, 0x93, 0x21, 0x71, 0x37, 0x89, 0x99, 0xf1, 0xa9, 0x56, 0xa8, 0x80, 0x48, 0xed, + 0xd6, 0x22, 0xa2, 0xb6, 0x1e, 0x11, 0x03, 0x68, 0xa4, 0xa0, 0xab, 0xab, 0x1b, 0x4e, 0xfa, 0x92, + 0x66, 0x43, 0xc6, 0xdd, 0xc0, 0x31, 0x1a, 0x08, 0x14, 0xdd, 0x93, 0x24, 0xe9, 0xc7, 0x0b, 0x05, + 0xa1, 0xa6, 0x22, 0x49, 0x3f, 0x5e, 0xac, 0x23, 0x06, 0x56, 0x10, 0xf3, 0x13, 0xa8, 0x52, 0xcf, + 0xa5, 0x11, 0x86, 0x90, 0xbc, 0x59, 0xcd, 0xf7, 0xe3, 0x03, 0x29, 0xb5, 0x94, 0x92, 0xbc, 0x0f, + 0x9d, 0x39, 0x0f, 0xe2, 0x70, 
0x8a, 0x5d, 0x16, 0x19, 0x6d, 0x3c, 0xed, 0xaa, 0x75, 0x1b, 0x8d, + 0x0e, 0x94, 0x8d, 0x8c, 0xc0, 0x59, 0x10, 0xfb, 0xce, 0xd4, 0x76, 0x1d, 0x1e, 0x19, 0x1d, 0x74, + 0x1e, 0xa0, 0xe8, 0x50, 0x4a, 0x64, 0x88, 0xa9, 0x10, 0x48, 0x1d, 0xdc, 0x45, 0x9b, 0x0e, 0x4a, + 0xcf, 0x12, 0x2f, 0xff, 0x14, 0xfa, 0x49, 0x62, 0xca, 0x2c, 0x7b, 0x68, 0xb9, 0x9d, 0x28, 0x52, + 0xe3, 0x11, 0x6c, 0xb3, 0x1b, 0x49, 0xa1, 0xae, 0x98, 0x2e, 0xe8, 0xcd, 0x54, 0x08, 0x4f, 0x87, + 0x54, 0x37, 0x91, 0x9f, 0xd2, 0x9b, 0x89, 0xf0, 0x64, 0xfc, 0xab, 0xd5, 0x31, 0xfe, 0xfb, 0x98, + 0x8c, 0x9a, 0x28, 0xc1, 0xf8, 0xdf, 0x85, 0xbe, 0x1f, 0x4c, 0x1d, 0x76, 0x41, 0x63, 0x4f, 0xa8, + 0x75, 0x97, 0x3a, 0x98, 0x7a, 0x7e, 0xf0, 0x4c, 0xc9, 0x71, 0xd9, 0xe5, 0xe0, 0x97, 0xd0, 0x29, + 0x5c, 0xf7, 0x06, 0xd0, 0xef, 0xe4, 0x41, 0xdf, 0xcc, 0x03, 0xfd, 0x9f, 0x5b, 0x00, 0x78, 0xef, + 0x6a, 0xe8, 0x6a, 0xb6, 0xc8, 0x83, 0xa1, 0xbc, 0x01, 0x0c, 0x94, 0x33, 0x5f, 0x68, 0xe0, 0xea, + 0xde, 0xf7, 0x62, 0x36, 0xc9, 0x17, 0xd5, 0x5c, 0xbe, 0x78, 0x07, 0xb6, 0x24, 0x3e, 0x8d, 0x5a, + 0x46, 0xeb, 0xd9, 0x8e, 0x10, 0xc9, 0x0a, 0xc5, 0x68, 0xb5, 0x16, 0x34, 0xf5, 0xf5, 0xa0, 0xc9, + 0xa3, 0xb1, 0x51, 0x44, 0xe3, 0x9b, 0xd0, 0xb1, 0x39, 0xc3, 0xdc, 0x35, 0x95, 0xc5, 0x88, 0x46, + 0x6b, 0x3b, 0x11, 0x4e, 0xdc, 0x05, 0x93, 0xfe, 0x93, 0x17, 0x07, 0xa8, 0x92, 0xcd, 0x8d, 0xf7, + 0xda, 0xda, 0x78, 0xaf, 0x58, 0x09, 0x78, 0x4c, 0x33, 0x3e, 0xb6, 0x73, 0x51, 0xd3, 0x29, 0x44, + 0x4d, 0x21, 0x34, 0xba, 0x2b, 0xa1, 0xb1, 0x82, 0xdf, 0xde, 0x1a, 0x7e, 0xdf, 0x80, 0xb6, 0x74, + 0x40, 0x14, 0x52, 0x9b, 0xc9, 0x09, 0xb6, 0x95, 0x23, 0x52, 0xd9, 0xb1, 0x83, 0xd1, 0x1e, 0xcf, + 0x66, 0xcb, 0xcb, 0xc0, 0x63, 0x19, 0x61, 0xb7, 0x52, 0xd9, 0xb1, 0x23, 0xf7, 0x8b, 0x08, 0x24, + 0x88, 0x40, 0x6c, 0x0f, 0x3e, 0x84, 0x66, 0xea, 0xf5, 0x1f, 0x04, 0xa6, 0xbf, 0x96, 0xa0, 0x9d, + 0x27, 0x45, 0x39, 0x78, 0x32, 0x39, 0xc1, 0xc1, 0x15, 0x4b, 0x36, 0x65, 0x39, 0xc1, 0x99, 0xcf, + 0x5e, 0xd0, 0x99, 0xa7, 0x26, 0x68, 0x58, 0x99, 0x40, 0x6a, 0x5d, 
0xdf, 0xe6, 0x6c, 0x91, 0xa0, + 0xaa, 0x62, 0x65, 0x02, 0xf2, 0x11, 0x80, 0x1b, 0x45, 0x31, 0x53, 0x37, 0xb7, 0x85, 0x94, 0x31, + 0x18, 0xab, 0x1a, 0x73, 0x9c, 0xd4, 0x98, 0xe3, 0x49, 0x52, 0x63, 0x5a, 0x4d, 0xb4, 0xc6, 0x2b, + 0xbd, 0x0b, 0x35, 0x79, 0x41, 0x93, 0x13, 0x44, 0x5e, 0xc5, 0xd2, 0x3d, 0xf3, 0xcf, 0x50, 0x53, + 0x55, 0xc8, 0xff, 0x94, 0xe8, 0xef, 0x41, 0x43, 0xcd, 0xed, 0x3a, 0x3a, 0x56, 0xea, 0xd8, 0x3f, + 0x76, 0xcc, 0xef, 0xca, 0xd0, 0xb0, 0x58, 0x14, 0x06, 0x7e, 0xc4, 0x72, 0x55, 0x52, 0xe9, 0xa5, + 0x55, 0x52, 0x79, 0x63, 0x95, 0x94, 0xd4, 0x5e, 0x95, 0x5c, 0xed, 0x35, 0x80, 0x06, 0x67, 0x8e, + 0xcb, 0x99, 0x2d, 0x74, 0x9d, 0x96, 0xf6, 0xa5, 0xee, 0x05, 0xe5, 0x32, 0xbd, 0x47, 0x98, 0x43, + 0x9a, 0x56, 0xda, 0x27, 0x4f, 0xf2, 0xc5, 0x85, 0x2a, 0xdb, 0x76, 0x54, 0x71, 0xa1, 0xb6, 0xbb, + 0xa1, 0xba, 0x78, 0x3f, 0x2b, 0xd2, 0xea, 0x18, 0xcd, 0xf7, 0xf2, 0x03, 0x36, 0x57, 0x69, 0x3f, + 0x5a, 0xce, 0xfe, 0xae, 0x0c, 0xdb, 0xab, 0x7b, 0xdb, 0x80, 0xc0, 0x1d, 0xa8, 0xaa, 0xdc, 0xa7, + 0xe1, 0x2b, 0xd6, 0xb2, 0x5e, 0x65, 0x85, 0xe8, 0x7e, 0xb5, 0x4a, 0x1a, 0x2f, 0x87, 0x5e, 0x91, + 0x50, 0xde, 0x86, 0x6d, 0xe9, 0xa2, 0x90, 0x39, 0x59, 0x3d, 0xa7, 0x18, 0xb0, 0xa7, 0xe5, 0x69, + 0x45, 0xb7, 0x0b, 0xfd, 0xc4, 0x34, 0xe3, 0x86, 0x5a, 0xc1, 0xf6, 0x28, 0xa1, 0x88, 0xbb, 0x50, + 0xbb, 0x08, 0xf8, 0x82, 0x0a, 0x4d, 0x82, 0xba, 0x57, 0x20, 0x39, 0x64, 0xdb, 0x86, 0xc2, 0x64, + 0x22, 0x94, 0x6f, 0x16, 0x49, 0x3e, 0xe9, 0x7b, 0x02, 0x59, 0xb0, 0x61, 0x35, 0x92, 0x77, 0x84, + 0xf9, 0x5b, 0xe8, 0xad, 0x94, 0x90, 0x1b, 0x1c, 0x99, 0x2d, 0x5f, 0x2e, 0x2c, 0x5f, 0x98, 0xb9, + 0xb2, 0x32, 0xf3, 0xef, 0xa0, 0xff, 0x39, 0xf5, 0x1d, 0x8f, 0xe9, 0xf9, 0x0f, 0xf8, 0x3c, 0x92, + 0xc9, 0x50, 0xbf, 0x68, 0xa6, 0x3a, 0xfb, 0x74, 0xac, 0xa6, 0x96, 0x1c, 0x3b, 0xe4, 0x11, 0xd4, + 0xb9, 0xb2, 0xd6, 0x00, 0x68, 0xe5, 0x6a, 0x5c, 0x2b, 0xd1, 0x99, 0xdf, 0x02, 0x29, 0x4c, 0x2d, + 0x1f, 0x33, 0x4b, 0x32, 0x92, 0xe8, 0x57, 0xa0, 0xd0, 0x51, 0xd5, 0xce, 0x63, 0xd2, 0x4a, 0xb5, + 0x64, 
0x08, 0x15, 0xc6, 0xb9, 0x5e, 0x02, 0x8b, 0xcc, 0xec, 0xe9, 0x68, 0x49, 0x95, 0xf9, 0x33, + 0xe8, 0x9f, 0x87, 0xcc, 0x76, 0xa9, 0x87, 0xcf, 0x3e, 0xb5, 0xc0, 0x43, 0xa8, 0x4a, 0x27, 0x27, + 0x84, 0xd1, 0xc4, 0x81, 0xa8, 0x56, 0x72, 0xf3, 0x5b, 0x30, 0xd4, 0xbe, 0x8e, 0x6e, 0xdc, 0x48, + 0x30, 0xdf, 0x66, 0x87, 0x97, 0xcc, 0xbe, 0xfa, 0x11, 0x4f, 0x7e, 0x0d, 0xf7, 0x36, 0xad, 0x90, + 0xec, 0xaf, 0x65, 0xcb, 0xde, 0xf4, 0x42, 0xe6, 0x0e, 0x5c, 0xa3, 0x61, 0x01, 0x8a, 0x3e, 0x93, + 0x12, 0x79, 0x8f, 0x4c, 0x8e, 0x8b, 0x34, 0x1f, 0xeb, 0x5e, 0xe2, 0x8f, 0xca, 0xed, 0xfe, 0xf8, + 0x5b, 0x09, 0x9a, 0xe7, 0x4c, 0xc4, 0x21, 0x9e, 0xe5, 0x3e, 0x34, 0x67, 0x3c, 0xb8, 0x62, 0x3c, + 0x3b, 0x4a, 0x43, 0x09, 0x8e, 0x1d, 0xf2, 0x04, 0x6a, 0x87, 0x81, 0x7f, 0xe1, 0xce, 0xf1, 0x11, + 0xac, 0x89, 0x21, 0x1d, 0x3b, 0x56, 0x3a, 0x45, 0x0c, 0xda, 0x90, 0x0c, 0xa1, 0xa5, 0x3f, 0x29, + 0x7c, 0xfd, 0xf5, 0xf1, 0xb3, 0xa4, 0x3a, 0xce, 0x89, 0x06, 0x1f, 0x41, 0x2b, 0x37, 0xf0, 0x07, + 0xa5, 0xaa, 0xff, 0x07, 0xc0, 0xd5, 0x95, 0x8f, 0xb6, 0xd5, 0x51, 0xf5, 0x48, 0x79, 0xb4, 0x87, + 0xd0, 0x94, 0x85, 0x98, 0x52, 0x27, 0x49, 0xb2, 0x94, 0x25, 0x49, 0xf3, 0x11, 0xf4, 0x8f, 0xfd, + 0x6b, 0xea, 0xb9, 0x0e, 0x15, 0xec, 0x0b, 0xb6, 0x44, 0x17, 0xac, 0xed, 0xc0, 0x3c, 0x87, 0xb6, + 0x7e, 0x95, 0xbf, 0xd2, 0x1e, 0xdb, 0x7a, 0x8f, 0xdf, 0x1f, 0x44, 0x6f, 0x43, 0x4f, 0x4f, 0x7a, + 0xe2, 0xea, 0x10, 0x92, 0x35, 0x06, 0x67, 0x17, 0xee, 0x8d, 0x9e, 0x5a, 0xf7, 0xcc, 0xa7, 0xb0, + 0x9d, 0x33, 0x4d, 0x8f, 0x73, 0xc5, 0x96, 0x51, 0xf2, 0xb5, 0x42, 0xb6, 0x13, 0x0f, 0x94, 0x33, + 0x0f, 0x98, 0xd0, 0xd5, 0x23, 0x9f, 0x33, 0x71, 0xcb, 0xe9, 0xbe, 0x48, 0x37, 0xf2, 0x9c, 0xe9, + 0xc9, 0x1f, 0x43, 0x95, 0xc9, 0x93, 0xe6, 0xf3, 0x67, 0xde, 0x03, 0x96, 0x52, 0x6f, 0x58, 0xf0, + 0x69, 0xba, 0xe0, 0x59, 0xac, 0x16, 0x7c, 0xc5, 0xb9, 0xcc, 0x37, 0xd3, 0x6d, 0x9c, 0xc5, 0xe2, + 0xb6, 0x1b, 0x7d, 0x04, 0x7d, 0x6d, 0xf4, 0x8c, 0x79, 0x4c, 0xb0, 0x5b, 0x8e, 0xf4, 0x18, 0x48, + 0xc1, 0xec, 0xb6, 0xe9, 0x1e, 0x40, 0x63, 
0x32, 0x39, 0x49, 0xb5, 0x45, 0x6e, 0x34, 0x3f, 0x86, + 0xfe, 0x79, 0xec, 0x04, 0x67, 0xdc, 0xbd, 0x76, 0x3d, 0x36, 0x57, 0x8b, 0x25, 0xc5, 0x6f, 0x29, + 0x57, 0xfc, 0x6e, 0xcc, 0x46, 0xe6, 0x08, 0x48, 0x61, 0x78, 0x7a, 0x6f, 0x51, 0xec, 0x04, 0x3a, + 0x84, 0xb1, 0x6d, 0x8e, 0xa0, 0x3d, 0xa1, 0xb2, 0xd8, 0x70, 0x94, 0x8d, 0x01, 0x75, 0xa1, 0xfa, + 0xda, 0x2c, 0xe9, 0x9a, 0xfb, 0xb0, 0x73, 0x48, 0xed, 0x4b, 0xd7, 0x9f, 0x3f, 0x73, 0x23, 0x59, + 0x6d, 0xe9, 0x11, 0x03, 0x68, 0x38, 0x5a, 0xa0, 0x87, 0xa4, 0x7d, 0xf3, 0x5d, 0x78, 0x2d, 0xf7, + 0x49, 0xe8, 0x5c, 0xd0, 0xc4, 0x1f, 0x3b, 0x50, 0x8d, 0x64, 0x0f, 0x47, 0x54, 0x2d, 0xd5, 0x31, + 0xbf, 0x84, 0x9d, 0x7c, 0x02, 0x96, 0xb5, 0x4f, 0x72, 0x70, 0xac, 0x4a, 0x4a, 0xb9, 0xaa, 0x44, + 0xfb, 0xac, 0x9c, 0xe5, 0x93, 0x6d, 0xa8, 0xfc, 0xfa, 0x9b, 0x89, 0x06, 0xbb, 0x6c, 0x9a, 0x7f, + 0x94, 0xcb, 0x17, 0xe7, 0x53, 0xcb, 0x17, 0x4a, 0x93, 0xd2, 0x2b, 0x95, 0x26, 0xeb, 0x78, 0x7b, + 0x17, 0xfa, 0xa7, 0x5e, 0x60, 0x5f, 0x1d, 0xf9, 0x39, 0x6f, 0x18, 0x50, 0x67, 0x7e, 0xde, 0x19, + 0x49, 0xd7, 0x7c, 0x0b, 0x7a, 0x27, 0x81, 0x4d, 0xbd, 0xd3, 0x20, 0xf6, 0x45, 0xea, 0x05, 0xfc, + 0x46, 0xa7, 0x4d, 0x55, 0xc7, 0x7c, 0x17, 0xba, 0x3a, 0x45, 0xfb, 0x17, 0x41, 0xc2, 0x8c, 0x59, + 0x32, 0x2f, 0x15, 0x0b, 0x7d, 0xf3, 0x04, 0x7a, 0x99, 0xb9, 0x9a, 0xf7, 0x2d, 0xa8, 0x29, 0xb5, + 0x3e, 0x5b, 0x2f, 0x7d, 0xe9, 0x2a, 0x4b, 0x4b, 0xab, 0x37, 0x1c, 0x6a, 0x01, 0xdd, 0x33, 0xfc, + 0x56, 0x7a, 0xe4, 0x5f, 0xab, 0xc9, 0x8e, 0x81, 0xa8, 0xaf, 0xa7, 0x53, 0xe6, 0x5f, 0xbb, 0x3c, + 0xf0, 0xb1, 0xb8, 0x2e, 0xe9, 0x12, 0x26, 0x99, 0x38, 0x1d, 0x94, 0x58, 0x58, 0xfd, 0x70, 0x55, + 0xb4, 0xd1, 0x87, 0x90, 0x7d, 0x89, 0x91, 0xa9, 0x86, 0xb3, 0x45, 0x20, 0xd8, 0x94, 0x3a, 0x4e, + 0x12, 0x2d, 0xa0, 0x44, 0x07, 0x8e, 0xc3, 0xf7, 0xff, 0x53, 0x86, 0xfa, 0xa7, 0x8a, 0xc0, 0xc9, + 0x27, 0xd0, 0x29, 0xa4, 0x6b, 0xf2, 0x1a, 0x96, 0x75, 0xab, 0xc5, 0xc1, 0xe0, 0xee, 0x9a, 0x58, + 0x9d, 0xeb, 0x3d, 0x68, 0xe7, 0x93, 0x31, 0xc1, 0xc4, 0x8b, 0xdf, 0x85, 0x07, 
0x38, 0xd3, 0x7a, + 0xa6, 0x3e, 0x87, 0x9d, 0x4d, 0x69, 0x92, 0x3c, 0xc8, 0x56, 0x58, 0x4f, 0xd1, 0x83, 0xd7, 0x6f, + 0xd3, 0x26, 0xe9, 0xb5, 0x7e, 0xe8, 0x31, 0xea, 0xc7, 0x61, 0x7e, 0x07, 0x59, 0x93, 0x3c, 0x81, + 0x4e, 0x21, 0x51, 0xa8, 0x73, 0xae, 0xe5, 0x8e, 0xfc, 0x90, 0xc7, 0x50, 0xc5, 0xe4, 0x44, 0x3a, + 0x85, 0x2c, 0x39, 0xe8, 0xa6, 0x5d, 0xb5, 0xf6, 0x10, 0xb6, 0xf0, 0x6b, 0x41, 0x6e, 0x61, 0x1c, + 0x91, 0x66, 0xae, 0xfd, 0x7f, 0x95, 0xa0, 0x9e, 0x7c, 0x41, 0x7e, 0x02, 0x5b, 0x32, 0x07, 0x90, + 0x3b, 0x39, 0x1a, 0x4d, 0xf2, 0xc7, 0x60, 0x67, 0x45, 0xa8, 0x16, 0x18, 0x43, 0xe5, 0x39, 0x13, + 0x84, 0xe4, 0x94, 0x3a, 0x19, 0x0c, 0xee, 0x14, 0x65, 0xa9, 0xfd, 0x59, 0x5c, 0xb4, 0xd7, 0x5c, + 0x5e, 0xb0, 0x4f, 0x59, 0xfa, 0x43, 0xa8, 0x29, 0x96, 0x55, 0x4e, 0x59, 0xe3, 0x67, 0x75, 0xf9, + 0xeb, 0x7c, 0xbc, 0xff, 0x8f, 0x2d, 0x80, 0xf3, 0x65, 0x24, 0xd8, 0xe2, 0x37, 0x2e, 0x7b, 0x41, + 0x76, 0xa1, 0xa7, 0xbf, 0x89, 0xe0, 0x53, 0x4d, 0xb2, 0x49, 0xce, 0x27, 0x58, 0xf0, 0xa5, 0x64, + 0xfd, 0x18, 0x5a, 0xa7, 0xf4, 0xe6, 0xe5, 0x76, 0x9f, 0x40, 0xa7, 0xc0, 0xc1, 0x7a, 0x8b, 0xab, + 0xac, 0xae, 0xb7, 0xb8, 0xce, 0xd6, 0x8f, 0xa1, 0xae, 0x99, 0x39, 0xbf, 0x06, 0xe6, 0xb0, 0x02, + 0x63, 0xff, 0x1c, 0x7a, 0x2b, 0xbc, 0x9c, 0xb7, 0xc7, 0xcf, 0x21, 0x1b, 0x79, 0xfb, 0xa9, 0x7c, + 0xed, 0x14, 0xb9, 0x39, 0x3f, 0x50, 0xbf, 0xbc, 0x36, 0x91, 0xf7, 0xf3, 0xe2, 0x3b, 0x09, 0x9f, + 0xa8, 0xc6, 0x2a, 0x7d, 0x26, 0xe4, 0x3d, 0xb8, 0xb7, 0x49, 0x93, 0x86, 0x60, 0x9e, 0x41, 0xd7, + 0x42, 0x70, 0x9d, 0x5e, 0xdf, 0x01, 0xc8, 0x48, 0x34, 0x6f, 0x8f, 0xf0, 0x58, 0xe5, 0xd7, 0x0f, + 0x00, 0x32, 0x6a, 0x54, 0xa8, 0x2a, 0x32, 0xab, 0x1a, 0xb6, 0x4a, 0x9f, 0xbb, 0xd0, 0x4c, 0xe9, + 0x2c, 0xbf, 0x06, 0x4e, 0x50, 0x64, 0xc7, 0x4f, 0x77, 0x7f, 0x3f, 0x9a, 0xbb, 0xe2, 0x32, 0x9e, + 0x8d, 0xed, 0x60, 0xb1, 0x77, 0x49, 0xa3, 0x4b, 0xd7, 0x0e, 0x78, 0xb8, 0x77, 0x2d, 0xc1, 0xb4, + 0x57, 0xf8, 0xc1, 0x35, 0xab, 0xe1, 0x43, 0xef, 0xfd, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xbd, + 0xc6, 0x6e, 0xfa, 
0xf8, 0x1a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/sdk/plugin/pb/backend.proto b/sdk/plugin/pb/backend.proto index 65f4ef80dc55..ca86c9c13928 100644 --- a/sdk/plugin/pb/backend.proto +++ b/sdk/plugin/pb/backend.proto @@ -210,6 +210,9 @@ message Auth { // TokenType is the type of token being requested uint32 token_type = 17; + + // Whether the default policy should be added automatically by core + bool no_default_policy = 18; } message TokenEntry { diff --git a/sdk/plugin/pb/translation.go b/sdk/plugin/pb/translation.go index 3ef85a082145..23c7e718cb88 100644 --- a/sdk/plugin/pb/translation.go +++ b/sdk/plugin/pb/translation.go @@ -507,6 +507,7 @@ func LogicalAuthToProtoAuth(a *logical.Auth) (*Auth, error) { Policies: a.Policies, TokenPolicies: a.TokenPolicies, IdentityPolicies: a.IdentityPolicies, + NoDefaultPolicy: a.NoDefaultPolicy, Metadata: a.Metadata, ClientToken: a.ClientToken, Accessor: a.Accessor, @@ -554,6 +555,7 @@ func ProtoAuthToLogicalAuth(a *Auth) (*logical.Auth, error) { Policies: a.Policies, TokenPolicies: a.TokenPolicies, IdentityPolicies: a.IdentityPolicies, + NoDefaultPolicy: a.NoDefaultPolicy, Metadata: a.Metadata, ClientToken: a.ClientToken, Accessor: a.Accessor, diff --git a/sdk/queue/README.md b/sdk/queue/README.md new file mode 100644 index 000000000000..345db195d573 --- /dev/null +++ b/sdk/queue/README.md @@ -0,0 +1,9 @@ +Vault SDK - Queue +================= + +The `queue` package provides Vault plugins with a Priority Queue. It can be used +as an in-memory list of `queue.Item` sorted by their `priority`, and offers +methods to find or remove items by their key. 
Internally it +uses `container/heap`; see [Example Priority +Queue](https://golang.org/pkg/container/heap/#example__priorityQueue) + diff --git a/sdk/queue/priority_queue.go b/sdk/queue/priority_queue.go new file mode 100644 index 000000000000..a0fda087f9be --- /dev/null +++ b/sdk/queue/priority_queue.go @@ -0,0 +1,193 @@ +// Package queue provides Vault plugins with a Priority Queue. It can be used +// as an in-memory list of queue.Item sorted by their priority, and offers +// methods to find or remove items by their key. Internally it uses +// container/heap; see Example Priority Queue: +// https://golang.org/pkg/container/heap/#example__priorityQueue +package queue + +import ( + "container/heap" + "errors" + "sync" + + "github.com/mitchellh/copystructure" +) + +// ErrEmpty is returned for queues with no items +var ErrEmpty = errors.New("queue is empty") + +// ErrDuplicateItem is returned when the queue attempts to push an item to a key that +// already exists. The queue does not attempt to update, instead returns this +// error. If an Item needs to be updated or replaced, pop the item first. +var ErrDuplicateItem = errors.New("duplicate item") + +// New initializes the internal data structures and returns a new +// PriorityQueue +func New() *PriorityQueue { + pq := PriorityQueue{ + data: make(queue, 0), + dataMap: make(map[string]*Item), + } + heap.Init(&pq.data) + return &pq +} + +// PriorityQueue facilitates queue of Items, providing Push, Pop, and +// PopByKey convenience methods. The ordering (priority) is an int64 value +// with the smallest value being the highest priority. PriorityQueue maintains both +// an internal slice for the queue as well as a map of the same items with their +// keys as the index. This enables users to find specific items by key. The map +// must be kept in sync with the data slice. 
+// See https://golang.org/pkg/container/heap/#example__priorityQueue +type PriorityQueue struct { + // data is the internal structure that holds the queue, and is operated on by + // heap functions + data queue + + // dataMap represents all the items in the queue, with unique indexes, used + // for finding specific items. dataMap is kept in sync with the data slice + dataMap map[string]*Item + + // lock is a read/write mutex, and used to facilitate read/write locks on the + // data and dataMap fields + lock sync.RWMutex +} + +// queue is the internal data structure used to satisfy heap.Interface. This +// prevents users from calling Pop and Push heap methods directly +type queue []*Item + +// Item is something managed in the priority queue +type Item struct { + // Key is a unique string used to identify items in the internal data map + Key string + // Value is an unspecified type that implementations can use to store + // information + Value interface{} + + // Priority determines ordering in the queue, with the lowest value being the + // highest priority + Priority int64 + + // index is an internal value used by the heap package, and should not be + // modified by any consumer of the priority queue + index int +} + +// Len returns the count of items in the Priority Queue +func (pq *PriorityQueue) Len() int { + pq.lock.RLock() + defer pq.lock.RUnlock() + return pq.data.Len() +} + +// Pop pops the highest priority item from the queue. This is a +// wrapper/convenience method that calls heap.Pop, so consumers do not need to +// invoke heap functions directly +func (pq *PriorityQueue) Pop() (*Item, error) { + if pq.Len() == 0 { + return nil, ErrEmpty + } + + pq.lock.Lock() + defer pq.lock.Unlock() + + item := heap.Pop(&pq.data).(*Item) + delete(pq.dataMap, item.Key) + return item, nil +} + +// Push pushes an item on to the queue. This is a wrapper/convenience +// method that calls heap.Push, so consumers do not need to invoke heap +// functions directly. 
Items must have unique Keys, and Items in the queue +// cannot be updated. To modify an Item, users must first remove it and re-push +// it after modifications +func (pq *PriorityQueue) Push(i *Item) error { + if i == nil || i.Key == "" { + return errors.New("error adding item: Item Key is required") + } + + pq.lock.Lock() + defer pq.lock.Unlock() + + if _, ok := pq.dataMap[i.Key]; ok { + return ErrDuplicateItem + } + // Copy the item value(s) so that modifications to the source item do not + // affect the item on the queue + clone, err := copystructure.Copy(i) + if err != nil { + return err + } + + pq.dataMap[i.Key] = clone.(*Item) + heap.Push(&pq.data, clone) + return nil +} + +// PopByKey searches the queue for an item with the given key and removes it +// from the queue if found. Returns nil if not found. This method must fix the +// queue after removing any key. +func (pq *PriorityQueue) PopByKey(key string) (*Item, error) { + pq.lock.Lock() + defer pq.lock.Unlock() + + item, ok := pq.dataMap[key] + if !ok { + return nil, nil + } + + // Remove the item from the heap and delete it from the dataMap + itemRaw := heap.Remove(&pq.data, item.index) + delete(pq.dataMap, key) + + if itemRaw != nil { + if i, ok := itemRaw.(*Item); ok { + return i, nil + } + } + + return nil, nil + } + +// Len returns the number of items in the queue data structure. Do not use this +// method directly on the queue, use PriorityQueue.Len() instead. +func (q queue) Len() int { return len(q) } + +// Less returns whether the Item with index i should sort before the Item with +// index j in the queue. This method is used by the queue to determine priority +// internally; the Item with the lower value wins. (priority zero is higher +// priority than 1). The priority of Items with equal values is undetermined. 
+func (q queue) Less(i, j int) bool { + return q[i].Priority < q[j].Priority +} + +// Swap swaps things in-place; part of sort.Interface +func (q queue) Swap(i, j int) { + q[i], q[j] = q[j], q[i] + q[i].index = i + q[j].index = j +} + +// Push is used by heap.Interface to push items onto the heap. This method is +// invoked by container/heap, and should not be used directly. +// See: https://golang.org/pkg/container/heap/#Interface +func (q *queue) Push(x interface{}) { + n := len(*q) + item := x.(*Item) + item.index = n + *q = append(*q, item) +} + +// Pop is used by heap.Interface to pop items off of the heap. This method is +// invoked by container/heap, and should not be used directly. +// See: https://golang.org/pkg/container/heap/#Interface +func (q *queue) Pop() interface{} { + old := *q + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *q = old[0 : n-1] + return item +} diff --git a/sdk/queue/priority_queue_test.go b/sdk/queue/priority_queue_test.go new file mode 100644 index 000000000000..e570c1b0a732 --- /dev/null +++ b/sdk/queue/priority_queue_test.go @@ -0,0 +1,209 @@ +package queue + +import ( + "container/heap" + "fmt" + "testing" + "time" +) + +// Ensure we satisfy the heap.Interface +var _ heap.Interface = &queue{} + +// some tests rely on the ordering of items from this method +func testCases() (tc []*Item) { + // create a slice of items with priority / times offset by these seconds + for i, m := range []time.Duration{ + 5, + 183600, // 51 hours + 15, // 15 seconds + 45, // 45 seconds + 900, // 15 minutes + 300, // 5 minutes + 7200, // 2 hours + 183600, // 51 hours + 7201, // 2 hours, 1 second + 115200, // 32 hours + 1209600, // 2 weeks + } { + n := time.Now() + ft := n.Add(time.Second * m) + tc = append(tc, &Item{ + Key: fmt.Sprintf("item-%d", i), + Value: 1, + Priority: ft.Unix(), + }) + } + return +} + +func TestPriorityQueue_New(t *testing.T) { + pq := New() + + if len(pq.data) != 
len(pq.dataMap) || len(pq.data) != 0 { + t.Fatalf("error in queue/map size, expected data and map to be initialized, got (%d) and (%d)", len(pq.data), len(pq.dataMap)) + } + + if pq.Len() != 0 { + t.Fatalf("expected new queue to have zero size, got (%d)", pq.Len()) + } +} + +func TestPriorityQueue_Push(t *testing.T) { + pq := New() + + // don't allow nil pushing + if err := pq.Push(nil); err == nil { + t.Fatal("Expected error on pushing nil") + } + + tc := testCases() + tcl := len(tc) + for _, i := range tc { + if err := pq.Push(i); err != nil { + t.Fatal(err) + } + } + + if pq.Len() != tcl { + t.Fatalf("error adding items, expected (%d) items, got (%d)", tcl, pq.Len()) + } + + testValidateInternalData(t, pq, len(tc), false) + + item, err := pq.Pop() + if err != nil { + t.Fatalf("error popping item: %s", err) + } + if tc[0].Priority != item.Priority { + t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], item.Priority) + } + if tc[0].Key != item.Key { + t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], item.Priority) + } + + testValidateInternalData(t, pq, len(tc)-1, false) + + // push item with no key + dErr := pq.Push(tc[1]) + if dErr != ErrDuplicateItem { + t.Fatal(err) + } + // push item with no key + tc[2].Key = "" + kErr := pq.Push(tc[2]) + if kErr != nil && kErr.Error() != "error adding item: Item Key is required" { + t.Fatal(kErr) + } + + testValidateInternalData(t, pq, len(tc)-1, true) + + // check nil,nil error for not found + i, err := pq.PopByKey("empty") + if err != nil && i != nil { + t.Fatalf("expected nil error for PopByKey of non-existing key, got: %s", err) + } +} + +func TestPriorityQueue_Pop(t *testing.T) { + pq := New() + + tc := testCases() + for _, i := range tc { + if err := pq.Push(i); err != nil { + t.Fatal(err) + } + } + + topItem, err := pq.Pop() + if err != nil { + t.Fatalf("error calling pop: %s", err) + } + if tc[0].Priority != topItem.Priority { + t.Fatalf("expected tc[0] and 
popped item to match, got (%q) and (%q)", tc[0], topItem.Priority) + } + if tc[0].Key != topItem.Key { + t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], topItem.Priority) + } + + var items []*Item + items = append(items, topItem) + // pop the remaining items, compare size of input and output + i, _ := pq.Pop() + for ; i != nil; i, _ = pq.Pop() { + items = append(items, i) + } + if len(items) != len(tc) { + t.Fatalf("expected popped item count to match test cases, got (%d)", len(items)) + } +} + +func TestPriorityQueue_PopByKey(t *testing.T) { + pq := New() + + tc := testCases() + for _, i := range tc { + if err := pq.Push(i); err != nil { + t.Fatal(err) + } + } + + // grab the top priority item, to capture its value for checking later + item, _ := pq.Pop() + oldPriority := item.Priority + oldKey := item.Key + + // push the item back on, so it gets removed with PopByKey and we verify + // the top item has changed later + err := pq.Push(item) + if err != nil { + t.Fatalf("error re-pushing top item: %s", err) + } + + popKeys := []int{2, 4, 7, 1, 0} + for _, i := range popKeys { + item, err := pq.PopByKey(fmt.Sprintf("item-%d", i)) + if err != nil { + t.Fatalf("failed to pop item-%d, \n\terr: %s\n\titem: %#v", i, err, item) + } + } + + testValidateInternalData(t, pq, len(tc)-len(popKeys), false) + + // grab the top priority item again, to compare with the top item priority + // from above + item, _ = pq.Pop() + newPriority := item.Priority + newKey := item.Key + + if oldPriority == newPriority || oldKey == newKey { + t.Fatalf("expected old/new key and priority to differ, got (%s/%s) and (%d/%d)", oldKey, newKey, oldPriority, newPriority) + } + + testValidateInternalData(t, pq, len(tc)-len(popKeys)-1, true) +} + +// testValidateInternalData checks the internal data structure of the PriorityQueue +// and verifies that items are in-sync. 
Use drain only at the end of a test, +// because it will mutate the input queue +func testValidateInternalData(t *testing.T, pq *PriorityQueue, expectedSize int, drain bool) { + actualSize := pq.Len() + if actualSize != expectedSize { + t.Fatalf("expected new queue size to be (%d), got (%d)", expectedSize, actualSize) + } + + if len(pq.data) != len(pq.dataMap) || len(pq.data) != expectedSize { + t.Fatalf("error in queue/map size, expected data and map to be (%d), got (%d) and (%d)", expectedSize, len(pq.data), len(pq.dataMap)) + } + + if drain && pq.Len() > 0 { + // pop all the items, verify lengths + i, _ := pq.Pop() + for ; i != nil; i, _ = pq.Pop() { + expectedSize-- + if len(pq.data) != len(pq.dataMap) || len(pq.data) != expectedSize { + t.Fatalf("error in queue/map size, expected data and map to be (%d), got (%d) and (%d)", expectedSize, len(pq.data), len(pq.dataMap)) + } + } + } +} diff --git a/sdk/version/version_base.go b/sdk/version/version_base.go index 31475104a0d5..b086bfa5f791 100644 --- a/sdk/version/version_base.go +++ b/sdk/version/version_base.go @@ -8,7 +8,7 @@ var ( // Whether cgo is enabled or not; set at build time CgoEnabled bool - Version = "1.1.2" - VersionPrerelease = "" + Version = "1.2.0" + VersionPrerelease = "beta1" VersionMetadata = "" ) diff --git a/terraform/aws/variables.tf b/terraform/aws/variables.tf index ed6bba4bcead..972e620cbddb 100644 --- a/terraform/aws/variables.tf +++ b/terraform/aws/variables.tf @@ -3,7 +3,7 @@ //------------------------------------------------------------------- variable "download-url" { - default = "https://releases.hashicorp.com/vault/1.1.2/vault_1.1.2_linux_amd64.zip" + default = "https://releases.hashicorp.com/vault/1.2.0-beta1/vault_1.2.0-beta1_linux_amd64.zip" description = "URL to download Vault" } diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js index 781c3aa08723..2220472b85eb 100644 --- a/ui/.eslintrc.js +++ b/ui/.eslintrc.js @@ -13,6 +13,7 @@ module.exports = { rules: { 'no-unused-vars': 
['error', { ignoreRestSiblings: true }], 'prettier/prettier': 'error', + 'ember/no-jquery': 'error', }, globals: { TextEncoderLite: true, diff --git a/ui/.storybook/config.js b/ui/.storybook/config.js index e7392da1f436..be3be01e833a 100644 --- a/ui/.storybook/config.js +++ b/ui/.storybook/config.js @@ -29,6 +29,9 @@ addDecorator(storyFn => { assign(element.style, styles.style); const innerElement = document.createElement('div'); + const wormhole = document.createElement('div'); + wormhole.setAttribute('id', 'ember-basic-dropdown-wormhole'); + innerElement.appendChild(wormhole); element.appendChild(innerElement); innerElement.appendTo = function appendTo(el) { diff --git a/ui/.storybook/preview-head.html b/ui/.storybook/preview-head.html index 9c353e54b153..13cafd050917 100644 --- a/ui/.storybook/preview-head.html +++ b/ui/.storybook/preview-head.html @@ -1,4 +1,6 @@ - + + + diff --git a/ui/app/adapters/application.js b/ui/app/adapters/application.js index 6a7a15a7fc5d..6e54c834b65d 100644 --- a/ui/app/adapters/application.js +++ b/ui/app/adapters/application.js @@ -3,13 +3,14 @@ import { assign } from '@ember/polyfills'; import { set } from '@ember/object'; import RSVP from 'rsvp'; import DS from 'ember-data'; +import AdapterFetch from 'ember-fetch/mixins/adapter-fetch'; import fetch from 'fetch'; import config from '../config/environment'; const { APP } = config; const { POLLING_URLS, NAMESPACE_ROOT_URLS } = APP; -export default DS.RESTAdapter.extend({ +export default DS.RESTAdapter.extend(AdapterFetch, { auth: service(), namespaceService: service('namespace'), controlGroup: service(), diff --git a/ui/app/adapters/generated-item-list.js b/ui/app/adapters/generated-item-list.js new file mode 100644 index 000000000000..313985be9d78 --- /dev/null +++ b/ui/app/adapters/generated-item-list.js @@ -0,0 +1,35 @@ +import { assign } from '@ember/polyfills'; +import ApplicationAdapter from './application'; + +export default ApplicationAdapter.extend({ + namespace: 'v1', + 
urlForItem() {}, + optionsForQuery(id) { + let data = {}; + if (!id) { + data['list'] = true; + } + return { data }; + }, + + fetchByQuery(store, query) { + const { id, method, type } = query; + return this.ajax(this.urlForItem(method, id, type), 'GET', this.optionsForQuery(id)).then(resp => { + const data = { + id, + name: id, + method, + }; + + return assign({}, resp, data); + }); + }, + + query(store, type, query) { + return this.fetchByQuery(store, query); + }, + + queryRecord(store, type, query) { + return this.fetchByQuery(store, query); + }, +}); diff --git a/ui/app/adapters/kmip/base.js b/ui/app/adapters/kmip/base.js new file mode 100644 index 000000000000..5815933da623 --- /dev/null +++ b/ui/app/adapters/kmip/base.js @@ -0,0 +1,64 @@ +import ApplicationAdapter from '../application'; +import { encodePath } from 'vault/utils/path-encoding-helpers'; + +export default ApplicationAdapter.extend({ + namespace: 'v1', + pathForType(type) { + return type.replace('kmip/', ''); + }, + + _url(modelType, meta = {}, id) { + let { backend, scope, role } = meta; + let type = this.pathForType(modelType); + let base; + switch (type) { + case 'scope': + base = `${encodePath(backend)}/scope`; + break; + case 'role': + base = `${encodePath(backend)}/scope/${encodePath(scope)}/role`; + break; + case 'credential': + base = `${encodePath(backend)}/scope/${encodePath(scope)}/role/${encodePath(role)}/credential`; + break; + } + if (id && type === 'credential') { + return `/v1/${base}/lookup?serial_number=${encodePath(id)}`; + } + + if (id) { + return `/v1/${base}/${encodePath(id)}`; + } + return `/v1/${base}`; + }, + + urlForQuery(query, modelType) { + let base = this._url(modelType, query); + return base + '?list=true'; + }, + + query(store, type, query) { + return this.ajax(this.urlForQuery(query, type.modelName), 'GET').then(resp => { + // remove pagination query items here + const { size, page, responsePath, pageFilter, ...modelAttrs } = query; + resp._requestQuery = 
modelAttrs; + return resp; + }); + }, + + queryRecord(store, type, query) { + let id = query.id; + delete query.id; + return this.ajax(this._url(type.modelName, query, id), 'GET').then(resp => { + resp.id = id; + resp = { ...resp, ...query }; + return resp; + }); + }, + buildURL(modelName, id, snapshot, requestType, query) { + if (requestType === 'createRecord') { + return this._super(...arguments); + } + return this._super(`${modelName}`, id, snapshot, requestType, query); + }, +}); diff --git a/ui/app/adapters/kmip/ca.js b/ui/app/adapters/kmip/ca.js new file mode 100644 index 000000000000..bc07c11c0d1d --- /dev/null +++ b/ui/app/adapters/kmip/ca.js @@ -0,0 +1,8 @@ +import BaseAdapter from './base'; + +export default BaseAdapter.extend({ + urlForFindRecord(id, modelName, snapshot) { + let name = this.pathForType(modelName); + return this.buildURL(id, name, snapshot); + }, +}); diff --git a/ui/app/adapters/kmip/config.js b/ui/app/adapters/kmip/config.js new file mode 100644 index 000000000000..32f27803f894 --- /dev/null +++ b/ui/app/adapters/kmip/config.js @@ -0,0 +1,19 @@ +import BaseAdapter from './base'; + +export default BaseAdapter.extend({ + _url(id, modelName, snapshot) { + let name = this.pathForType(modelName); + // id here will be the mount path, + // modelName will be config so we want to transpose the first two call args + return this.buildURL(id, name, snapshot); + }, + urlForFindRecord() { + return this._url(...arguments); + }, + urlForCreateRecord(modelName, snapshot) { + return this._url(snapshot.id, modelName, snapshot); + }, + urlForUpdateRecord() { + return this._url(...arguments); + }, +}); diff --git a/ui/app/adapters/kmip/credential.js b/ui/app/adapters/kmip/credential.js new file mode 100644 index 000000000000..d6c6bb8d72d4 --- /dev/null +++ b/ui/app/adapters/kmip/credential.js @@ -0,0 +1,30 @@ +import BaseAdapter from './base'; + +export default BaseAdapter.extend({ + createRecord(store, type, snapshot) { + let url = 
this._url(type.modelName, { + backend: snapshot.record.backend, + scope: snapshot.record.scope, + role: snapshot.record.role, + }); + url = `${url}/generate`; + return this.ajax(url, 'POST', { data: snapshot.serialize() }).then(model => { + model.data.id = model.data.serial_number; + return model; + }); + }, + + deleteRecord(store, type, snapshot) { + let url = this._url(type.modelName, { + backend: snapshot.record.backend, + scope: snapshot.record.scope, + role: snapshot.record.role, + }); + url = `${url}/revoke`; + return this.ajax(url, 'POST', { + data: { + serial_number: snapshot.id, + }, + }); + }, +}); diff --git a/ui/app/adapters/kmip/role.js b/ui/app/adapters/kmip/role.js new file mode 100644 index 000000000000..1f76a0400c5f --- /dev/null +++ b/ui/app/adapters/kmip/role.js @@ -0,0 +1,70 @@ +import BaseAdapter from './base'; +import { decamelize } from '@ember/string'; +import { getProperties } from '@ember/object'; + +export default BaseAdapter.extend({ + createRecord(store, type, snapshot) { + let name = snapshot.id || snapshot.attr('name'); + let url = this._url( + type.modelName, + { + backend: snapshot.record.backend, + scope: snapshot.record.scope, + }, + name + ); + return this.ajax(url, 'POST', { data: this.serialize(snapshot) }).then(() => { + return { + id: name, + name, + backend: snapshot.record.backend, + scope: snapshot.record.scope, + }; + }); + }, + + deleteRecord(store, type, snapshot) { + let name = snapshot.id || snapshot.attr('name'); + let url = this._url( + type.modelName, + { + backend: snapshot.record.backend, + scope: snapshot.record.scope, + }, + name + ); + return this.ajax(url, 'DELETE'); + }, + + serialize(snapshot) { + // the endpoint here won't allow sending `operation_all` and `operation_none` at the same time or with + // other values, so we manually check for them and send an abbreviated object + let json = snapshot.serialize(); + let keys = snapshot.record.nonOperationFields.map(decamelize); + let nonOperationFields = 
getProperties(json, keys); + for (let field in nonOperationFields) { + if (nonOperationFields[field] == null) { + delete nonOperationFields[field]; + } + } + if (json.operation_all) { + return { + operation_all: true, + ...nonOperationFields, + }; + } + if (json.operation_none) { + return { + operation_none: true, + ...nonOperationFields, + }; + } + delete json.operation_none; + delete json.operation_all; + return json; + }, + + updateRecord() { + return this.createRecord(...arguments); + }, +}); diff --git a/ui/app/adapters/kmip/scope.js b/ui/app/adapters/kmip/scope.js new file mode 100644 index 000000000000..f2d6a02e060d --- /dev/null +++ b/ui/app/adapters/kmip/scope.js @@ -0,0 +1,19 @@ +import BaseAdapter from './base'; + +export default BaseAdapter.extend({ + createRecord(store, type, snapshot) { + let name = snapshot.attr('name'); + return this.ajax(this._url(type.modelName, { backend: snapshot.record.backend }, name), 'POST').then( + () => { + return { + id: name, + name, + }; + } + ); + }, + + deleteRecord(store, type, snapshot) { + return this.ajax(this._url(type.modelName, { backend: snapshot.record.backend }, snapshot.id), 'DELETE'); + }, +}); diff --git a/ui/app/adapters/requests.js b/ui/app/adapters/requests.js new file mode 100644 index 000000000000..5f30e8ebb83a --- /dev/null +++ b/ui/app/adapters/requests.js @@ -0,0 +1,14 @@ +import Application from './application'; + +export default Application.extend({ + queryRecord() { + return this.ajax(this.urlForQuery(), 'GET').then(resp => { + resp.id = resp.request_id; + return resp; + }); + }, + + urlForQuery() { + return this.buildURL() + '/internal/counters/requests'; + }, +}); diff --git a/ui/app/app.js b/ui/app/app.js index 8b963f8e10cb..067a6f8c8744 100644 --- a/ui/app/app.js +++ b/ui/app/app.js @@ -8,10 +8,50 @@ defineModifier(); let App; +/* eslint-disable ember/avoid-leaking-state-in-ember-objects */ App = Application.extend({ modulePrefix: config.modulePrefix, podModulePrefix: 
config.podModulePrefix, Resolver, + engines: { + openApiExplorer: { + dependencies: { + services: ['auth', 'flash-messages', 'namespace', 'router', 'version'], + }, + }, + replication: { + dependencies: { + services: [ + 'auth', + 'flash-messages', + 'namespace', + 'replication-mode', + 'router', + 'store', + 'version', + 'wizard', + ], + }, + }, + kmip: { + dependencies: { + services: [ + 'auth', + 'flash-messages', + 'namespace', + 'path-help', + 'router', + 'store', + 'version', + 'wizard', + 'secret-mount-path', + ], + externalRoutes: { + secrets: 'vault.cluster.secrets.backends', + }, + }, + }, + }, }); loadInitializers(App, config.modulePrefix); diff --git a/ui/app/components/alert-banner.js b/ui/app/components/alert-banner.js deleted file mode 100644 index 1d27a499eae7..000000000000 --- a/ui/app/components/alert-banner.js +++ /dev/null @@ -1,32 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import { messageTypes } from 'vault/helpers/message-types'; - -/** - * @module AlertBanner - * `AlertBanner` components are used to inform users of important messages. - * - * @example - * ```js - * - * ``` - * - * @param type=null {String} - The banner type. This comes from the message-types helper. - * @param [message=null {String}] - The message to display within the banner. 
- * - */ - -export default Component.extend({ - type: null, - message: null, - yieldWithoutColumn: false, - classNameBindings: ['containerClass'], - - containerClass: computed('type', function() { - return 'message ' + messageTypes([this.get('type')]).class; - }), - - alertType: computed('type', function() { - return messageTypes([this.get('type')]); - }), -}); diff --git a/ui/app/components/alert-inline.js b/ui/app/components/alert-inline.js deleted file mode 100644 index 61638995ce06..000000000000 --- a/ui/app/components/alert-inline.js +++ /dev/null @@ -1,36 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import { messageTypes } from 'vault/helpers/message-types'; - -/** - * @module AlertInline - * `AlertInline` components are used to inform users of important messages. - * - * @example - * ```js - * - * ``` - * - * @param type=null{String} - The alert type. This comes from the message-types helper. - * @param [message=null]{String} - The message to display within the alert. 
- * - */ - -export default Component.extend({ - type: null, - message: null, - - classNames: ['message-inline'], - - textClass: computed('type', function() { - if (this.get('type') == 'danger') { - return messageTypes([this.get('type')]).glyphClass; - } - - return; - }), - - alertType: computed('type', function() { - return messageTypes([this.get('type')]); - }), -}); diff --git a/ui/app/components/auth-form.js b/ui/app/components/auth-form.js index fc8596404d21..288463430213 100644 --- a/ui/app/components/auth-form.js +++ b/ui/app/components/auth-form.js @@ -175,15 +175,17 @@ export default Component.extend(DEFAULTS, { handleError(e, prefixMessage = true) { this.set('loading', false); - if (!e.errors) { - return e; + let errors; + if (e.errors) { + errors = e.errors.map(error => { + if (error.detail) { + return error.detail; + } + return error; + }); + } else { + errors = [e]; } - let errors = e.errors.map(error => { - if (error.detail) { - return error.detail; - } - return error; - }); let message = prefixMessage ? 'Authentication failed: ' : ''; this.set('error', `${message}${errors.join('.')}`); }, diff --git a/ui/app/components/confirm-action.js b/ui/app/components/confirm-action.js deleted file mode 100644 index 7f3e76c1c71e..000000000000 --- a/ui/app/components/confirm-action.js +++ /dev/null @@ -1,79 +0,0 @@ -import Component from '@ember/component'; -import hbs from 'htmlbars-inline-precompile'; - -/** - * @module ConfirmAction - * `ConfirmAction` is a button followed by a confirmation message and button used to prevent users from performing actions they do not intend to. - * - * @example - * ```js - * { console.log('Action!') } }} - * @confirmMessage="Are you sure you want to delete this config?"> - * Delete - * - * ``` - * - * @property {Func} onConfirmAction=null - The action to take upon confirming. - * @property {String} [confirmMessage=Are you sure you want to do this?] - The message to display upon confirming. 
- * @property {String} [confirmButtonText=Delete] - The confirm button text. - * @property {String} [cancelButtonText=Cancel] - The cancel button text. - * @property {String} [disabledMessage=Complete the form to complete this action] - The message to display when the button is disabled. - * - */ - -export default Component.extend({ - tagName: 'span', - classNames: ['confirm-action'], - layout: hbs` - {{#if showConfirm ~}} - - {{if disabled disabledMessage confirmMessage}} - - - - {{else}} - - {{~/if}} - `, - - disabled: false, - disabledMessage: 'Complete the form to complete this action', - showConfirm: false, - messageClasses: 'is-size-8 has-text-grey', - confirmButtonClasses: 'is-danger is-outlined button', - containerClasses: '', - buttonClasses: 'button', - buttonText: 'Delete', - confirmMessage: 'Are you sure you want to do this?', - confirmButtonText: 'Delete', - cancelButtonClasses: 'button', - cancelButtonText: 'Cancel', - // the action to take when we confirm - onConfirmAction: null, - - actions: { - toggleConfirm() { - this.toggleProperty('showConfirm'); - }, - - onConfirm() { - const confirmAction = this.get('onConfirmAction'); - - if (typeof confirmAction !== 'function') { - throw new Error('confirm-action components expects `onConfirmAction` attr to be a function'); - } else { - confirmAction(); - this.toggleProperty('showConfirm'); - } - }, - }, -}); diff --git a/ui/app/components/console/ui-panel.js b/ui/app/components/console/ui-panel.js index 9d68704bfd94..788db5ed1b40 100644 --- a/ui/app/components/console/ui-panel.js +++ b/ui/app/components/console/ui-panel.js @@ -45,13 +45,13 @@ export default Component.extend({ let serviceArgs; if ( - executeUICommand( - command, - args => this.logAndOutput(args), - args => service.clearLog(args), - () => this.toggleProperty('isFullscreen'), - () => this.get('refreshRoute').perform() - ) + executeUICommand(command, args => this.logAndOutput(args), { + api: () => this.routeToExplore.perform(command), + 
clearall: () => service.clearLog(true), + clear: () => service.clearLog(), + fullscreen: () => this.toggleProperty('isFullscreen'), + refresh: () => this.refreshRoute.perform(), + }) ) { return; } @@ -104,6 +104,29 @@ export default Component.extend({ } }), + routeToExplore: task(function*(command) { + let filter = command.replace('api', '').trim(); + try { + yield this.router.transitionTo('vault.cluster.open-api-explorer.index', { + queryParams: { filter }, + }); + let content = + 'Welcome to the Vault API explorer! \nYou can search for endpoints, see what parameters they accept, and even execute requests with your current token.'; + if (filter) { + content = `Welcome to the Vault API explorer! \nWe've filtered the list of endpoints for '${filter}'.`; + } + this.logAndOutput(null, { + type: 'success', + content, + }); + } catch (error) { + this.logAndOutput(null, { + type: 'error', + content: 'There was a problem navigating to the api explorer.', + }); + } + }), + shiftCommandIndex(keyCode) { this.get('console').shiftCommandIndex(keyCode, val => { this.set('inputValue', val); diff --git a/ui/app/components/doc-link.js b/ui/app/components/doc-link.js deleted file mode 100644 index 35a9ac0b1a1c..000000000000 --- a/ui/app/components/doc-link.js +++ /dev/null @@ -1,20 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import hbs from 'htmlbars-inline-precompile'; - -export default Component.extend({ - tagName: 'a', - classNames: ['doc-link'], - attributeBindings: ['target', 'rel', 'href'], - - layout: hbs`{{yield}}`, - - target: '_blank', - rel: 'noreferrer noopener', - host: 'https://www.vaultproject.io', - - path: '/', - href: computed('host', 'path', function() { - return `${this.host}${this.path}`; - }), -}); diff --git a/ui/app/components/download-button.js b/ui/app/components/download-button.js deleted file mode 100644 index 46ce1219808a..000000000000 --- a/ui/app/components/download-button.js +++ /dev/null @@ -1,51 
+0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import hbs from 'htmlbars-inline-precompile'; - -export default Component.extend({ - layout: hbs`{{#if hasBlock}} {{yield}} {{else}} {{actionText}} {{/if}}`, - tagName: 'a', - role: 'button', - attributeBindings: ['role', 'download', 'href'], - download: computed('filename', 'extension', function() { - return `${this.get('filename')}-${new Date().toISOString()}.${this.get('extension')}`; - }), - - fileLike: computed('data', 'mime', 'stringify', 'download', function() { - let file; - let data = this.get('data'); - let filename = this.get('download'); - let mime = this.get('mime'); - if (this.get('stringify')) { - data = JSON.stringify(data, null, 2); - } - if (window.navigator.msSaveOrOpenBlob) { - file = new Blob([data], { type: mime }); - file.name = filename; - } else { - file = new File([data], filename, { type: mime }); - } - return file; - }), - - href: computed('fileLike', function() { - return window.URL.createObjectURL(this.get('fileLike')); - }), - - click(event) { - if (!window.navigator.msSaveOrOpenBlob) { - return; - } - event.preventDefault(); - let file = this.get('fileLike'); - //lol whyyyy - window.navigator.msSaveOrOpenBlob(file, file.name); - }, - - actionText: 'Download', - data: null, - filename: null, - mime: 'text/plain', - extension: 'txt', - stringify: false, -}); diff --git a/ui/app/components/edit-form.js b/ui/app/components/edit-form.js deleted file mode 100644 index 67521649b61a..000000000000 --- a/ui/app/components/edit-form.js +++ /dev/null @@ -1,50 +0,0 @@ -import { inject as service } from '@ember/service'; -import Component from '@ember/component'; -import { task } from 'ember-concurrency'; -import DS from 'ember-data'; - -export default Component.extend({ - flashMessages: service(), - - // public API - model: null, - successMessage: 'Saved!', - deleteSuccessMessage: 'Deleted!', - deleteButtonText: 'Delete', - saveButtonText: 'Save', - 
cancelLink: null, - - /* - * @param Function - * @public - * - * Optional param to call a function upon successfully saving a model - */ - onSave: () => {}, - - save: task(function*(model, options = { method: 'save' }) { - let { method } = options; - let messageKey = method === 'save' ? 'successMessage' : 'deleteSuccessMessage'; - try { - yield model[method](); - } catch (err) { - // err will display via model state - // AdapterErrors are handled by the error-message component - if (err instanceof DS.AdapterError === false) { - throw err; - } - return; - } - this.get('flashMessages').success(this.get(messageKey)); - yield this.get('onSave')({ saveType: method, model }); - }) - .drop() - .withTestWaiter(), - - willDestroy() { - let model = this.get('model'); - if ((model.get('isDirty') && !model.isDestroyed) || !model.isDestroying) { - model.rollbackAttributes(); - } - }, -}); diff --git a/ui/app/components/edition-badge.js b/ui/app/components/edition-badge.js deleted file mode 100644 index 025f657be6a4..000000000000 --- a/ui/app/components/edition-badge.js +++ /dev/null @@ -1,18 +0,0 @@ -import { computed } from '@ember/object'; -import Component from '@ember/component'; - -export default Component.extend({ - tagName: 'span', - classNames: 'tag is-outlined edition-badge', - attributeBindings: ['edition:aria-label'], - icon: computed('edition', function() { - const edition = this.get('edition'); - const entEditions = ['Enterprise', 'Premium', 'Pro']; - - if (entEditions.includes(edition)) { - return 'edition-enterprise'; - } else { - return 'edition-oss'; - } - }), -}); diff --git a/ui/app/components/empty-action-namespaces.js b/ui/app/components/empty-action-namespaces.js deleted file mode 100644 index 96167992d7ce..000000000000 --- a/ui/app/components/empty-action-namespaces.js +++ /dev/null @@ -1,2 +0,0 @@ -import OuterHTML from './outer-html'; -export default OuterHTML.extend(); diff --git a/ui/app/components/empty-state.js b/ui/app/components/empty-state.js 
deleted file mode 100644 index 04e6c3d213c7..000000000000 --- a/ui/app/components/empty-state.js +++ /dev/null @@ -1,6 +0,0 @@ -import OuterHTML from './outer-html'; - -export default OuterHTML.extend({ - title: null, - message: null, -}); diff --git a/ui/app/components/flex-table-column.js b/ui/app/components/flex-table-column.js deleted file mode 100644 index 361dd21b623b..000000000000 --- a/ui/app/components/flex-table-column.js +++ /dev/null @@ -1,7 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - classNames: 'column', - header: null, - content: null, -}); diff --git a/ui/app/components/form-field-groups.js b/ui/app/components/form-field-groups.js deleted file mode 100644 index a2a5983f5644..000000000000 --- a/ui/app/components/form-field-groups.js +++ /dev/null @@ -1,39 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; - -/** - * @module FormFieldGroups - * `FormFieldGroups` components are field groups associated with a particular model. They render individual `FormField` components. - * - * @example - * ```js - * {{if model.fieldGroups}} - * - * {{/if}} - * - * ... - * - * - * ``` - * - * @param [renderGroup=null] {String} - A whitelist of groups to include in the render. - * @param model=null {DS.Model} - Model to be passed down to form-field component. If `fieldGroups` is present on the model then it will be iterated over and groups of `FormField` components will be rendered. - * @param onChange=null {Func} - Handler that will get set on the `FormField` component. 
- * - */ - -export default Component.extend({ - tagName: '', - - renderGroup: computed(function() { - return null; - }), - - model: null, - - onChange: () => {}, -}); diff --git a/ui/app/components/form-field.js b/ui/app/components/form-field.js deleted file mode 100644 index 1d1ac2e32a0e..000000000000 --- a/ui/app/components/form-field.js +++ /dev/null @@ -1,120 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import { capitalize } from 'vault/helpers/capitalize'; -import { humanize } from 'vault/helpers/humanize'; -import { dasherize } from 'vault/helpers/dasherize'; - -/** - * @module FormField - * `FormField` components are field elements associated with a particular model. - * - * @example - * ```js - * {{#each @model.fields as |attr|}} - * - * {{/each}} - * ``` - * - * @param [onChange=null] {Func} - Called whenever a value on the model changes via the component. - * @param attr=null {Object} - This is usually derived from ember model `attributes` lookup, and all members of `attr.options` are optional. 
- * @param model=null {DS.Model} - The Ember Data model that `attr` is defined on - * - */ - -export default Component.extend({ - 'data-test-field': true, - classNames: ['field'], - - onChange() {}, - - /* - * @public - * @param Object - * in the form of - * { - * name: "foo", - * options: { - * label: "Foo", - * defaultValue: "", - * editType: "ttl", - * helpText: "This will be in a tooltip" - * }, - * type: "boolean" - * } - * - */ - attr: null, - - /* - * @private - * @param string - * Computed property used in the label element next to the form element - * - */ - labelString: computed('attr.{name,options.label}', function() { - const label = this.get('attr.options.label'); - const name = this.get('attr.name'); - if (label) { - return label; - } - if (name) { - return capitalize([humanize([dasherize([name])])]); - } - }), - - // both the path to mutate on the model, and the path to read the value from - /* - * @private - * @param string - * - * Computed property used to set values on the passed model - * - */ - valuePath: computed('attr.{name,options.fieldValue}', function() { - return this.get('attr.options.fieldValue') || this.get('attr.name'); - }), - - model: null, - - /* - * @private - * @param object - * - * Used by the pgp-file component when an attr is editType of 'file' - */ - file: computed(function() { - return { value: '' }; - }), - emptyData: '{\n}', - - actions: { - setFile(_, keyFile) { - const path = this.get('valuePath'); - const { value } = keyFile; - this.get('model').set(path, value); - this.get('onChange')(path, value); - this.set('file', keyFile); - }, - - setAndBroadcast(path, value) { - this.get('model').set(path, value); - this.get('onChange')(path, value); - }, - - setAndBroadcastBool(path, trueVal, falseVal, value) { - let valueToSet = value === true ? 
trueVal : falseVal; - this.send('setAndBroadcast', path, valueToSet); - }, - - codemirrorUpdated(path, isString, value, codemirror) { - codemirror.performLint(); - const hasErrors = codemirror.state.lint.marked.length > 0; - let valToSet = isString ? value : JSON.parse(value); - - if (!hasErrors) { - this.get('model').set(path, valToSet); - this.get('onChange')(path, valToSet); - } - }, - }, -}); diff --git a/ui/app/components/generated-item-list.js b/ui/app/components/generated-item-list.js new file mode 100644 index 000000000000..61258ba040ca --- /dev/null +++ b/ui/app/components/generated-item-list.js @@ -0,0 +1,31 @@ +import { inject as service } from '@ember/service'; +import Component from '@ember/component'; +import { getOwner } from '@ember/application'; + +/** + * @module GeneratedItemList + * The `GeneratedItemList` component lists generated items related to mounts (e.g. groups, roles, users) + * + * @example + * ```js + * + * ``` + * + * @property model=null {DS.Model} - The corresponding item model that is being configured. + * @property itemType {String} - the type of item displayed + * + */ + +export default Component.extend({ + model: null, + itemType: null, + router: service(), + store: service(), + actions: { + refreshItemList() { + let route = getOwner(this).lookup(`route:${this.router.currentRouteName}`); + this.store.clearAllDatasets(); + route.refresh(); + }, + }, +}); diff --git a/ui/app/components/generated-item.js b/ui/app/components/generated-item.js new file mode 100644 index 000000000000..ccf815c8806d --- /dev/null +++ b/ui/app/components/generated-item.js @@ -0,0 +1,52 @@ +import { inject as service } from '@ember/service'; +import Component from '@ember/component'; +import { computed } from '@ember/object'; +import { task } from 'ember-concurrency'; +import DS from 'ember-data'; + +/** + * @module GeneratedItem + * The `GeneratedItem` component is the form to configure generated items related to mounts (e.g. 
groups, roles, users) + * + * @example + * ```js + * + * ``` + * + * @property model=null {DS.Model} - The corresponding item model that is being configured. + * @property mode {String} - which config mode to use. either `show`, `edit`, or `create` + * @property itemType {String} - the type of item displayed + * + */ + +export default Component.extend({ + model: null, + itemType: null, + flashMessages: service(), + router: service(), + props: computed(function() { + return this.model.serialize(); + }), + saveModel: task(function*() { + try { + yield this.model.save(); + } catch (err) { + // AdapterErrors are handled by the error-message component + // in the form + if (err instanceof DS.AdapterError === false) { + throw err; + } + return; + } + this.router.transitionTo('vault.cluster.access.method.item.list').followRedirects(); + this.flashMessages.success(`The ${this.itemType} configuration was saved successfully.`); + }).withTestWaiter(), + actions: { + deleteItem() { + this.model.destroyRecord().then(() => { + this.router.transitionTo('vault.cluster.access.method.item.list').followRedirects(); + this.flashMessages.success(`${this.model.id} ${this.itemType} was deleted successfully.`); + }); + }, + }, +}); diff --git a/ui/app/components/http-requests-bar-chart.js b/ui/app/components/http-requests-bar-chart.js new file mode 100644 index 000000000000..0b4b8c9033c2 --- /dev/null +++ b/ui/app/components/http-requests-bar-chart.js @@ -0,0 +1,220 @@ +import Component from '@ember/component'; +import d3 from 'd3-selection'; +import d3Scale from 'd3-scale'; +import d3Axis from 'd3-axis'; +import d3TimeFormat from 'd3-time-format'; +import d3Tip from 'd3-tip'; +import d3Transition from 'd3-transition'; +import d3Ease from 'd3-ease'; +import { assign } from '@ember/polyfills'; +import { computed } from '@ember/object'; +import { run } from '@ember/runloop'; +import { task, waitForEvent } from 'ember-concurrency'; + +/** + * @module HttpRequestsBarChart + * 
HttpRequestsBarChart components are used to render a bar chart with the total number of HTTP Requests to a Vault server per month. + * + * @example + * ```js + * + * ``` + * + * @param counters=null {Array} - A list of objects containing the total number of HTTP Requests for each month. `counters` should be the response from the `/internal/counters/requests` endpoint which looks like: + * COUNTERS = [ + * { + * "start_time": "2019-05-01T00:00:00Z", + * "total": 50 + * } + * ] + */ + +const HEIGHT = 240; +const HOVER_PADDING = 12; +const BASE_SPEED = 150; +const DURATION = BASE_SPEED * 2; + +export default Component.extend({ + classNames: ['http-requests-bar-chart-container'], + counters: null, + margin: Object.freeze({ top: 24, right: 16, bottom: 24, left: 16 }), + padding: 0.04, + width: 0, + height() { + const { margin } = this; + return HEIGHT - margin.top - margin.bottom; + }, + + parsedCounters: computed('counters', function() { + // parse the start times so bars and ticks display properly + const { counters } = this; + return counters.map(counter => { + return assign({}, counter, { start_time: d3TimeFormat.isoParse(counter.start_time) }); + }); + }), + + yScale: computed('parsedCounters', 'height', function() { + const { parsedCounters } = this; + const height = this.height(); + const counterTotals = parsedCounters.map(c => c.total); + + return d3Scale + .scaleLinear() + .domain([0, Math.max(...counterTotals)]) + .range([height, 0]); + }), + + xScale: computed('parsedCounters', 'width', function() { + const { parsedCounters, width, margin, padding } = this; + + return d3Scale + .scaleBand() + .domain(parsedCounters.map(c => c.start_time)) + .rangeRound([0, width - margin.left - margin.right], 0.05) + .paddingInner(padding) + .paddingOuter(padding); + }), + + didInsertElement() { + this._super(...arguments); + const { margin } = this; + + // set the width after the element has been rendered because the chart axes depend on it. 
+ // this helps us avoid an arbitrary hardcoded width which causes alignment & resizing problems. + run.schedule('afterRender', this, () => { + this.set('width', this.element.clientWidth - margin.left - margin.right); + this.renderBarChart(); + }); + }, + + didUpdateAttrs() { + this.renderBarChart(); + }, + + renderBarChart() { + const { margin, width, xScale, yScale, parsedCounters, elementId } = this; + const height = this.height(); + const barChartSVG = d3.select('.http-requests-bar-chart'); + const barsContainer = d3.select(`#bars-container-${elementId}`); + + // initialize the tooltip + const tip = d3Tip() + .attr('class', 'd3-tooltip') + .offset([HOVER_PADDING / 2, 0]) + .html(function(d) { + const formatter = d3TimeFormat.utcFormat('%B %Y'); + return ` +

${formatter(d.start_time)}

+

${Intl.NumberFormat().format(d.total)} Requests

+ `; + }); + + barChartSVG.call(tip); + + // render the chart + d3.select('.http-requests-bar-chart') + .attr('width', width + margin.left + margin.right) + .attr('height', height + margin.top + margin.bottom) + .attr('viewBox', `0 0 ${width} ${height}`); + + // scale and render the axes + const yAxis = d3Axis + .axisRight(yScale) + .ticks(3, '.0s') + .tickSizeOuter(0); + const xAxis = d3Axis + .axisBottom(xScale) + .tickFormat(d3TimeFormat.utcFormat('%b %Y')) + .tickSizeOuter(0); + + barChartSVG + .select('g.x-axis') + .attr('transform', `translate(0,${height})`) + .call(xAxis); + + barChartSVG + .select('g.y-axis') + .attr('transform', `translate(${width - margin.left - margin.right}, 0)`) + .call(yAxis); + + // render the gridlines + const gridlines = d3Axis + .axisRight(yScale) + .ticks(3) + .tickFormat('') + .tickSize(width - margin.left - margin.right); + + barChartSVG.select('.gridlines').call(gridlines); + + // render the bars + const bars = barsContainer.selectAll('.bar').data(parsedCounters, c => +c.start_time); + + const barsEnter = bars + .enter() + .append('rect') + .attr('class', 'bar'); + + const t = d3Transition + .transition() + .duration(DURATION) + .ease(d3Ease.easeQuad); + + bars + .merge(barsEnter) + .attr('x', counter => xScale(counter.start_time)) + // set the initial y value to 0 so the bars animate upwards + .attr('y', () => yScale(0)) + .attr('width', xScale.bandwidth()) + .transition(t) + .delay(function(d, i) { + return i * BASE_SPEED; + }) + .attr('height', counter => height - yScale(counter.total)) + .attr('y', counter => yScale(counter.total)); + + bars.exit().remove(); + + // render transparent bars and bind the tooltip to them since we cannot + // bind the tooltip to the actual bars. this is because the bars are + // within a clipPath & you cannot bind DOM events to non-display elements. 
+ const shadowBarsContainer = d3.select('.shadow-bars'); + + const shadowBars = shadowBarsContainer.selectAll('.bar').data(parsedCounters, c => +c.start_time); + + const shadowBarsEnter = shadowBars + .enter() + .append('rect') + .attr('class', 'bar') + .on('mouseenter', tip.show) + .on('mouseleave', tip.hide); + + shadowBars + .merge(shadowBarsEnter) + .attr('width', xScale.bandwidth()) + .attr('height', counter => height - yScale(counter.total) + HOVER_PADDING) + .attr('x', counter => xScale(counter.start_time)) + .attr('y', counter => yScale(counter.total) - HOVER_PADDING) + .attr('fill', 'transparent') + .attr('stroke', 'transparent'); + + shadowBars.exit().remove(); + }, + + updateDimensions() { + const newWidth = this.element.clientWidth; + const { margin } = this; + + this.set('width', newWidth - margin.left - margin.right); + this.renderBarChart(); + }, + + waitForResize: task(function*() { + while (true) { + yield waitForEvent(window, 'resize'); + run.scheduleOnce('afterRender', this, 'updateDimensions'); + } + }) + .on('didInsertElement') + .cancelOn('willDestroyElement') + .drop(), +}); diff --git a/ui/app/components/http-requests-container.js b/ui/app/components/http-requests-container.js new file mode 100644 index 000000000000..f58b01fce819 --- /dev/null +++ b/ui/app/components/http-requests-container.js @@ -0,0 +1,56 @@ +import Component from '@ember/component'; +import { computed } from '@ember/object'; +import isWithinRange from 'date-fns/is_within_range'; +import addMonths from 'date-fns/add_months'; + +/** + * @module HttpRequestsContainer + * The HttpRequestsContainer component is the parent component of the HttpRequestsDropdown, HttpRequestsBarChart, and HttpRequestsTable components. It is used to handle filtering the bar chart and table according to selected time window from the dropdown. + * + * @example + * ```js + * + * ``` + * + * @param counters=null {Array} - A list of objects containing the total number of HTTP Requests for each month. 
`counters` should be the response from the `/internal/counters/requests` endpoint which looks like: + * COUNTERS = [ + * { + * "start_time": "2019-05-01T00:00:00Z", + * "total": 50 + * } + * ] + */ + +export default Component.extend({ + classNames: ['http-requests-container'], + counters: null, + timeWindow: 'All', + filteredCounters: computed('counters', 'timeWindow', function() { + const { counters, timeWindow } = this; + if (timeWindow === 'All') { + return counters; + } + + let filteredCounters = []; + if (timeWindow === 'Last 12 Months') { + const today = new Date(); + const twelveMonthsAgo = addMonths(today, -12); + filteredCounters = counters.filter(counter => { + return isWithinRange(counter.start_time, twelveMonthsAgo, today); + }); + + return filteredCounters; + } + + filteredCounters = counters.filter(counter => { + const year = counter.start_time.substr(0, 4); + return year === timeWindow; + }); + return filteredCounters; + }), + actions: { + updateTimeWindow(newValue) { + this.set('timeWindow', newValue); + }, + }, +}); diff --git a/ui/app/components/http-requests-dropdown.js b/ui/app/components/http-requests-dropdown.js new file mode 100644 index 000000000000..7247c72961ac --- /dev/null +++ b/ui/app/components/http-requests-dropdown.js @@ -0,0 +1,51 @@ +import Component from '@ember/component'; +import { computed } from '@ember/object'; + +/** + * @module HttpRequestsDropdown + * HttpRequestsDropdown components are used to render a dropdown that filters the HttpRequestsBarChart. + * + * @example + * ```js + * + * ``` + * + * @param counters=null {Array} - A list of objects containing the total number of HTTP Requests for each month. 
`counters` should be the response from the `/internal/counters/requests` endpoint which looks like: + * COUNTERS = [ + * { + * "start_time": "2019-05-01T00:00:00Z", + * "total": 50 + * } + * ] + */ + +export default Component.extend({ + classNames: ['http-requests-dropdown'], + counters: null, + timeWindow: 'All', + options: computed('counters', function() { + let counters = this.counters || []; + let options = []; + if (counters.length) { + const years = counters + .map(counter => { + const year = new Date(counter.start_time); + return year.getUTCFullYear(); + }) + .uniq(); + years.sort().reverse(); + options = options.concat(years); + } + return options; + }), + onChange() {}, + actions: { + onSelectTimeWindow(e) { + const newValue = e.target.value; + const { timeWindow } = this; + if (newValue !== timeWindow) { + this.onChange(newValue); + } + }, + }, +}); diff --git a/ui/app/components/http-requests-table.js b/ui/app/components/http-requests-table.js new file mode 100644 index 000000000000..5b6c6e1a1d7c --- /dev/null +++ b/ui/app/components/http-requests-table.js @@ -0,0 +1,52 @@ +import Component from '@ember/component'; +import { computed } from '@ember/object'; +import { assign } from '@ember/polyfills'; + +/** + * @module HttpRequestsTable + * `HttpRequestsTable` components render a table with the total number of HTTP Requests to a Vault server per month. + * + * @example + * ```js + * + * ``` + * + * @param counters=null {Array} - A list of objects containing the total number of HTTP Requests for each month. 
`counters` should be the response from the `/internal/counters/requests` endpoint which looks like: + * COUNTERS = [ + * { + * "start_time": "2019-05-01T00:00:00Z", + * "total": 50 + * } + * ] + */ + +export default Component.extend({ + tagName: '', + counters: null, + countersWithChange: computed('counters', function() { + let counters = this.counters || []; + let countersWithPercentChange = []; + let previousMonthVal; + + counters.forEach(month => { + if (previousMonthVal) { + let percentChange = (((previousMonthVal - month.total) / previousMonthVal) * 100).toFixed(1); + // a negative value indicates a percentage increase, so we swap the value + percentChange = -percentChange; + let glyph; + if (percentChange > 0) { + glyph = 'arrow-up'; + } else if (percentChange < 0) { + glyph = 'arrow-down'; + } + const newCounter = assign({ percentChange, glyph }, month); + countersWithPercentChange.push(newCounter); + } else { + // we're looking at the first counter in the list, so there is no % change value. + countersWithPercentChange.push(month); + } + previousMonthVal = month.total; + }); + return countersWithPercentChange; + }), +}); diff --git a/ui/app/components/i-con.js b/ui/app/components/i-con.js deleted file mode 100644 index 669d8b8e5322..000000000000 --- a/ui/app/components/i-con.js +++ /dev/null @@ -1,83 +0,0 @@ -import { camelize } from '@ember/string'; -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import hbs from 'htmlbars-inline-precompile'; - -/** - * @module ICon - * `ICon` components are glyphs used to indicate important information. - * - * @example - * ```js - * - * ``` - * @param glyph=null {String} - The glyph type. 
- * - */ - -export const GLYPHS_WITH_SVG_TAG = [ - 'cancel-square-outline', - 'cancel-square-fill', - 'check-circle-fill', - 'check-plain', - 'checkmark-circled-outline', - 'close-circled-outline', - 'console', - 'control-lock', - 'docs', - 'download', - 'edition-enterprise', - 'edition-oss', - 'false', - 'file', - 'folder', - 'hidden', - 'information-reversed', - 'learn', - 'neutral-circled-outline', - 'perf-replication', - 'person', - 'role', - 'status-indicator', - 'stopwatch', - 'tour', - 'true', - 'upload', - 'video', - 'visible', -]; - -export default Component.extend({ - layout: hbs` - {{#if excludeSVG}} - {{partial partialName}} - {{else}} - - {{/if}} - `, - - tagName: 'span', - excludeIconClass: false, - classNameBindings: ['excludeIconClass::icon'], - classNames: ['has-current-color-fill'], - - attributeBindings: ['aria-label', 'aria-hidden'], - - glyph: null, - - excludeSVG: computed('glyph', function() { - let glyph = this.get('glyph'); - return glyph.startsWith('enable/') || GLYPHS_WITH_SVG_TAG.includes(glyph); - }), - - size: computed('glyph', function() { - return this.get('glyph').startsWith('enable/') ? 48 : 12; - }), - - partialName: computed('glyph', function() { - const glyph = this.get('glyph'); - return `svg/icons/${camelize(glyph)}`; - }), -}); diff --git a/ui/app/components/info-table-row.js b/ui/app/components/info-table-row.js deleted file mode 100644 index 1be591196dc0..000000000000 --- a/ui/app/components/info-table-row.js +++ /dev/null @@ -1,34 +0,0 @@ -import { typeOf } from '@ember/utils'; -import { computed } from '@ember/object'; -import { or } from '@ember/object/computed'; -import Component from '@ember/component'; - -export default Component.extend({ - 'data-test-component': 'info-table-row', - classNames: ['info-table-row'], - isVisible: or('alwaysRender', 'value'), - - /* - * @param boolean - * indicates if the component content should be always be rendered. 
- * when false, the value of `value` will be used to determine if the component should render - */ - alwaysRender: false, - - /* - * @param string - * the display name for the value - * - */ - label: null, - - /* - * - * the value of the data passed in - by default the content of the component will only show if there is a value - */ - value: null, - - valueIsBoolean: computed('value', function() { - return typeOf(this.get('value')) === 'boolean'; - }), -}); diff --git a/ui/app/components/info-tooltip.js b/ui/app/components/info-tooltip.js deleted file mode 100644 index f39f4bc58bd0..000000000000 --- a/ui/app/components/info-tooltip.js +++ /dev/null @@ -1,7 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - 'data-test-component': 'info-tooltip', - tagName: 'span', - classNames: ['is-inline-block'], -}); diff --git a/ui/app/components/linked-block.js b/ui/app/components/linked-block.js deleted file mode 100644 index b829b2754805..000000000000 --- a/ui/app/components/linked-block.js +++ /dev/null @@ -1,47 +0,0 @@ -import { inject as service } from '@ember/service'; -import Component from '@ember/component'; -import hbs from 'htmlbars-inline-precompile'; -import { encodePath } from 'vault/utils/path-encoding-helpers'; - -let LinkedBlockComponent = Component.extend({ - router: service(), - - layout: hbs`{{yield}}`, - - classNames: 'linked-block', - - queryParams: null, - - encode: false, - - click(event) { - const $target = this.$(event.target); - const isAnchorOrButton = - $target.is('a') || - $target.is('button') || - $target.closest('button', event.currentTarget).length > 0 || - $target.closest('a', event.currentTarget).length > 0; - if (!isAnchorOrButton) { - let params = this.get('params'); - if (this.encode) { - params = params.map((param, index) => { - if (index === 0 || typeof param !== 'string') { - return param; - } - return encodePath(param); - }); - } - const queryParams = this.get('queryParams'); - if (queryParams) { - 
params.push({ queryParams }); - } - this.get('router').transitionTo(...params); - } - }, -}); - -LinkedBlockComponent.reopenClass({ - positionalParams: 'params', -}); - -export default LinkedBlockComponent; diff --git a/ui/app/components/list-item.js b/ui/app/components/list-item.js deleted file mode 100644 index c0a4396d1110..000000000000 --- a/ui/app/components/list-item.js +++ /dev/null @@ -1,24 +0,0 @@ -import { inject as service } from '@ember/service'; -import Component from '@ember/component'; -import { task } from 'ember-concurrency'; - -export default Component.extend({ - flashMessages: service(), - tagName: '', - linkParams: null, - componentName: null, - hasMenu: true, - - callMethod: task(function*(method, model, successMessage, failureMessage, successCallback = () => {}) { - let flash = this.get('flashMessages'); - try { - yield model[method](); - flash.success(successMessage); - successCallback(); - } catch (e) { - let errString = e.errors.join(' '); - flash.danger(failureMessage + errString); - model.rollbackAttributes(); - } - }), -}); diff --git a/ui/app/components/list-item/content.js b/ui/app/components/list-item/content.js deleted file mode 100644 index 4798652642ba..000000000000 --- a/ui/app/components/list-item/content.js +++ /dev/null @@ -1,5 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - tagName: '', -}); diff --git a/ui/app/components/list-item/popup-menu.js b/ui/app/components/list-item/popup-menu.js deleted file mode 100644 index b1f4a59f1d96..000000000000 --- a/ui/app/components/list-item/popup-menu.js +++ /dev/null @@ -1,7 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - tagName: '', - item: null, - hasMenu: null, -}); diff --git a/ui/app/components/list-pagination.js b/ui/app/components/list-pagination.js deleted file mode 100644 index d973566c1b15..000000000000 --- a/ui/app/components/list-pagination.js +++ /dev/null @@ -1,37 +0,0 @@ -import { gt } 
from '@ember/object/computed'; -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import { range } from 'ember-composable-helpers/helpers/range'; - -export default Component.extend({ - classNames: ['box', 'is-shadowless', 'list-pagination'], - page: null, - lastPage: null, - link: null, - model: null, - // number of links to show on each side of page - spread: 2, - hasNext: computed('page', 'lastPage', function() { - return this.get('page') < this.get('lastPage'); - }), - hasPrevious: computed('page', 'lastPage', function() { - return this.get('page') > 1; - }), - - segmentLinks: gt('lastPage', 10), - - pageRange: computed('page', 'lastPage', function() { - const { spread, page, lastPage } = this.getProperties('spread', 'page', 'lastPage'); - - let lower = Math.max(2, page - spread); - let upper = Math.min(lastPage - 1, lower + spread * 2); - // we're closer to lastPage than the spread - if (upper - lower < 5) { - lower = upper - 4; - } - if (lastPage <= 10) { - return range([1, lastPage, true]); - } - return range([lower, upper, true]); - }), -}); diff --git a/ui/app/components/list-view.js b/ui/app/components/list-view.js deleted file mode 100644 index 6c21340897e1..000000000000 --- a/ui/app/components/list-view.js +++ /dev/null @@ -1,22 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import { pluralize } from 'ember-inflector'; - -export default Component.extend({ - tagName: '', - items: null, - itemNoun: 'item', - // the dasherized name of a component to render - // in the EmptyState component if there are no items in items.length - emptyActions: '', - - emptyTitle: computed('itemNoun', function() { - let items = pluralize(this.get('itemNoun')); - return `No ${items} yet`; - }), - - emptyMessage: computed('itemNoun', function() { - let items = pluralize(this.get('itemNoun')); - return `Your ${items} will be listed here. 
Add your first ${this.get('itemNoun')} to get started.`; - }), -}); diff --git a/ui/app/components/masked-input.js b/ui/app/components/masked-input.js deleted file mode 100644 index 584d7afb0b3e..000000000000 --- a/ui/app/components/masked-input.js +++ /dev/null @@ -1,72 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import autosize from 'autosize'; - -/** - * @module MaskedInput - * `MaskedInput` components are textarea inputs where the input is hidden. They are used to enter sensitive information like passwords. - * - * @example - * - * - * @param [value] {String} - The value to display in the input. - * @param [placeholder=value] {String} - The placeholder to display before the user has entered any input. - * @param [allowCopy=null] {bool} - Whether or not the input should render with a copy button. - * @param [displayOnly=false] {bool} - Whether or not to display the value as a display only `pre` element or as an input. - * @param [onChange=Function.prototype] {Function|action} - A function to call when the value of the input changes. 
- * - * - */ - -export default Component.extend({ - value: null, - placeholder: 'value', - didInsertElement() { - this._super(...arguments); - autosize(this.element.querySelector('textarea')); - }, - didUpdate() { - this._super(...arguments); - autosize.update(this.element.querySelector('textarea')); - }, - willDestroyElement() { - this._super(...arguments); - autosize.destroy(this.element.querySelector('textarea')); - }, - shouldObscure: computed('isMasked', 'isFocused', 'value', function() { - if (this.get('value') === '') { - return false; - } - if (this.get('isFocused') === true) { - return false; - } - return this.get('isMasked'); - }), - displayValue: computed('shouldObscure', function() { - if (this.get('shouldObscure')) { - return '■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■'; - } else { - return this.get('value'); - } - }), - isMasked: true, - isFocused: false, - displayOnly: false, - onKeyDown() {}, - onChange() {}, - actions: { - toggleMask() { - this.toggleProperty('isMasked'); - }, - updateValue(e) { - let value = e.target.value; - this.set('value', value); - this.onChange(value); - }, - }, -}); diff --git a/ui/app/components/message-error.js b/ui/app/components/message-error.js deleted file mode 100644 index 6003dce9fd30..000000000000 --- a/ui/app/components/message-error.js +++ /dev/null @@ -1,36 +0,0 @@ -import { computed } from '@ember/object'; -import Component from '@ember/component'; - -export default Component.extend({ - model: null, - errors: computed(function() { - return []; - }), - errorMessage: null, - - displayErrors: computed( - 'errorMessage', - 'model.{isError,adapterError.message,adapterError.errors.@each}', - 'errors', - 'errors.@each', - function() { - const errorMessage = this.get('errorMessage'); - const errors = this.get('errors'); - const modelIsError = this.get('model.isError'); - if (errorMessage) { - return [errorMessage]; - } - - if (errors && errors.length > 0) { - return errors; - } - - if (modelIsError) { - if 
(this.get('model.adapterError.errors.length') > 0) { - return this.get('model.adapterError.errors'); - } - return [this.get('model.adapterError.message')]; - } - } - ), -}); diff --git a/ui/app/components/mount-backend-form.js b/ui/app/components/mount-backend-form.js index 83f719447866..683e5f29d24f 100644 --- a/ui/app/components/mount-backend-form.js +++ b/ui/app/components/mount-backend-form.js @@ -3,7 +3,7 @@ import { computed } from '@ember/object'; import Component from '@ember/component'; import { task } from 'ember-concurrency'; import { methods } from 'vault/helpers/mountable-auth-methods'; -import { engines } from 'vault/helpers/mountable-secret-engines'; +import { engines, KMIP } from 'vault/helpers/mountable-secret-engines'; const METHODS = methods(); const ENGINES = engines(); @@ -12,6 +12,7 @@ export default Component.extend({ store: service(), wizard: service(), flashMessages: service(), + version: service(), /* * @param Function @@ -51,7 +52,15 @@ export default Component.extend({ }, mountTypes: computed('mountType', function() { - return this.mountType === 'secret' ? ENGINES : METHODS; + return this.mountType === 'secret' ? 
this.engines : METHODS; + }), + + engines: computed('version.features[]', function() { + if (this.version.hasFeature('KMIP')) { + return ENGINES.concat([KMIP]); + } else { + return ENGINES; + } }), willDestroy() { diff --git a/ui/app/components/namespace-reminder.js b/ui/app/components/namespace-reminder.js deleted file mode 100644 index 464f4b6c6467..000000000000 --- a/ui/app/components/namespace-reminder.js +++ /dev/null @@ -1,19 +0,0 @@ -import { inject as service } from '@ember/service'; -import { not } from '@ember/object/computed'; -import Component from '@ember/component'; -import { computed } from '@ember/object'; - -export default Component.extend({ - namespace: service(), - showMessage: not('namespace.inRootNamespace'), - //public API - noun: null, - mode: 'edit', - modeVerb: computed(function() { - let mode = this.get('mode'); - if (!mode) { - return ''; - } - return mode.endsWith('e') ? `${mode}d` : `${mode}ed`; - }), -}); diff --git a/ui/app/components/navigate-input.js b/ui/app/components/navigate-input.js deleted file mode 100644 index ce408b20e6a2..000000000000 --- a/ui/app/components/navigate-input.js +++ /dev/null @@ -1,199 +0,0 @@ -import { schedule, debounce } from '@ember/runloop'; -import { inject as service } from '@ember/service'; -import Component from '@ember/component'; -import utils from 'vault/lib/key-utils'; -import keys from 'vault/lib/keycodes'; -import FocusOnInsertMixin from 'vault/mixins/focus-on-insert'; -import { encodePath } from 'vault/utils/path-encoding-helpers'; - -const routeFor = function(type, mode) { - const MODES = { - secrets: 'vault.cluster.secrets.backend', - 'secrets-cert': 'vault.cluster.secrets.backend', - 'policy-show': 'vault.cluster.policy', - 'policy-list': 'vault.cluster.policies', - leases: 'vault.cluster.access.leases', - }; - let useSuffix = true; - const typeVal = mode === 'secrets' || mode === 'leases' ? 
type : type.replace('-root', ''); - const modeKey = mode + '-' + typeVal; - const modeVal = MODES[modeKey] || MODES[mode]; - if (modeKey === 'policy-list') { - useSuffix = false; - } - - return useSuffix ? modeVal + '.' + typeVal : modeVal; -}; - -export default Component.extend(FocusOnInsertMixin, { - router: service(), - - classNames: ['navigate-filter'], - - // these get passed in from the outside - // actions that get passed in - filterFocusDidChange: null, - filterDidChange: null, - mode: 'secrets', - shouldNavigateTree: false, - extraNavParams: null, - - baseKey: null, - filter: null, - filterMatchesKey: null, - firstPartialMatch: null, - - transitionToRoute(...args) { - let params = args.map((param, index) => { - if (index === 0 || typeof param !== 'string') { - return param; - } - return encodePath(param); - }); - - this.get('router').transitionTo(...params); - }, - - shouldFocus: false, - - didReceiveAttrs() { - this._super(...arguments); - if (!this.get('filter')) return; - schedule('afterRender', this, 'forceFocus'); - }, - - keyForNav(key) { - if (this.get('mode') !== 'secrets-cert') { - return key; - } - return `cert/${key}`; - }, - onEnter: function(val) { - let { baseKey, mode } = this; - let extraParams = this.get('extraNavParams'); - if (mode.startsWith('secrets') && (!val || val === baseKey)) { - return; - } - if (this.get('filterMatchesKey') && !utils.keyIsFolder(val)) { - let params = [routeFor('show', mode), extraParams, this.keyForNav(val)].compact(); - this.transitionToRoute(...params); - } else { - if (mode === 'policies') { - return; - } - let route = routeFor('create', mode); - if (baseKey) { - this.transitionToRoute(route, this.keyForNav(baseKey), { - queryParams: { - initialKey: val, - }, - }); - } else { - this.transitionToRoute(route + '-root', { - queryParams: { - initialKey: this.keyForNav(val), - }, - }); - } - } - }, - - // pop to the nearest parentKey or to the root - onEscape: function(val) { - var key = 
utils.parentKeyForKey(val) || ''; - this.get('filterDidChange')(key); - this.filterUpdated(key); - }, - - onTab: function(event) { - var firstPartialMatch = this.get('firstPartialMatch.id'); - if (!firstPartialMatch) { - return; - } - event.preventDefault(); - this.get('filterDidChange')(firstPartialMatch); - this.filterUpdated(firstPartialMatch); - }, - - // as you type, navigates through the k/v tree - filterUpdated: function(val) { - var mode = this.get('mode'); - if (mode === 'policies' || !this.get('shouldNavigateTree')) { - this.filterUpdatedNoNav(val, mode); - return; - } - // select the key to nav to, assumed to be a folder - var key = val ? val.trim() : ''; - var isFolder = utils.keyIsFolder(key); - - if (!isFolder) { - // nav to the closest parentKey (or the root) - key = utils.parentKeyForKey(val) || ''; - } - - const pageFilter = val.replace(key, ''); - this.navigate(this.keyForNav(key), mode, pageFilter); - }, - - navigate(key, mode, pageFilter) { - const route = routeFor(key ? 'list' : 'list-root', mode); - let args = [route]; - if (key) { - args.push(key); - } - if (pageFilter && !utils.keyIsFolder(pageFilter)) { - args.push({ - queryParams: { - page: 1, - pageFilter, - }, - }); - } else { - args.push({ - queryParams: { - page: 1, - pageFilter: null, - }, - }); - } - this.transitionToRoute(...args); - }, - - filterUpdatedNoNav: function(val, mode) { - var key = val ? 
val.trim() : null; - this.transitionToRoute(routeFor('list-root', mode), { - queryParams: { - pageFilter: key, - page: 1, - }, - }); - }, - - actions: { - handleInput: function(filter) { - this.get('filterDidChange')(filter); - debounce(this, 'filterUpdated', filter, 200); - }, - - setFilterFocused: function(isFocused) { - this.get('filterFocusDidChange')(isFocused); - }, - - handleKeyPress: function(event) { - if (event.keyCode === keys.TAB) { - this.onTab(event); - } - }, - - handleKeyUp: function(event) { - var keyCode = event.keyCode; - let val = event.target.value; - if (keyCode === keys.ENTER) { - this.onEnter(val); - } - if (keyCode === keys.ESC) { - this.onEscape(val); - } - }, - }, -}); diff --git a/ui/app/components/page-header-level-left.js b/ui/app/components/page-header-level-left.js deleted file mode 100644 index 4798652642ba..000000000000 --- a/ui/app/components/page-header-level-left.js +++ /dev/null @@ -1,5 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - tagName: '', -}); diff --git a/ui/app/components/page-header-level-right.js b/ui/app/components/page-header-level-right.js deleted file mode 100644 index 4798652642ba..000000000000 --- a/ui/app/components/page-header-level-right.js +++ /dev/null @@ -1,5 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - tagName: '', -}); diff --git a/ui/app/components/page-header.js b/ui/app/components/page-header.js deleted file mode 100644 index 9a92d16670f6..000000000000 --- a/ui/app/components/page-header.js +++ /dev/null @@ -1,6 +0,0 @@ -import Component from '@ember/component'; - -export default Component.extend({ - tagName: '', - hasLevel: true, -}); diff --git a/ui/app/components/popup-menu.js b/ui/app/components/popup-menu.js deleted file mode 100644 index 3f263270c4e2..000000000000 --- a/ui/app/components/popup-menu.js +++ /dev/null @@ -1,5 +0,0 @@ -import Component from '@ember/component'; - -export default 
Component.extend({ - tagName: 'span', -}); diff --git a/ui/app/components/replication-actions.js b/ui/app/components/replication-actions.js deleted file mode 100644 index c2ad4ae8334f..000000000000 --- a/ui/app/components/replication-actions.js +++ /dev/null @@ -1,59 +0,0 @@ -import { alias } from '@ember/object/computed'; -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import ReplicationActions from 'vault/mixins/replication-actions'; - -const DEFAULTS = { - token: null, - primary_api_addr: null, - primary_cluster_addr: null, - errors: [], - id: null, - replicationMode: null, - force: false, -}; - -export default Component.extend(ReplicationActions, DEFAULTS, { - replicationMode: null, - selectedAction: null, - tagName: 'form', - - didReceiveAttrs() { - this._super(...arguments); - }, - - model: null, - cluster: alias('model'), - loading: false, - onSubmit: null, - - reset() { - if (!this || this.isDestroyed || this.isDestroying) { - return; - } - this.setProperties(DEFAULTS); - }, - - replicationDisplayMode: computed('replicationMode', function() { - const replicationMode = this.get('replicationMode'); - if (replicationMode === 'dr') { - return 'DR'; - } - if (replicationMode === 'performance') { - return 'Performance'; - } - }), - - actions: { - onSubmit() { - return this.submitHandler(...arguments); - }, - clear() { - this.reset(); - this.setProperties({ - token: null, - id: null, - }); - }, - }, -}); diff --git a/ui/app/components/replication-mode-summary.js b/ui/app/components/replication-mode-summary.js deleted file mode 100644 index cd7716b8ee05..000000000000 --- a/ui/app/components/replication-mode-summary.js +++ /dev/null @@ -1,58 +0,0 @@ -import { inject as service } from '@ember/service'; -import { equal } from '@ember/object/computed'; -import { getProperties, get, computed } from '@ember/object'; -import Component from '@ember/component'; - -const replicationAttr = function(attr) { - return computed('mode', 
`cluster.{dr,performance}.${attr}`, function() { - const { mode, cluster } = getProperties(this, 'mode', 'cluster'); - return get(cluster, `${mode}.${attr}`); - }); -}; -export default Component.extend({ - version: service(), - router: service(), - namespace: service(), - classNameBindings: ['isMenu::box', 'isMenu::level'], - attributeBindings: ['href', 'target'], - display: 'banner', - isMenu: equal('display', 'menu'), - href: computed('display', 'mode', 'replicationEnabled', 'version.hasPerfReplication', function() { - const display = this.get('display'); - const mode = this.get('mode'); - if (mode === 'performance' && display === 'menu' && this.get('version.hasPerfReplication') === false) { - return 'https://www.hashicorp.com/products/vault'; - } - if (this.get('replicationEnabled') || display === 'menu') { - return this.get('router').urlFor( - 'vault.cluster.replication.mode.index', - this.get('cluster.name'), - mode - ); - } - return null; - }), - target: computed('isPerformance', 'version.hasPerfReplication', function() { - if (this.get('isPerformance') && this.get('version.hasPerfReplication') === false) { - return '_blank'; - } - return null; - }), - internalLink: false, - isPerformance: equal('mode', 'performance'), - replicationEnabled: replicationAttr('replicationEnabled'), - replicationUnsupported: equal('cluster.mode', 'unsupported'), - replicationDisabled: replicationAttr('replicationDisabled'), - syncProgressPercent: replicationAttr('syncProgressPercent'), - syncProgress: replicationAttr('syncProgress'), - secondaryId: replicationAttr('secondaryId'), - modeForUrl: replicationAttr('modeForUrl'), - clusterIdDisplay: replicationAttr('clusterIdDisplay'), - mode: null, - cluster: null, - partialName: computed('display', function() { - return this.get('display') === 'menu' - ? 
'partials/replication/replication-mode-summary-menu' - : 'partials/replication/replication-mode-summary'; - }), -}); diff --git a/ui/app/components/role-edit.js b/ui/app/components/role-edit.js index 9291276444f5..fb9bc80b6b3a 100644 --- a/ui/app/components/role-edit.js +++ b/ui/app/components/role-edit.js @@ -1,7 +1,7 @@ import { inject as service } from '@ember/service'; import { or } from '@ember/object/computed'; import { isBlank } from '@ember/utils'; -import $ from 'jquery'; +import { task, waitForEvent } from 'ember-concurrency'; import Component from '@ember/component'; import { set, get } from '@ember/object'; import FocusOnInsertMixin from 'vault/mixins/focus-on-insert'; @@ -42,20 +42,22 @@ export default Component.extend(FocusOnInsertMixin, { } }, - didInsertElement() { - this._super(...arguments); - $(document).on('keyup.keyEdit', this.onEscape.bind(this)); - }, - willDestroyElement() { this._super(...arguments); - const model = this.get('model'); - if (get(model, 'isError')) { - model.rollbackAttributes(); + if (this.model && this.model.isError) { + this.model.rollbackAttributes(); } - $(document).off('keyup.keyEdit'); }, + waitForKeyUp: task(function*() { + while (true) { + let event = yield waitForEvent(document.body, 'keyup'); + this.onEscape(event); + } + }) + .on('didInsertElement') + .cancelOn('willDestroyElement'), + transitionToRoute() { this.get('router').transitionTo(...arguments); }, @@ -84,18 +86,6 @@ export default Component.extend(FocusOnInsertMixin, { }, actions: { - handleKeyDown(_, e) { - e.stopPropagation(); - if (!(e.keyCode === keys.ENTER && e.metaKey)) { - return; - } - let $form = this.$('form'); - if ($form.length) { - $form.submit(); - } - $form = null; - }, - createOrUpdate(type, event) { event.preventDefault(); @@ -112,10 +102,6 @@ export default Component.extend(FocusOnInsertMixin, { }); }, - handleChange() { - this.hasDataChanges(); - }, - setValue(key, event) { set(get(this, 'model'), key, event.target.checked); }, diff 
--git a/ui/app/components/search-select.js b/ui/app/components/search-select.js index e486113f6646..9cb453a6faa9 100644 --- a/ui/app/components/search-select.js +++ b/ui/app/components/search-select.js @@ -2,7 +2,23 @@ import Component from '@ember/component'; import { inject as service } from '@ember/service'; import { task } from 'ember-concurrency'; import { computed } from '@ember/object'; +import { singularize } from 'ember-inflector'; +/** + * @module SearchSelect + * The `SearchSelect` is an implementation of the [ember-power-select-with-create](https://github.com/poteto/ember-cli-flash) used for form elements where options come dynamically from the API. + * @example + * + * + * @param id {String} - The name of the form field + * @param models {String} - An array of model types to fetch from the API. + * @param onChange {Func} - The onchange action for this form field. + * @param inputValue {String} - A comma-separated string or an array of strings. + * @param [helpText] {String} - Text to be displayed in the info tooltip for this form field + * @param label {String} - Label for this form field + * @param fallbackComponent {String} - name of component to be rendered if the API call 403s + * + */ export default Component.extend({ 'data-test-component': 'search-select', classNames: ['field', 'search-select'], @@ -36,6 +52,26 @@ export default Component.extend({ this._super(...arguments); this.set('selectedOptions', this.inputValue || []); }, + formatOptions: function(options) { + options = options.toArray().map(option => { + option.searchText = `${option.name} ${option.id}`; + return option; + }); + let formattedOptions = this.selectedOptions.map(option => { + let matchingOption = options.findBy('id', option); + options.removeObject(matchingOption); + return { + id: option, + name: matchingOption ? matchingOption.name : option, + searchText: matchingOption ? 
matchingOption.searchText : option, + }; + }); + this.set('selectedOptions', formattedOptions); + if (this.options) { + options = this.options.concat(options); + } + this.set('options', options); + }, fetchOptions: task(function*() { for (let modelType of this.models) { if (modelType.includes('identity')) { @@ -43,20 +79,7 @@ export default Component.extend({ } try { let options = yield this.store.query(modelType, {}); - options = options.toArray().map(option => { - option.searchText = `${option.name} ${option.id}`; - return option; - }); - let formattedOptions = this.selectedOptions.map(option => { - let matchingOption = options.findBy('id', option); - options.removeObject(matchingOption); - return { id: option, name: matchingOption.name, searchText: matchingOption.searchText }; - }); - this.set('selectedOptions', formattedOptions); - if (this.options) { - options = this.options.concat(options); - } - this.set('options', options); + this.formatOptions(options); } catch (err) { if (err.httpStatus === 404) { //leave options alone, it's okay @@ -66,6 +89,12 @@ export default Component.extend({ this.set('shouldUseFallback', true); return; } + //special case for storybook + if (this.staticOptions) { + let options = this.staticOptions; + this.formatOptions(options); + return; + } throw err; } } @@ -81,6 +110,11 @@ export default Component.extend({ onChange(val) { this.onChange(val); }, + createOption(optionId) { + let newOption = { name: optionId, id: optionId }; + this.selectedOptions.pushObject(newOption); + this.handleChange(); + }, selectOption(option) { this.selectedOptions.pushObject(option); this.options.removeObject(option); @@ -91,5 +125,12 @@ export default Component.extend({ this.options.pushObject(selected); this.handleChange(); }, + constructSuggestion(id) { + return `Add new ${singularize(this.label)}: ${id}`; + }, + hideCreateOptionOnSameID(id) { + let existingOption = this.options.findBy('id', id) || this.options.findBy('name', id); + return 
!existingOption; + }, }, }); diff --git a/ui/app/components/secret-edit.js b/ui/app/components/secret-edit.js index c2634900a32c..c9dd40041b74 100644 --- a/ui/app/components/secret-edit.js +++ b/ui/app/components/secret-edit.js @@ -95,7 +95,7 @@ export default Component.extend(FocusOnInsertMixin, WithNavToNearestAncestor, { updatePath: maybeQueryRecord( 'capabilities', context => { - if (context.mode === 'create') { + if (!context.model || context.mode === 'create') { return; } let backend = context.isV2 ? context.get('model.engine.id') : context.model.backend; @@ -116,7 +116,7 @@ export default Component.extend(FocusOnInsertMixin, WithNavToNearestAncestor, { v2UpdatePath: maybeQueryRecord( 'capabilities', context => { - if (context.mode === 'create' || context.isV2 === false) { + if (!context.model || context.mode === 'create' || context.isV2 === false) { return; } let backend = context.get('model.engine.id'); @@ -137,7 +137,9 @@ export default Component.extend(FocusOnInsertMixin, WithNavToNearestAncestor, { buttonDisabled: or('requestInFlight', 'model.isFolder', 'model.flagsIsInvalid', 'hasLintError', 'error'), modelForData: computed('isV2', 'model', function() { - return this.isV2 ? this.model.belongsTo('selectedVersion').value() : this.model; + let { model } = this; + if (!model) return null; + return this.isV2 ? 
model.belongsTo('selectedVersion').value() : model; }), basicModeDisabled: computed('secretDataIsAdvanced', 'showAdvancedMode', function() { @@ -160,6 +162,7 @@ export default Component.extend(FocusOnInsertMixin, WithNavToNearestAncestor, { 'model.{failedServerRead,selectedVersion.failedServerRead}', 'isV2', function() { + if (!this.model) return; // if the version couldn't be read from the server if (this.isV2 && this.model.selectedVersion.failedServerRead) { return true; diff --git a/ui/app/components/secret-form-header.js b/ui/app/components/secret-form-header.js deleted file mode 100644 index d8b27ca6ee34..000000000000 --- a/ui/app/components/secret-form-header.js +++ /dev/null @@ -1,50 +0,0 @@ -import { alias } from '@ember/object/computed'; -import Component from '@ember/component'; -import hbs from 'htmlbars-inline-precompile'; - -export default Component.extend({ - key: null, - mode: null, - path: null, - actionClass: null, - - title: alias('key.keyWithoutParent'), - - layout: hbs` -
- {{#secret-link - mode="list" - secret=key.parentKey - class="back-button" - }} - {{i-con glyph="chevron-left" size=11}} - Secrets - {{/secret-link}} - -
- {{yield}} -
- -
- {{#if (eq mode "create") }} - Create a secret at - - {{#if showPrefix}} - {{! need this to prevent a shift in the layout before we transition when saving }} - {{#if key.isCreating}} - {{key.initialParentKey}} - {{else}} - {{key.parentKey}} - {{/if}} - {{/if}} - - {{/if}} - - {{#if (eq mode "edit") }} - Edit - {{/if}} - - {{title}} -
-
`, -}); diff --git a/ui/app/components/secret-list-header.js b/ui/app/components/secret-list-header.js index ef8ee111321d..12f3d21496f1 100644 --- a/ui/app/components/secret-list-header.js +++ b/ui/app/components/secret-list-header.js @@ -9,4 +9,5 @@ export default Component.extend({ baseKey: null, backendCrumb: null, model: null, + options: null, }); diff --git a/ui/app/components/section-tabs.js b/ui/app/components/section-tabs.js index 8cf2aa5c9cb8..56969c79cbef 100644 --- a/ui/app/components/section-tabs.js +++ b/ui/app/components/section-tabs.js @@ -2,13 +2,12 @@ import Component from '@ember/component'; const SectionTabs = Component.extend({ tagName: '', - model: null, tabType: 'authSettings', }); SectionTabs.reopenClass({ - positionalParams: ['model', 'tabType'], + positionalParams: ['model', 'tabType', 'paths'], }); export default SectionTabs; diff --git a/ui/app/components/status-menu.js b/ui/app/components/status-menu.js index 642ff3d8db4a..406749a0cf92 100644 --- a/ui/app/components/status-menu.js +++ b/ui/app/components/status-menu.js @@ -18,7 +18,7 @@ export default Component.extend({ glyphName: computed('type', function() { const glyphs = { cluster: 'status-indicator', - user: 'person', + user: 'user-square-outline', }; return glyphs[this.type]; }), diff --git a/ui/app/components/string-list.js b/ui/app/components/string-list.js deleted file mode 100644 index 3936e6671100..000000000000 --- a/ui/app/components/string-list.js +++ /dev/null @@ -1,133 +0,0 @@ -import ArrayProxy from '@ember/array/proxy'; -import Component from '@ember/component'; -import { set, computed } from '@ember/object'; - -export default Component.extend({ - 'data-test-component': 'string-list', - classNames: ['field', 'string-list', 'form-section'], - - /* - * @public - * @param String - * - * Optional - Text displayed in the header above all of the inputs - * - */ - label: null, - - /* - * @public - * @param Function - * - * Function called when any of the inputs change - * 
accepts a single param `value` that is the - * result of calling `toVal()`. - * - */ - onChange: () => {}, - - /* - * @public - * @param String | Array - * A comma-separated string or an array of strings. - * Defaults to an empty array. - * - */ - inputValue: computed(function() { - return []; - }), - - /* - * - * @public - * @param String - ['array'|'string] - * - * Optional type for `inputValue` - defaults to `'array'` - * Needs to match type of `inputValue` because it is set by the component on init. - * - */ - type: 'array', - - /* - * - * @private - * @param Ember.ArrayProxy - * - * mutable array that contains objects in the form of - * { - * value: 'somestring', - * } - * - * used to track the state of values bound to the various inputs - * - */ - /* eslint-disable ember/no-side-effects */ - inputList: computed(function() { - return ArrayProxy.create({ - content: [], - // trim the `value` when accessing objects - objectAtContent: function(idx) { - const obj = this.get('content').objectAt(idx); - if (obj && obj.value) { - set(obj, 'value', obj.value.trim()); - } - return obj; - }, - }); - }), - - init() { - this._super(...arguments); - this.setType(); - this.toList(); - this.send('addInput'); - }, - - setType() { - const list = this.inputList; - if (!list) { - return; - } - this.set('type', typeof list); - }, - - toVal() { - const inputs = this.inputList.filter(x => x.value).mapBy('value'); - if (this.get('format') === 'string') { - return inputs.join(','); - } - return inputs; - }, - - toList() { - let input = this.inputValue || []; - const inputList = this.inputList; - if (typeof input === 'string') { - input = input.split(','); - } - inputList.addObjects(input.map(value => ({ value }))); - }, - - actions: { - inputChanged(idx, val) { - const inputObj = this.inputList.objectAt(idx); - const onChange = this.onChange; - set(inputObj, 'value', val); - onChange(this.toVal()); - }, - - addInput() { - const inputList = this.inputList; - if 
(inputList.get('lastObject.value') !== '') { - inputList.pushObject({ value: '' }); - } - }, - - removeInput(idx) { - const onChange = this.onChange; - const inputs = this.inputList; - inputs.removeObject(inputs.objectAt(idx)); - onChange(this.toVal()); - }, - }, -}); diff --git a/ui/app/components/toggle-button.js b/ui/app/components/toggle-button.js deleted file mode 100644 index 9272ecc490d3..000000000000 --- a/ui/app/components/toggle-button.js +++ /dev/null @@ -1,53 +0,0 @@ -import Component from '@ember/component'; -import { set, get, defineProperty, computed } from '@ember/object'; - -/** - * @module ToggleButton - * `ToggleButton` components are used to expand and collapse content with a toggle. - * - * @example - * ```js - * - * {{#if showOptions}} - *
- *

- * I will be toggled! - *

- *
- * {{/if}} - * ``` - * - * @param toggleAttr=null {String} - The attribute upon which to toggle. - * @param openLabel=Hide options {String} - The message to display when the toggle is open. - * @param closedLabel=More options {String} - The message to display when the toggle is closed. - */ -export default Component.extend({ - tagName: 'button', - type: 'button', - toggleTarget: null, - toggleAttr: null, - classNameBindings: ['buttonClass'], - attributeBindings: ['type'], - buttonClass: 'has-text-info', - classNames: ['button', 'is-transparent'], - openLabel: 'Hide options', - closedLabel: 'More options', - init() { - this._super(...arguments); - const toggleAttr = this.get('toggleAttr'); - defineProperty( - this, - 'isOpen', - computed(`toggleTarget.${toggleAttr}`, () => { - const props = this.getProperties('toggleTarget', 'toggleAttr'); - return get(props.toggleTarget, props.toggleAttr); - }) - ); - }, - click() { - const target = this.get('toggleTarget'); - const attr = this.get('toggleAttr'); - const current = get(target, attr); - set(target, attr, !current); - }, -}); diff --git a/ui/app/components/tool-tip.js b/ui/app/components/tool-tip.js deleted file mode 100644 index 6ea25baead4e..000000000000 --- a/ui/app/components/tool-tip.js +++ /dev/null @@ -1,6 +0,0 @@ -import HoverDropdown from 'ember-basic-dropdown-hover/components/basic-dropdown-hover'; - -export default HoverDropdown.extend({ - delay: 0, - horizontalPosition: 'auto-right', -}); diff --git a/ui/app/components/toolbar-secret-link.js b/ui/app/components/toolbar-secret-link.js new file mode 100644 index 000000000000..00395ccabd4a --- /dev/null +++ b/ui/app/components/toolbar-secret-link.js @@ -0,0 +1,33 @@ +/** + * @module ToolbarSecretLink + * `ToolbarSecretLink` styles SecretLink for the Toolbar. + * It should only be used inside of `Toolbar`. 
+ * + * @example + * ```js + * + * + * + * Create policy + * + * + * + * ``` + * + * @param type="" {String} - Use "add" to change icon + */ + +import OuterHTML from './outer-html'; +import { computed } from '@ember/object'; + +export default OuterHTML.extend({ + glyph: computed('type', function() { + if (this.type == 'add') { + return 'plus-plain'; + } else { + return 'chevron-right'; + } + }), + tagName: '', + supportsDataTestProperties: true, +}); diff --git a/ui/app/components/transit-edit.js b/ui/app/components/transit-edit.js index 9c465c472eee..d5258086e182 100644 --- a/ui/app/components/transit-edit.js +++ b/ui/app/components/transit-edit.js @@ -1,9 +1,10 @@ import { inject as service } from '@ember/service'; import { or } from '@ember/object/computed'; import { isBlank } from '@ember/utils'; -import $ from 'jquery'; import Component from '@ember/component'; +import { task, waitForEvent } from 'ember-concurrency'; import { set, get } from '@ember/object'; + import FocusOnInsertMixin from 'vault/mixins/focus-on-insert'; import keys from 'vault/lib/keycodes'; @@ -19,24 +20,22 @@ export default Component.extend(FocusOnInsertMixin, { key: null, requestInFlight: or('key.isLoading', 'key.isReloading', 'key.isSaving'), - init() { - this._super(...arguments); - }, - - didInsertElement() { - this._super(...arguments); - $(document).on('keyup.keyEdit', this.onEscape.bind(this)); - }, - willDestroyElement() { this._super(...arguments); - const key = this.get('key'); - if (get(key, 'isError')) { - key.rollbackAttributes(); + if (this.key && this.key.isError) { + this.key.rollbackAttributes(); } - $(document).off('keyup.keyEdit'); }, + waitForKeyUp: task(function*() { + while (true) { + let event = yield waitForEvent(document.body, 'keyup'); + this.onEscape(event); + } + }) + .on('didInsertElement') + .cancelOn('willDestroyElement'), + transitionToRoute() { this.get('router').transitionTo(...arguments); }, @@ -69,18 +68,6 @@ export default 
Component.extend(FocusOnInsertMixin, { }, actions: { - handleKeyDown(_, e) { - e.stopPropagation(); - if (!(e.keyCode === keys.ENTER && e.metaKey)) { - return; - } - let $form = this.$('form'); - if ($form.length) { - $form.submit(); - } - $form = null; - }, - createOrUpdateKey(type, event) { event.preventDefault(); @@ -101,10 +88,6 @@ export default Component.extend(FocusOnInsertMixin, { ); }, - handleChange() { - this.hasDataChanges(); - }, - setValueOnKey(key, event) { set(get(this, 'key'), key, event.target.checked); }, diff --git a/ui/app/components/ttl-picker.js b/ui/app/components/ttl-picker.js deleted file mode 100644 index 403b324f6bad..000000000000 --- a/ui/app/components/ttl-picker.js +++ /dev/null @@ -1,99 +0,0 @@ -import { typeOf } from '@ember/utils'; -import EmberError from '@ember/error'; -import Component from '@ember/component'; -import { set, computed } from '@ember/object'; -import Duration from 'Duration.js'; - -const ERROR_MESSAGE = 'TTLs must be specified in whole number increments, please enter a whole number.'; - -export default Component.extend({ - 'data-test-component': 'ttl-picker', - classNames: 'field', - setDefaultValue: true, - onChange: () => {}, - labelText: 'TTL', - labelClass: '', - time: 30, - unit: 'm', - initialValue: null, - errorMessage: null, - unitOptions: computed(function() { - return [ - { label: 'seconds', value: 's' }, - { label: 'minutes', value: 'm' }, - { label: 'hours', value: 'h' }, - { label: 'days', value: 'd' }, - ]; - }), - - ouputSeconds: false, - - convertToSeconds(time, unit) { - const toSeconds = { - s: 1, - m: 60, - h: 3600, - }; - - return time * toSeconds[unit]; - }, - - TTL: computed('time', 'unit', function() { - let { time, unit, outputSeconds } = this.getProperties('time', 'unit', 'outputSeconds'); - //convert to hours - if (unit === 'd') { - time = time * 24; - unit = 'h'; - } - const timeString = time + unit; - return outputSeconds ? 
this.convertToSeconds(time, unit) : timeString; - }), - - didInsertElement() { - this._super(...arguments); - if (this.setDefaultValue === false) { - return; - } - this.onChange(this.TTL); - }, - - init() { - this._super(...arguments); - if (!this.onChange) { - throw new EmberError('`onChange` handler is a required attr in `' + this.toString() + '`.'); - } - if (this.initialValue != undefined) { - this.parseAndSetTime(); - } - }, - - parseAndSetTime() { - let value = this.initialValue; - let seconds = typeOf(value) === 'number' ? value : 30; - try { - seconds = Duration.parse(value).seconds(); - } catch (e) { - // if parsing fails leave as default 30 - } - - this.set('time', seconds); - this.set('unit', 's'); - }, - - actions: { - changedValue(key, event) { - let { type, value, checked } = event.target; - let val = type === 'checkbox' ? checked : value; - if (val && key === 'time') { - val = parseInt(val, 10); - if (Number.isNaN(val)) { - this.set('errorMessage', ERROR_MESSAGE); - return; - } - } - this.set('errorMessage', null); - set(this, key, val); - this.onChange(this.TTL); - }, - }, -}); diff --git a/ui/app/components/upgrade-link.js b/ui/app/components/upgrade-link.js deleted file mode 100644 index b9ac938e62ae..000000000000 --- a/ui/app/components/upgrade-link.js +++ /dev/null @@ -1,42 +0,0 @@ -import { later } from '@ember/runloop'; -import Component from '@ember/component'; -import { computed } from '@ember/object'; - -export default Component.extend({ - modalContainer: computed('isActive', function() { - return document.getElementById('modal-wormhole'); - }), - isAnimated: false, - isActive: false, - tagName: 'span', - trackingSource: computed('pageName', function() { - let trackingSource = 'vaultui'; - let pageName = this.get('pageName'); - if (pageName) { - trackingSource = trackingSource + '_' + encodeURIComponent(pageName); - } - return trackingSource; - }), - actions: { - openOverlay() { - this.set('isActive', true); - later( - this, - function() { 
- this.set('isAnimated', true); - }, - 10 - ); - }, - closeOverlay() { - this.set('isAnimated', false); - later( - this, - function() { - this.set('isActive', false); - }, - 300 - ); - }, - }, -}); diff --git a/ui/app/components/upgrade-page.js b/ui/app/components/upgrade-page.js deleted file mode 100644 index c9c974507f50..000000000000 --- a/ui/app/components/upgrade-page.js +++ /dev/null @@ -1,11 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; - -export default Component.extend({ - title: 'Vault Enterprise', - featureName: computed('title', function() { - let title = this.get('title'); - return title === 'Vault Enterprise' ? 'This' : title; - }), - minimumEdition: 'Vault Enterprise', -}); diff --git a/ui/app/controllers/vault/cluster/access/identity/aliases/index.js b/ui/app/controllers/vault/cluster/access/identity/aliases/index.js index bac858c14dab..7c7ef6dddab4 100644 --- a/ui/app/controllers/vault/cluster/access/identity/aliases/index.js +++ b/ui/app/controllers/vault/cluster/access/identity/aliases/index.js @@ -1,5 +1,5 @@ import Controller from '@ember/controller'; -import ListController from 'vault/mixins/list-controller'; +import ListController from 'core/mixins/list-controller'; export default Controller.extend(ListController, { actions: { diff --git a/ui/app/controllers/vault/cluster/access/identity/index.js b/ui/app/controllers/vault/cluster/access/identity/index.js index 088cdcc7de22..0ab1d2827653 100644 --- a/ui/app/controllers/vault/cluster/access/identity/index.js +++ b/ui/app/controllers/vault/cluster/access/identity/index.js @@ -1,6 +1,6 @@ import { inject as service } from '@ember/service'; import Controller from '@ember/controller'; -import ListController from 'vault/mixins/list-controller'; +import ListController from 'core/mixins/list-controller'; export default Controller.extend(ListController, { flashMessages: service(), diff --git a/ui/app/controllers/vault/cluster/access/leases/list.js 
b/ui/app/controllers/vault/cluster/access/leases/list.js index 985502d25a88..7c9ae9b06f3e 100644 --- a/ui/app/controllers/vault/cluster/access/leases/list.js +++ b/ui/app/controllers/vault/cluster/access/leases/list.js @@ -2,7 +2,7 @@ import { inject as service } from '@ember/service'; import { computed } from '@ember/object'; import Controller, { inject as controller } from '@ember/controller'; import utils from 'vault/lib/key-utils'; -import ListController from 'vault/mixins/list-controller'; +import ListController from 'core/mixins/list-controller'; export default Controller.extend(ListController, { flashMessages: service(), diff --git a/ui/app/controllers/vault/cluster/access/method/item/list.js b/ui/app/controllers/vault/cluster/access/method/item/list.js new file mode 100644 index 000000000000..a89b15a6500d --- /dev/null +++ b/ui/app/controllers/vault/cluster/access/method/item/list.js @@ -0,0 +1,4 @@ +import Controller from '@ember/controller'; +import ListController from 'vault/mixins/list-controller'; + +export default Controller.extend(ListController, {}); diff --git a/ui/app/controllers/vault/cluster/replication.js b/ui/app/controllers/vault/cluster/replication.js deleted file mode 100644 index be9c1dbdac16..000000000000 --- a/ui/app/controllers/vault/cluster/replication.js +++ /dev/null @@ -1,129 +0,0 @@ -import { isPresent } from '@ember/utils'; -import { alias } from '@ember/object/computed'; -import { inject as service } from '@ember/service'; -import Controller from '@ember/controller'; - -const DEFAULTS = { - token: null, - id: null, - loading: false, - errors: [], - showFilterConfig: false, - primary_api_addr: null, - primary_cluster_addr: null, - filterConfig: { - mode: 'whitelist', - paths: [], - }, -}; - -export default Controller.extend(DEFAULTS, { - store: service(), - rm: service('replication-mode'), - replicationMode: alias('rm.mode'), - - submitError(e) { - if (e.errors) { - this.set('errors', e.errors); - } else { - throw e; - } - }, - - 
saveFilterConfig() { - const config = this.get('filterConfig'); - const id = this.get('id'); - config.id = id; - const configRecord = this.get('store').createRecord('mount-filter-config', config); - return configRecord.save().catch(e => this.submitError(e)); - }, - - reset() { - this.setProperties(DEFAULTS); - }, - - submitSuccess(resp, action) { - const cluster = this.get('model'); - const store = this.get('store'); - if (!cluster) { - return; - } - - if (resp && resp.wrap_info) { - this.set('token', resp.wrap_info.token); - } - if (action === 'secondary-token') { - this.setProperties({ - loading: false, - primary_api_addr: null, - primary_cluster_addr: null, - }); - return cluster; - } - this.reset(); - return store - .adapterFor('cluster') - .replicationStatus() - .then(status => { - return store.pushPayload('cluster', status); - }) - .finally(() => { - this.set('loading', false); - }); - }, - - submitHandler(action, clusterMode, data, event) { - const replicationMode = this.get('replicationMode'); - let saveFilterConfig; - if (event && event.preventDefault) { - event.preventDefault(); - } - if (data && isPresent(data.saveFilterConfig)) { - saveFilterConfig = data.saveFilterConfig; - delete data.saveFilterConfig; - } - this.setProperties({ - loading: true, - errors: [], - }); - if (data) { - data = Object.keys(data).reduce((newData, key) => { - var val = data[key]; - if (isPresent(val)) { - newData[key] = val; - } - return newData; - }, {}); - } - - return this.get('store') - .adapterFor('cluster') - .replicationAction(action, replicationMode, clusterMode, data) - .then( - resp => { - if (saveFilterConfig) { - return this.saveFilterConfig().then(() => { - return this.submitSuccess(resp, action, clusterMode); - }); - } else { - return this.submitSuccess(resp, action, clusterMode); - } - }, - (...args) => this.submitError(...args) - ); - }, - - actions: { - onSubmit(/*action, mode, data, event*/) { - return this.submitHandler(...arguments); - }, - - clear() { - 
this.reset(); - this.setProperties({ - token: null, - id: null, - }); - }, - }, -}); diff --git a/ui/app/controllers/vault/cluster/replication/mode/secondaries/add.js b/ui/app/controllers/vault/cluster/replication/mode/secondaries/add.js deleted file mode 100644 index 01da58dffbe3..000000000000 --- a/ui/app/controllers/vault/cluster/replication/mode/secondaries/add.js +++ /dev/null @@ -1,3 +0,0 @@ -import ReplicationController from '../../../replication'; - -export default ReplicationController.extend(); diff --git a/ui/app/controllers/vault/cluster/replication/mode/secondaries/config-edit.js b/ui/app/controllers/vault/cluster/replication/mode/secondaries/config-edit.js deleted file mode 100644 index 90c2703f17fa..000000000000 --- a/ui/app/controllers/vault/cluster/replication/mode/secondaries/config-edit.js +++ /dev/null @@ -1,56 +0,0 @@ -import { alias } from '@ember/object/computed'; -import { inject as service } from '@ember/service'; -import Controller from '@ember/controller'; - -const CONFIG_DEFAULTS = { - mode: 'whitelist', - paths: [], -}; - -export default Controller.extend({ - flashMessages: service(), - rm: service('replication-mode'), - replicationMode: alias('rm.mode'), - actions: { - resetConfig(config) { - if (config.get('isNew')) { - config.setProperties(CONFIG_DEFAULTS); - } else { - config.rollbackAttributes(); - } - }, - - saveConfig(config, isDelete) { - const flash = this.get('flashMessages'); - const id = config.id; - const redirectArgs = isDelete - ? [ - 'vault.cluster.replication.mode.secondaries', - this.model.cluster.get('name'), - this.get('replicationMode'), - ] - : ['vault.cluster.replication.mode.secondaries.config-show', id]; - const modelMethod = isDelete ? config.destroyRecord : config.save; - - modelMethod - .call(config) - .then(() => { - this.transitionToRoute(...redirectArgs) - .followRedirects() - .then(() => { - flash.success( - `The performance mount filter config for the secondary ${id} was successfully ${ - isDelete ? 
'deleted' : 'saved' - }.` - ); - }); - }) - .catch(e => { - const errString = e.errors.join('.'); - flash.error( - `There was an error ${isDelete ? 'deleting' : 'saving'} the config for ${id}: ${errString}` - ); - }); - }, - }, -}); diff --git a/ui/app/controllers/vault/cluster/replication/mode/secondaries/index.js b/ui/app/controllers/vault/cluster/replication/mode/secondaries/index.js deleted file mode 100644 index 01da58dffbe3..000000000000 --- a/ui/app/controllers/vault/cluster/replication/mode/secondaries/index.js +++ /dev/null @@ -1,3 +0,0 @@ -import ReplicationController from '../../../replication'; - -export default ReplicationController.extend(); diff --git a/ui/app/controllers/vault/cluster/replication/mode/secondaries/revoke.js b/ui/app/controllers/vault/cluster/replication/mode/secondaries/revoke.js deleted file mode 100644 index 9be89e9d34fb..000000000000 --- a/ui/app/controllers/vault/cluster/replication/mode/secondaries/revoke.js +++ /dev/null @@ -1 +0,0 @@ -export { default } from '../../../replication'; diff --git a/ui/app/controllers/vault/cluster/replication/replication-mode.js b/ui/app/controllers/vault/cluster/replication/replication-mode.js deleted file mode 100644 index b5b82ad762fa..000000000000 --- a/ui/app/controllers/vault/cluster/replication/replication-mode.js +++ /dev/null @@ -1,8 +0,0 @@ -import { alias } from '@ember/object/computed'; -import { inject as service } from '@ember/service'; -import Controller from '@ember/controller'; - -export default Controller.extend({ - rm: service('replication-mode'), - replicationMode: alias('rm.mode'), -}); diff --git a/ui/app/controllers/vault/cluster/secrets/backend/configuration.js b/ui/app/controllers/vault/cluster/secrets/backend/configuration.js new file mode 100644 index 000000000000..93aabec1dd7b --- /dev/null +++ b/ui/app/controllers/vault/cluster/secrets/backend/configuration.js @@ -0,0 +1,9 @@ +import { computed } from '@ember/object'; +import Controller from '@ember/controller'; + 
+export default Controller.extend({ + isConfigurable: computed('model.type', function() { + const configurableEngines = ['aws', 'ssh', 'pki']; + return configurableEngines.includes(this.get('model.type')); + }), +}); diff --git a/ui/app/controllers/vault/cluster/secrets/backend/list.js b/ui/app/controllers/vault/cluster/secrets/backend/list.js index cfda28671fcd..6aa11e2259db 100644 --- a/ui/app/controllers/vault/cluster/secrets/backend/list.js +++ b/ui/app/controllers/vault/cluster/secrets/backend/list.js @@ -4,7 +4,7 @@ import Controller from '@ember/controller'; import utils from 'vault/lib/key-utils'; import BackendCrumbMixin from 'vault/mixins/backend-crumb'; import WithNavToNearestAncestor from 'vault/mixins/with-nav-to-nearest-ancestor'; -import ListController from 'vault/mixins/list-controller'; +import ListController from 'core/mixins/list-controller'; export default Controller.extend(ListController, BackendCrumbMixin, WithNavToNearestAncestor, { flashMessages: service(), @@ -16,6 +16,10 @@ export default Controller.extend(ListController, BackendCrumbMixin, WithNavToNea return !!utils.keyIsFolder(this.get('filter')); }), + isConfigurableTab: computed('isCertTab', 'isConfigure', function() { + return this.get('isCertTab') || this.get('isConfigure'); + }), + actions: { chooseAction(action) { this.set('selectedAction', action); diff --git a/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js b/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js index 432d5f32ff1a..c9358cec84f4 100644 --- a/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js +++ b/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js @@ -10,7 +10,11 @@ export default Controller.extend({ onMountSuccess: function(type, path) { let transition; if (SUPPORTED_BACKENDS.includes(type)) { - transition = this.transitionToRoute('vault.cluster.secrets.backend.index', path); + if (type === 'kmip') { + transition = 
this.transitionToRoute('vault.cluster.secrets.backend.kmip.scopes', path); + } else { + transition = this.transitionToRoute('vault.cluster.secrets.backend.index', path); + } } else { transition = this.transitionToRoute('vault.cluster.secrets.backends'); } diff --git a/ui/app/controllers/vault/cluster/settings/seal.js b/ui/app/controllers/vault/cluster/settings/seal.js index d0afb3673b6a..721a2fee6df1 100644 --- a/ui/app/controllers/vault/cluster/settings/seal.js +++ b/ui/app/controllers/vault/cluster/settings/seal.js @@ -12,7 +12,7 @@ export default Controller.extend({ .then(() => { this.model.cluster.get('leaderNode').set('sealed', true); this.get('auth').deleteCurrentToken(); - return this.transitionToRoute('vault.cluster'); + return this.transitionToRoute('vault.cluster.unseal'); }); }, }, diff --git a/ui/app/helpers/format-number.js b/ui/app/helpers/format-number.js new file mode 100644 index 000000000000..a782e37de390 --- /dev/null +++ b/ui/app/helpers/format-number.js @@ -0,0 +1,8 @@ +import { helper } from '@ember/component/helper'; + +export function formatNumber([number]) { + // formats a number according to the locale + return new Intl.NumberFormat().format(number); +} + +export default helper(formatNumber); diff --git a/ui/app/helpers/format-utc.js b/ui/app/helpers/format-utc.js new file mode 100644 index 000000000000..82995b8153fd --- /dev/null +++ b/ui/app/helpers/format-utc.js @@ -0,0 +1,15 @@ +import { helper } from '@ember/component/helper'; +import d3 from 'd3-time-format'; + +export function formatUtc([date, specifier]) { + // given a date, format and display it as UTC. 
+ const format = d3.utcFormat(specifier); + const parse = d3.utcParse('%Y-%m-%dT%H:%M:%SZ'); + + // if a date isn't already in UTC, fallback to isoParse to convert it to UTC + const parsedDate = parse(date) || d3.isoParse(date); + + return format(parsedDate); +} + +export default helper(formatUtc); diff --git a/ui/app/helpers/includes.js b/ui/app/helpers/includes.js deleted file mode 100644 index ff61d3586b9d..000000000000 --- a/ui/app/helpers/includes.js +++ /dev/null @@ -1,7 +0,0 @@ -import { helper as buildHelper } from '@ember/component/helper'; - -export function includes([haystack, needle]) { - return haystack.includes(needle); -} - -export default buildHelper(includes); diff --git a/ui/app/helpers/message-types.js b/ui/app/helpers/message-types.js deleted file mode 100644 index 8ae5db040792..000000000000 --- a/ui/app/helpers/message-types.js +++ /dev/null @@ -1,34 +0,0 @@ -import { helper as buildHelper } from '@ember/component/helper'; - -export const MESSAGE_TYPES = { - info: { - class: 'is-info', - glyphClass: 'has-text-info', - glyph: 'information-circled', - text: 'Info', - }, - success: { - class: 'is-success', - glyphClass: 'has-text-success', - glyph: 'checkmark-circled', - text: 'Success', - }, - danger: { - class: 'is-danger', - glyphClass: 'has-text-danger', - glyph: 'cancel-square-fill', - text: 'Error', - }, - warning: { - class: 'is-highlight', - glyphClass: 'has-text-highlight', - glyph: 'alert-circled', - text: 'Warning', - }, -}; - -export function messageTypes([type]) { - return MESSAGE_TYPES[type]; -} - -export default buildHelper(messageTypes); diff --git a/ui/app/helpers/mountable-secret-engines.js b/ui/app/helpers/mountable-secret-engines.js index 7256b3dce265..19af064a5a74 100644 --- a/ui/app/helpers/mountable-secret-engines.js +++ b/ui/app/helpers/mountable-secret-engines.js @@ -1,5 +1,12 @@ import { helper as buildHelper } from '@ember/component/helper'; +export const KMIP = { + displayName: 'KMIP', + value: 'kmip', + type: 'kmip', + 
category: 'generic', +}; + const MOUNTABLE_SECRET_ENGINES = [ { displayName: 'Active Directory', diff --git a/ui/app/helpers/reduce-to-array.js b/ui/app/helpers/reduce-to-array.js deleted file mode 100644 index b6d76d9e2b24..000000000000 --- a/ui/app/helpers/reduce-to-array.js +++ /dev/null @@ -1,17 +0,0 @@ -import { helper as buildHelper } from '@ember/component/helper'; -import { isNone, typeOf } from '@ember/utils'; - -export function reduceToArray(params) { - return params.reduce(function(result, param) { - if (isNone(param)) { - return result; - } - if (typeOf(param) === 'array') { - return result.concat(param); - } else { - return result.concat([param]); - } - }, []); -} - -export default buildHelper(reduceToArray); diff --git a/ui/app/helpers/supported-secret-backends.js b/ui/app/helpers/supported-secret-backends.js index 748ab5abb99e..9c86859b7207 100644 --- a/ui/app/helpers/supported-secret-backends.js +++ b/ui/app/helpers/supported-secret-backends.js @@ -1,6 +1,6 @@ import { helper as buildHelper } from '@ember/component/helper'; -const SUPPORTED_SECRET_BACKENDS = ['aws', 'cubbyhole', 'generic', 'kv', 'pki', 'ssh', 'transit']; +const SUPPORTED_SECRET_BACKENDS = ['aws', 'cubbyhole', 'generic', 'kv', 'pki', 'ssh', 'transit', 'kmip']; export function supportedSecretBackends() { return SUPPORTED_SECRET_BACKENDS; diff --git a/ui/app/helpers/tabs-for-auth-section.js b/ui/app/helpers/tabs-for-auth-section.js index 141da3decce3..c66ed62220d6 100644 --- a/ui/app/helpers/tabs-for-auth-section.js +++ b/ui/app/helpers/tabs-for-auth-section.js @@ -1,4 +1,6 @@ import { helper as buildHelper } from '@ember/component/helper'; +import { pluralize } from 'ember-inflector'; +import { capitalize } from '@ember/string'; const TABS_FOR_SETTINGS = { aws: [ @@ -73,19 +75,27 @@ const TABS_FOR_SETTINGS = { const TABS_FOR_SHOW = {}; -export function tabsForAuthSection([methodType, sectionType = 'authSettings']) { +export function tabsForAuthSection([model, sectionType = 
'authSettings', paths]) { let tabs; - if (sectionType === 'authSettings') { - tabs = (TABS_FOR_SETTINGS[methodType] || []).slice(); + tabs = (TABS_FOR_SETTINGS[model.type] || []).slice(); tabs.push({ label: 'Method Options', routeParams: ['vault.cluster.settings.auth.configure.section', 'options'], }); return tabs; } - - tabs = (TABS_FOR_SHOW[methodType] || []).slice(); + if (paths) { + tabs = paths.map(path => { + let itemName = path.slice(1); //get rid of leading slash + return { + label: capitalize(pluralize(itemName)), + routeParams: ['vault.cluster.access.method.item.list', itemName], + }; + }); + } else { + tabs = (TABS_FOR_SHOW[model.type] || []).slice(); + } tabs.push({ label: 'Configuration', routeParams: ['vault.cluster.access.method.section', 'configuration'], diff --git a/ui/app/initializers/enable-engines.js b/ui/app/initializers/enable-engines.js new file mode 100644 index 000000000000..858f36b06d08 --- /dev/null +++ b/ui/app/initializers/enable-engines.js @@ -0,0 +1,13 @@ +import config from '../config/environment'; + +export function initialize(/* application */) { + // attach mount hooks to the environment config + // context will be the router DSL + config.addRootMounts = function() { + this.mount('replication'); + }; +} + +export default { + initialize, +}; diff --git a/ui/app/lib/attach-capabilities.js b/ui/app/lib/attach-capabilities.js new file mode 100644 index 000000000000..4a32a66fa054 --- /dev/null +++ b/ui/app/lib/attach-capabilities.js @@ -0,0 +1,82 @@ +import DS from 'ember-data'; +import { assert, debug } from '@ember/debug'; +import { typeOf } from '@ember/utils'; +import { isArray } from '@ember/array'; +const { belongsTo } = DS; + +/* + * + * attachCapabilities + * + * @param modelClass = An Ember Data model class + * @param capabilities - an Object whose keys will added to the model class as related 'capabilities' models + * and whose values should be functions that return the id of the related capabilites model + * + * definition 
of capabilities be done shorthand with the apiPath tagged template funtion + * + * + * @usage + * + * let Model = DS.Model.extend({ + * backend: attr(), + * scope: attr(), + * }); + * + * export default attachCapabilities(Model, { + * updatePath: apiPath`${'backend'}/scope/${'scope'}/role/${'id'}`, + * }); + * + */ +export default function attachCapabilities(modelClass, capabilities) { + let capabilityKeys = Object.keys(capabilities); + let newRelationships = capabilityKeys.reduce((ret, key) => { + ret[key] = belongsTo('capabilities'); + return ret; + }, {}); + + debug(`adding new relationships: ${capabilityKeys.join(', ')} to ${modelClass.toString()}`); + modelClass.reopen(newRelationships); + modelClass.reopenClass({ + // relatedCapabilities is called in the application serializer's + // normalizeResponse hook to add the capabilities relationships to the + // JSON-API document used by Ember Data + relatedCapabilities(jsonAPIDoc) { + let { data, included } = jsonAPIDoc; + if (!data) { + data = jsonAPIDoc; + } + if (isArray(data)) { + let newData = data.map(this.relatedCapabilities); + return { + data: newData, + included, + }; + } + let context = { + id: data.id, + ...data.attributes, + }; + for (let newCapability of capabilityKeys) { + let templateFn = capabilities[newCapability]; + let type = typeOf(templateFn); + assert(`expected value of ${newCapability} to be a function but found ${type}.`, type === 'function'); + data.relationships[newCapability] = { + data: { + type: 'capabilities', + id: templateFn(context), + }, + }; + } + + if (included) { + return { + data, + included, + }; + } else { + return data; + } + }, + }); + return modelClass; +} diff --git a/ui/app/lib/console-helpers.js b/ui/app/lib/console-helpers.js index c0cb651f7496..65ea1b3c5662 100644 --- a/ui/app/lib/console-helpers.js +++ b/ui/app/lib/console-helpers.js @@ -2,7 +2,7 @@ import keys from 'vault/lib/keycodes'; import argTokenizer from 'yargs-parser/lib/tokenize-arg-string.js'; const 
supportedCommands = ['read', 'write', 'list', 'delete']; -const uiCommands = ['clearall', 'clear', 'fullscreen', 'refresh']; +const uiCommands = ['api', 'clearall', 'clear', 'fullscreen', 'refresh']; export function extractDataAndFlags(data, flags) { return data.concat(flags).reduce( @@ -32,38 +32,24 @@ export function extractDataAndFlags(data, flags) { ); } -export function executeUICommand(command, logAndOutput, clearLog, toggleFullscreen, refreshFn) { - const isUICommand = uiCommands.includes(command); +export function executeUICommand(command, logAndOutput, commandFns) { + let cmd = command.startsWith('api') ? 'api' : command; + let isUICommand = uiCommands.includes(cmd); if (isUICommand) { logAndOutput(command); } - switch (command) { - case 'clearall': - clearLog(true); - break; - case 'clear': - clearLog(); - break; - case 'fullscreen': - toggleFullscreen(); - break; - case 'refresh': - refreshFn(); - break; + if (typeof commandFns[cmd] === 'function') { + commandFns[cmd](); } - return isUICommand; } export function parseCommand(command, shouldThrow) { - // encode everything but spaces - let cmd = encodeURIComponent(command).replace(/%20/g, decodeURIComponent); - let args = argTokenizer(cmd); + let args = argTokenizer(command); if (args[0] === 'vault') { args.shift(); } - args = args.map(decodeURIComponent); let [method, ...rest] = args; let path; let flags = []; @@ -74,7 +60,17 @@ export function parseCommand(command, shouldThrow) { flags.push(arg); } else { if (path) { - data.push(arg); + let strippedArg = arg + // we'll have arg=something or arg="lol I need spaces", so need to split on the first = + .split(/=(.+)/) + // remove matched wrapping " or ' from each item + .map(item => item.replace(/^("|')(.+)(\1)$/, '$2')) + // if there were quotes, there's an empty string as the last member in the array that we don't want, + // so filter it out + .filter(str => str !== '') + // glue the data back together + .join('='); + data.push(strippedArg); } else { path 
= arg; } diff --git a/ui/app/mixins/cluster-route.js b/ui/app/mixins/cluster-route.js index 9b814c99ed14..2b7ff244ed23 100644 --- a/ui/app/mixins/cluster-route.js +++ b/ui/app/mixins/cluster-route.js @@ -13,6 +13,7 @@ export { INIT, UNSEAL, AUTH, CLUSTER, DR_REPLICATION_SECONDARY }; export default Mixin.create({ auth: service(), + store: service(), transitionToTargetRoute(transition) { const targetRoute = this.targetRouteName(transition); @@ -28,7 +29,7 @@ export default Mixin.create({ }, clusterModel() { - return this.modelFor(CLUSTER); + return this.modelFor(CLUSTER) || this.store.peekRecord('cluster', 'vault'); }, authToken() { diff --git a/ui/app/mixins/list-controller.js b/ui/app/mixins/list-controller.js deleted file mode 100644 index 423b42a12e21..000000000000 --- a/ui/app/mixins/list-controller.js +++ /dev/null @@ -1,45 +0,0 @@ -import { computed } from '@ember/object'; -import Mixin from '@ember/object/mixin'; -import escapeStringRegexp from 'escape-string-regexp'; - -export default Mixin.create({ - queryParams: { - page: 'page', - pageFilter: 'pageFilter', - }, - - page: 1, - pageFilter: null, - filter: null, - filterFocused: false, - - isLoading: false, - - filterMatchesKey: computed('filter', 'model', 'model.[]', function() { - var filter = this.get('filter'); - var content = this.get('model'); - return !!(content.length && content.findBy('id', filter)); - }), - - firstPartialMatch: computed('filter', 'model', 'model.[]', 'filterMatchesKey', function() { - var filter = this.get('filter'); - var content = this.get('model'); - var filterMatchesKey = this.get('filterMatchesKey'); - var re = new RegExp('^' + escapeStringRegexp(filter)); - return filterMatchesKey - ? 
null - : content.find(function(key) { - return re.test(key.get('id')); - }); - }), - - actions: { - setFilter(val) { - this.set('filter', val); - }, - - setFilterFocus(bool) { - this.set('filterFocused', bool); - }, - }, -}); diff --git a/ui/app/mixins/list-route.js b/ui/app/mixins/list-route.js deleted file mode 100644 index eb1e713018a2..000000000000 --- a/ui/app/mixins/list-route.js +++ /dev/null @@ -1,12 +0,0 @@ -import Mixin from '@ember/object/mixin'; - -export default Mixin.create({ - queryParams: { - page: { - refreshModel: true, - }, - pageFilter: { - refreshModel: true, - }, - }, -}); diff --git a/ui/app/mixins/replication-actions.js b/ui/app/mixins/replication-actions.js deleted file mode 100644 index a2ab65237b9b..000000000000 --- a/ui/app/mixins/replication-actions.js +++ /dev/null @@ -1,105 +0,0 @@ -import { inject as service } from '@ember/service'; -import { or } from '@ember/object/computed'; -import { isPresent } from '@ember/utils'; -import Mixin from '@ember/object/mixin'; -import { task } from 'ember-concurrency'; - -export default Mixin.create({ - store: service(), - router: service(), - loading: or('save.isRunning', 'submitSuccess.isRunning'), - submitHandler(action, clusterMode, data, event) { - let replicationMode = (data && data.replicationMode) || this.get('replicationMode'); - if (event && event.preventDefault) { - event.preventDefault(); - } - this.setProperties({ - errors: [], - }); - if (data) { - data = Object.keys(data).reduce((newData, key) => { - var val = data[key]; - if (isPresent(val)) { - newData[key] = val; - } - return newData; - }, {}); - delete data.replicationMode; - } - - return this.get('save').perform(action, replicationMode, clusterMode, data); - }, - - save: task(function*(action, replicationMode, clusterMode, data) { - let resp; - try { - resp = yield this.get('store') - .adapterFor('cluster') - .replicationAction(action, replicationMode, clusterMode, data); - } catch (e) { - return this.submitError(e); - } - yield 
this.get('submitSuccess').perform(resp, action, clusterMode); - }).drop(), - - submitSuccess: task(function*(resp, action, mode) { - const cluster = this.get('cluster'); - const replicationMode = this.get('selectedReplicationMode') || this.get('replicationMode'); - const store = this.get('store'); - if (!cluster) { - return; - } - - if (resp && resp.wrap_info) { - this.set('token', resp.wrap_info.token); - } - if (action === 'secondary-token') { - this.setProperties({ - loading: false, - primary_api_addr: null, - primary_cluster_addr: null, - }); - return cluster; - } - this.reset(); - if (action === 'enable') { - // do something to show model is pending - cluster.set( - replicationMode, - store.createFragment('replication-attributes', { - mode: 'bootstrapping', - }) - ); - if (mode === 'secondary' && replicationMode === 'performance') { - // if we're enabing a secondary, there could be mount filtering, - // so we should unload all of the backends - store.unloadAll('secret-engine'); - } - } - const router = this.get('router'); - if (action === 'disable') { - yield router.transitionTo('vault.cluster.replication.mode', replicationMode); - } - try { - yield cluster.reload(); - } catch (e) { - // no error handling here - } - cluster.rollbackAttributes(); - if (action === 'enable') { - yield router.transitionTo('vault.cluster.replication.mode', replicationMode); - } - - if (mode === 'secondary' && replicationMode === 'dr') { - yield router.transitionTo('vault.cluster'); - } - }).drop(), - - submitError(e) { - if (e.errors) { - this.set('errors', e.errors); - } else { - throw e; - } - }, -}); diff --git a/ui/app/mixins/unload-model-route.js b/ui/app/mixins/unload-model-route.js index 244079895358..b1f10d111cfd 100644 --- a/ui/app/mixins/unload-model-route.js +++ b/ui/app/mixins/unload-model-route.js @@ -5,12 +5,15 @@ import Mixin from '@ember/object/mixin'; export default Mixin.create({ modelPath: 'model', unloadModel() { - const model = 
this.controller.get(this.get('modelPath')); + let { modelPath } = this; + let model = this.controller.get(modelPath); if (!model || !model.unloadRecord) { return; } this.store.unloadRecord(model); model.destroy(); + // it's important to unset the model on the controller since controllers are singletons + this.controller.set(modelPath, null); }, actions: { diff --git a/ui/app/models/auth-config/github.js b/ui/app/models/auth-config/github.js index df745af14e86..bee219e4795b 100644 --- a/ui/app/models/auth-config/github.js +++ b/ui/app/models/auth-config/github.js @@ -19,6 +19,7 @@ export default AuthConfig.extend({ { 'GitHub Options': ['baseUrl'], }, + { TTLs: ['ttl', 'maxTtl'] }, ]; if (this.newFields) { groups = combineFieldGroups(groups, this.newFields, []); diff --git a/ui/app/models/auth-config/ldap.js b/ui/app/models/auth-config/ldap.js index de51d1489bd1..5ab79c1d3a49 100644 --- a/ui/app/models/auth-config/ldap.js +++ b/ui/app/models/auth-config/ldap.js @@ -1,50 +1,11 @@ import { computed } from '@ember/object'; -import DS from 'ember-data'; import AuthConfig from '../auth-config'; import fieldToAttrs from 'vault/utils/field-to-attrs'; import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; -const { attr } = DS; - export default AuthConfig.extend({ useOpenAPI: true, - binddn: attr('string', { - helpText: 'Used when performing user search. Example: cn=vault,ou=Users,dc=example,dc=com', - }), - bindpass: attr('string', { - helpText: 'Used along with binddn when performing user search', - sensitive: true, - }), - userdn: attr('string', { - helpText: 'Base DN under which to perform user search. Example: ou=Users,dc=example,dc=com', - }), - userattr: attr('string', { - helpText: - 'Attribute on user attribute object matching the username passed when authenticating. Examples: sAMAccountName, cn, uid', - }), - upndomain: attr('string', { - helpText: - 'The userPrincipalDomain used to construct the UPN string for the authenticating user. 
The constructed UPN will appear as [username]@UPNDomain. Example: example.com, which will cause vault to bind as username@example.com.', - }), - - groupfilter: attr('string', { - helpText: - 'Go template used when constructing the group membership query. The template can access the following context variables: [UserDN, Username]. The default is (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}})), which is compatible with several common directory schemas. To support nested group resolution for Active Directory, instead use the following query: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))', - }), - groupdn: attr('string', { - helpText: - 'LDAP search base for group membership search. This can be the root containing either groups or users. Example: ou=Groups,dc=example,dc=com', - }), - groupattr: attr('string', { - helpText: - 'LDAP attribute to follow on objects returned by groupfilter in order to enumerate user group membership. Examples: for groupfilter queries returning group objects, use: cn. For queries returning user objects, use: memberOf. The default is cn.', - }), - useTokenGroups: attr('boolean', { - helpText: - 'Use the Active Directory tokenGroups constructed attribute to find the group memberships. This returns all security groups for the user, including nested groups. In an Active Directory environment with a large number of groups this method offers increased performance. 
Selecting this will cause Group DN, Attribute, and Filter to be ignored.', - }), - fieldGroups: computed(function() { let groups = [ { diff --git a/ui/app/models/cluster.js b/ui/app/models/cluster.js index 2ed2686cb110..5084d63ed3d0 100644 --- a/ui/app/models/cluster.js +++ b/ui/app/models/cluster.js @@ -70,7 +70,7 @@ export default DS.Model.extend({ }), stateGlyph(state) { - const glyph = 'checkmark-circled-outline'; + const glyph = 'check-circled-outline'; const glyphs = { 'stream-wals': 'android-sync', diff --git a/ui/app/models/kmip/ca.js b/ui/app/models/kmip/ca.js new file mode 100644 index 000000000000..08a2989db02b --- /dev/null +++ b/ui/app/models/kmip/ca.js @@ -0,0 +1,9 @@ +import DS from 'ember-data'; +const { attr, belongsTo } = DS; + +export default DS.Model.extend({ + config: belongsTo('kmip/config', { async: false }), + caPem: attr('string', { + label: 'CA PEM', + }), +}); diff --git a/ui/app/models/kmip/config.js b/ui/app/models/kmip/config.js new file mode 100644 index 000000000000..6726aeee83d5 --- /dev/null +++ b/ui/app/models/kmip/config.js @@ -0,0 +1,21 @@ +import DS from 'ember-data'; +import { computed } from '@ember/object'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; + +const { belongsTo } = DS; + +export default DS.Model.extend({ + useOpenAPI: true, + ca: belongsTo('kmip/ca', { async: false }), + getHelpUrl(path) { + return `/v1/${path}/config?help=1`; + }, + + fieldGroups: computed(function() { + let groups = [{ default: ['listenAddrs', 'connectionTimeout'] }]; + + groups = combineFieldGroups(groups, this.newFields, []); + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/kmip/credential.js b/ui/app/models/kmip/credential.js new file mode 100644 index 000000000000..a8f9f0320100 --- /dev/null +++ b/ui/app/models/kmip/credential.js @@ -0,0 +1,36 @@ +import DS from 'ember-data'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import 
{ computed } from '@ember/object'; +const { attr } = DS; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +const Model = DS.Model.extend({ + backend: attr({ readOnly: true }), + scope: attr({ readOnly: true }), + role: attr({ readOnly: true }), + certificate: attr('string', { readOnly: true }), + caChain: attr({ readOnly: true }), + privateKey: attr('string', { + readOnly: true, + sensitive: true, + }), + format: attr('string', { + possibleValues: ['pem', 'der', 'pem_bundle'], + defaultValue: 'pem', + label: 'Certificate format', + }), + fieldGroups: computed(function() { + const groups = [ + { + default: ['format'], + }, + ]; + + return fieldToAttrs(this, groups); + }), +}); + +export default attachCapabilities(Model, { + deletePath: apiPath`${'backend'}/scope/${'scope'}/role/${'role'}/credentials/revoke`, +}); diff --git a/ui/app/models/kmip/role.js b/ui/app/models/kmip/role.js new file mode 100644 index 000000000000..e68ce378b489 --- /dev/null +++ b/ui/app/models/kmip/role.js @@ -0,0 +1,48 @@ +import DS from 'ember-data'; +import { computed } from '@ember/object'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +const { attr } = DS; +export const COMPUTEDS = { + operationFields: computed('newFields', function() { + return this.newFields.filter(key => key.startsWith('operation')); + }), + + operationFieldsWithoutSpecial: computed('operationFields', function() { + return this.operationFields.slice().removeObjects(['operationAll', 'operationNone']); + }), + + nonOperationFields: computed('operationFields', function() { + let excludeFields = ['role'].concat(this.operationFields); + return this.newFields.slice().removeObjects(excludeFields); + }), +}; + +const Model = DS.Model.extend(COMPUTEDS, { + useOpenAPI: true, + 
backend: attr({ readOnly: true }), + scope: attr({ readOnly: true }), + name: attr({ readOnly: true }), + getHelpUrl(path) { + return `/v1/${path}/scope/example/role/example?help=1`; + }, + fieldGroups: computed('fields', 'nonOperationFields', function() { + const groups = [{ default: this.nonOperationFields }, { 'Allowed Operations': this.operationFields }]; + let ret = fieldToAttrs(this, groups); + return ret; + }), + + operationFormFields: computed('operationFieldsWithoutSpecial', function() { + return expandAttributeMeta(this, this.operationFieldsWithoutSpecial); + }), + fields: computed('nonOperationFields', function() { + return expandAttributeMeta(this, this.nonOperationFields); + }), +}); + +export default attachCapabilities(Model, { + updatePath: apiPath`${'backend'}/scope/${'scope'}/role/${'id'}`, +}); diff --git a/ui/app/models/kmip/scope.js b/ui/app/models/kmip/scope.js new file mode 100644 index 000000000000..24171565bf4e --- /dev/null +++ b/ui/app/models/kmip/scope.js @@ -0,0 +1,19 @@ +import { computed } from '@ember/object'; +import DS from 'ember-data'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +const { attr } = DS; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; + +let Model = DS.Model.extend({ + name: attr('string'), + backend: attr({ readOnly: true }), + attrs: computed(function() { + return expandAttributeMeta(this, ['name']); + }), +}); + +export default attachCapabilities(Model, { + updatePath: apiPath`${'backend'}/scope/${'id'}`, +}); diff --git a/ui/app/models/requests.js b/ui/app/models/requests.js new file mode 100644 index 000000000000..edf020cfe7e2 --- /dev/null +++ b/ui/app/models/requests.js @@ -0,0 +1,32 @@ +import DS from 'ember-data'; +const { attr } = DS; + +/* sample response + +{ + "request_id": "75cbaa46-e741-3eba-2be2-325b1ba8f03f", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "counters": [ + { + 
"start_time": "2019-05-01T00:00:00Z", + "total": 50 + }, + { + "start_time": "2019-04-01T00:00:00Z", + "total": 45 + } + ] + }, + "wrap_info": null, + "warnings": null, + "auth": null +} + +*/ + +export default DS.Model.extend({ + counters: attr('array'), +}); diff --git a/ui/app/models/role-jwt.js b/ui/app/models/role-jwt.js index 0083b6e4d9a8..fda75df709f1 100644 --- a/ui/app/models/role-jwt.js +++ b/ui/app/models/role-jwt.js @@ -1,6 +1,6 @@ import DS from 'ember-data'; import { computed } from '@ember/object'; -import parseURL from 'vault/utils/parse-url'; +import parseURL from 'core/utils/parse-url'; const { attr } = DS; const DOMAIN_STRINGS = { diff --git a/ui/app/router.js b/ui/app/router.js index b1b87d87904e..10796047baf7 100644 --- a/ui/app/router.js +++ b/ui/app/router.js @@ -13,7 +13,9 @@ Router.map(function() { this.route('auth'); this.route('init'); this.route('logout'); + this.mount('open-api-explorer', { path: '/api-explorer' }); this.route('license'); + this.route('requests', { path: '/metrics/requests' }); this.route('settings', function() { this.route('index', { path: '/' }); this.route('seal'); @@ -39,6 +41,12 @@ Router.map(function() { this.route('methods', { path: '/' }); this.route('method', { path: '/:path' }, function() { this.route('index', { path: '/' }); + this.route('item', { path: '/item/:item_type' }, function() { + this.route('list', { path: '/' }); + this.route('create'); + this.route('edit', { path: '/edit/:item_id' }); + this.route('show', { path: '/show/:item_id' }); + }); this.route('section', { path: '/:section_name' }); }); this.route('leases', function() { @@ -76,6 +84,7 @@ Router.map(function() { this.route('secrets', function() { this.route('backends', { path: '/' }); this.route('backend', { path: '/:backend' }, function() { + this.mount('kmip'); this.route('index', { path: '/' }); this.route('configuration'); // because globs / params can't be empty, @@ -114,21 +123,9 @@ Router.map(function() { this.route('edit', { path: 
'/:policy_name/edit' }); }); this.route('replication-dr-promote'); - this.route('replication', function() { - this.route('index', { path: '/' }); - this.route('mode', { path: '/:replication_mode' }, function() { - //details - this.route('index', { path: '/' }); - this.route('manage'); - this.route('secondaries', function() { - this.route('add', { path: '/add' }); - this.route('revoke', { path: '/revoke' }); - this.route('config-show', { path: '/config/show/:secondary_id' }); - this.route('config-edit', { path: '/config/edit/:secondary_id' }); - this.route('config-create', { path: '/config/create/:secondary_id' }); - }); - }); - }); + if (config.addRootMounts) { + config.addRootMounts.call(this); + } this.route('not-found', { path: '/*path' }); }); diff --git a/ui/app/routes/vault/cluster.js b/ui/app/routes/vault/cluster.js index 46a41eb3b889..ab2b78b07b39 100644 --- a/ui/app/routes/vault/cluster.js +++ b/ui/app/routes/vault/cluster.js @@ -52,14 +52,14 @@ export default Route.extend(ModelBoundaryRoute, ClusterRoute, { } }, - beforeModel() { + async beforeModel() { const params = this.paramsFor(this.routeName); this.clearNonGlobalModels(); this.get('namespaceService').setNamespace(params.namespaceQueryParam); const id = this.getClusterId(params); if (id) { this.get('auth').setCluster(id); - this.get('permissions').getPaths.perform(); + await this.get('permissions').getPaths.perform(); return this.get('version').fetchFeatures(); } else { return reject({ httpStatus: 404, message: 'not found', path: params.cluster_name }); diff --git a/ui/app/routes/vault/cluster/access/identity/aliases/index.js b/ui/app/routes/vault/cluster/access/identity/aliases/index.js index abee732e5bea..b661ef175069 100644 --- a/ui/app/routes/vault/cluster/access/identity/aliases/index.js +++ b/ui/app/routes/vault/cluster/access/identity/aliases/index.js @@ -1,5 +1,5 @@ import Route from '@ember/routing/route'; -import ListRoute from 'vault/mixins/list-route'; +import ListRoute from 
'core/mixins/list-route'; export default Route.extend(ListRoute, { model(params) { diff --git a/ui/app/routes/vault/cluster/access/identity/index.js b/ui/app/routes/vault/cluster/access/identity/index.js index a9a983ecdd36..cdd6faabd1e1 100644 --- a/ui/app/routes/vault/cluster/access/identity/index.js +++ b/ui/app/routes/vault/cluster/access/identity/index.js @@ -1,5 +1,5 @@ import Route from '@ember/routing/route'; -import ListRoute from 'vault/mixins/list-route'; +import ListRoute from 'core/mixins/list-route'; export default Route.extend(ListRoute, { model(params) { diff --git a/ui/app/routes/vault/cluster/access/method.js b/ui/app/routes/vault/cluster/access/method.js index d6c69ba5fe6d..c29851ce9cea 100644 --- a/ui/app/routes/vault/cluster/access/method.js +++ b/ui/app/routes/vault/cluster/access/method.js @@ -1,8 +1,10 @@ import { set } from '@ember/object'; import Route from '@ember/routing/route'; import DS from 'ember-data'; +import { inject as service } from '@ember/service'; export default Route.extend({ + pathHelp: service('path-help'), model(params) { const { path } = params; return this.store.findAll('auth-method').then(modelArray => { @@ -12,7 +14,10 @@ export default Route.extend({ set(error, 'httpStatus', 404); throw error; } - return model; + return this.pathHelp.getPaths(model.apiPath, path).then(paths => { + model.set('paths', paths); + return model; + }); }); }, }); diff --git a/ui/app/routes/vault/cluster/access/method/index.js b/ui/app/routes/vault/cluster/access/method/index.js index 58b2fa477aa6..a3bff252cb83 100644 --- a/ui/app/routes/vault/cluster/access/method/index.js +++ b/ui/app/routes/vault/cluster/access/method/index.js @@ -1,7 +1,10 @@ import Route from '@ember/routing/route'; - +import { tabsForAuthSection } from 'vault/helpers/tabs-for-auth-section'; export default Route.extend({ beforeModel() { - return this.transitionTo('vault.cluster.access.method.section', 'configuration'); + let { methodType, paths } = 
this.modelFor('vault.cluster.access.method'); + paths = paths ? paths.navPaths.reduce((acc, cur) => acc.concat(cur.path), []) : null; + const activeTab = tabsForAuthSection([methodType, 'authConfig', paths])[0].routeParams; + return this.transitionTo(...activeTab); }, }); diff --git a/ui/app/routes/vault/cluster/access/method/item.js b/ui/app/routes/vault/cluster/access/method/item.js new file mode 100644 index 000000000000..45b52698d7a9 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item.js @@ -0,0 +1,32 @@ +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import { singularize } from 'ember-inflector'; + +export default Route.extend({ + wizard: service(), + pathHelp: service('path-help'), + + beforeModel() { + const { apiPath, type, method, itemType } = this.getMethodAndModelInfo(); + let modelType = `generated-${singularize(itemType)}-${type}`; + return this.pathHelp.getNewModel(modelType, method, apiPath, itemType); + }, + + getMethodAndModelInfo() { + const { item_type: itemType } = this.paramsFor(this.routeName); + const { path: method } = this.paramsFor('vault.cluster.access.method'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { apiPath, type } = methodModel; + return { apiPath, type, method, itemType }; + }, + + setupController(controller) { + this._super(...arguments); + const { apiPath, method, itemType } = this.getMethodAndModelInfo(); + controller.set('itemType', itemType); + controller.set('method', method); + this.pathHelp.getPaths(apiPath, method, itemType).then(paths => { + controller.set('paths', Array.from(paths.list, pathInfo => pathInfo.path)); + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/create.js b/ui/app/routes/vault/cluster/access/method/item/create.js new file mode 100644 index 000000000000..f8073f2cc9e7 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/create.js @@ -0,0 +1,28 @@ +import Route from 
'@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { singularize } from 'ember-inflector'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + model() { + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { type } = methodModel; + const { path: method } = this.paramsFor('vault.cluster.access.method'); + const modelType = `generated-${singularize(itemType)}-${type}`; + return this.store.createRecord(modelType, { + itemType, + method, + adapterOptions: { path: `${method}/${itemType}` }, + }); + }, + + setupController(controller) { + this._super(...arguments); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const { path: method } = this.paramsFor('vault.cluster.access.method'); + controller.set('itemType', singularize(itemType)); + controller.set('mode', 'create'); + controller.set('method', method); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/edit.js b/ui/app/routes/vault/cluster/access/method/item/edit.js new file mode 100644 index 000000000000..8d9126f2a9e0 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/edit.js @@ -0,0 +1,25 @@ +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { singularize } from 'ember-inflector'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + model(params) { + const methodModel = this.modelFor('vault.cluster.access.method'); + const { type } = methodModel; + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + let modelType = `generated-${singularize(itemType)}-${type}`; + return this.store.findRecord(modelType, params.item_id); + }, + 
+ setupController(controller) { + this._super(...arguments); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const { path: method } = this.paramsFor('vault.cluster.access.method'); + const { item_id: itemName } = this.paramsFor(this.routeName); + controller.set('itemType', singularize(itemType)); + controller.set('mode', 'edit'); + controller.set('method', method); + controller.set('itemName', itemName); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/list.js b/ui/app/routes/vault/cluster/access/method/item/list.js new file mode 100644 index 000000000000..5c62ed3f3386 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/list.js @@ -0,0 +1,61 @@ +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import { singularize } from 'ember-inflector'; +import ListRoute from 'vault/mixins/list-route'; + +export default Route.extend(ListRoute, { + wizard: service(), + pathHelp: service('path-help'), + + getMethodAndModelInfo() { + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const { path: method } = this.paramsFor('vault.cluster.access.method'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { apiPath, type } = methodModel; + return { apiPath, type, method, itemType }; + }, + + model() { + const { type, method, itemType } = this.getMethodAndModelInfo(); + const { page, pageFilter } = this.paramsFor(this.routeName); + let modelType = `generated-${singularize(itemType)}-${type}`; + + return this.store + .lazyPaginatedQuery(modelType, { + responsePath: 'data.keys', + page: page, + pageFilter: pageFilter, + type: itemType, + method: method, + }) + .catch(err => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + }, + actions: { + willTransition(transition) { + window.scrollTo(0, 0); + if (transition.targetName !== this.routeName) { + 
this.store.clearAllDatasets(); + } + return true; + }, + reload() { + this.store.clearAllDatasets(); + this.refresh(); + }, + }, + setupController(controller) { + this._super(...arguments); + const { apiPath, method, itemType } = this.getMethodAndModelInfo(); + controller.set('itemType', singularize(itemType)); + controller.set('method', method); + this.pathHelp.getPaths(apiPath, method, itemType).then(paths => { + controller.set('paths', paths.navPaths.reduce((acc, cur) => acc.concat(cur.path), [])); + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/show.js b/ui/app/routes/vault/cluster/access/method/item/show.js new file mode 100644 index 000000000000..a80bc2e9af49 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/show.js @@ -0,0 +1,24 @@ +import { singularize } from 'ember-inflector'; +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; + +export default Route.extend({ + pathHelp: service('path-help'), + model() { + const { item_id: itemName } = this.paramsFor(this.routeName); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const { path: method } = this.paramsFor('vault.cluster.access.method'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { type } = methodModel; + const modelType = `generated-${singularize(itemType)}-${type}`; + return this.store.findRecord(modelType, itemName, { + adapterOptions: { path: `${method}/${itemType}` }, + }); + }, + + setupController(controller) { + this._super(...arguments); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + controller.set('itemType', singularize(itemType)); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/section.js b/ui/app/routes/vault/cluster/access/method/section.js index 52053fe39b05..ee58779d6ecc 100644 --- a/ui/app/routes/vault/cluster/access/method/section.js +++ 
b/ui/app/routes/vault/cluster/access/method/section.js @@ -1,3 +1,4 @@ +/* eslint-disable prettier/prettier */ import { set } from '@ember/object'; import { inject as service } from '@ember/service'; import Route from '@ember/routing/route'; @@ -5,6 +6,7 @@ import DS from 'ember-data'; export default Route.extend({ wizard: service(), + model(params) { const { section_name: section } = params; if (section !== 'configuration') { @@ -13,11 +15,7 @@ export default Route.extend({ throw error; } let backend = this.modelFor('vault.cluster.access.method'); - this.get('wizard').transitionFeatureMachine( - this.get('wizard.featureState'), - 'DETAILS', - backend.get('type') - ); + this.wizard.transitionFeatureMachine(this.wizard.featureState, 'DETAILS', backend.type); return backend; }, @@ -25,5 +23,8 @@ export default Route.extend({ const { section_name: section } = this.paramsFor(this.routeName); this._super(...arguments); controller.set('section', section); + let method = this.modelFor('vault.cluster.access.method'); + let paths = method.paths.navPaths.map(pathInfo => pathInfo.path); + controller.set('paths', paths); }, }); diff --git a/ui/app/routes/vault/cluster/policies/index.js b/ui/app/routes/vault/cluster/policies/index.js index a1aac6911f37..93214b9875ae 100644 --- a/ui/app/routes/vault/cluster/policies/index.js +++ b/ui/app/routes/vault/cluster/policies/index.js @@ -1,7 +1,7 @@ import { inject as service } from '@ember/service'; import Route from '@ember/routing/route'; import ClusterRoute from 'vault/mixins/cluster-route'; -import ListRoute from 'vault/mixins/list-route'; +import ListRoute from 'core/mixins/list-route'; export default Route.extend(ClusterRoute, ListRoute, { version: service(), diff --git a/ui/app/routes/vault/cluster/replication.js b/ui/app/routes/vault/cluster/replication.js deleted file mode 100644 index 900927c3ad86..000000000000 --- a/ui/app/routes/vault/cluster/replication.js +++ /dev/null @@ -1,38 +0,0 @@ -import { inject as service } from 
'@ember/service'; -import { setProperties } from '@ember/object'; -import { hash } from 'rsvp'; -import Route from '@ember/routing/route'; -import ClusterRoute from 'vault/mixins/cluster-route'; - -export default Route.extend(ClusterRoute, { - version: service(), - - beforeModel() { - return this.get('version') - .fetchFeatures() - .then(() => { - return this._super(...arguments); - }); - }, - - model() { - return this.modelFor('vault.cluster'); - }, - - afterModel(model) { - return hash({ - canEnablePrimary: this.store - .findRecord('capabilities', 'sys/replication/primary/enable') - .then(c => c.get('canUpdate')), - canEnableSecondary: this.store - .findRecord('capabilities', 'sys/replication/secondary/enable') - .then(c => c.get('canUpdate')), - }).then(({ canEnablePrimary, canEnableSecondary }) => { - setProperties(model, { - canEnablePrimary, - canEnableSecondary, - }); - return model; - }); - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/index.js b/ui/app/routes/vault/cluster/replication/index.js deleted file mode 100644 index 5aabd99f7b36..000000000000 --- a/ui/app/routes/vault/cluster/replication/index.js +++ /dev/null @@ -1,12 +0,0 @@ -import { inject as service } from '@ember/service'; -import Route from '@ember/routing/route'; - -export default Route.extend({ - replicationMode: service(), - beforeModel() { - this.get('replicationMode').setMode(null); - }, - model() { - return this.modelFor('vault.cluster.replication'); - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/mode.js b/ui/app/routes/vault/cluster/replication/mode.js deleted file mode 100644 index 1494760c1f95..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode.js +++ /dev/null @@ -1,27 +0,0 @@ -import { on } from '@ember/object/evented'; -import { inject as service } from '@ember/service'; -import Route from '@ember/routing/route'; - -const SUPPORTED_REPLICATION_MODES = ['dr', 'performance']; - -export default Route.extend({ - replicationMode: service(), 
- - beforeModel() { - const replicationMode = this.paramsFor(this.routeName).replication_mode; - if (!SUPPORTED_REPLICATION_MODES.includes(replicationMode)) { - return this.transitionTo('vault.cluster.replication'); - } else { - return this._super(...arguments); - } - }, - - model() { - return this.modelFor('vault.cluster.replication'); - }, - - setReplicationMode: on('activate', 'enter', function() { - const replicationMode = this.paramsFor(this.routeName).replication_mode; - this.get('replicationMode').setMode(replicationMode); - }), -}); diff --git a/ui/app/routes/vault/cluster/replication/mode/index.js b/ui/app/routes/vault/cluster/replication/mode/index.js deleted file mode 100644 index b2663787983c..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/index.js +++ /dev/null @@ -1,13 +0,0 @@ -import { inject as service } from '@ember/service'; -import Route from '@ember/routing/route'; - -export default Route.extend({ - replicationMode: service(), - beforeModel() { - const replicationMode = this.paramsFor('vault.cluster.replication.mode').replication_mode; - this.get('replicationMode').setMode(replicationMode); - }, - model() { - return this.modelFor('vault.cluster.replication.mode'); - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/mode/manage.js b/ui/app/routes/vault/cluster/replication/mode/manage.js deleted file mode 100644 index 9a30216d34b0..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/manage.js +++ /dev/null @@ -1,47 +0,0 @@ -import { camelize } from '@ember/string'; -import { all } from 'rsvp'; -import { inject as service } from '@ember/service'; -import Route from '@ember/routing/route'; -import { replicationActionForMode } from 'vault/helpers/replication-action-for-mode'; - -const pathForAction = (action, replicationMode, clusterMode) => { - let path; - if (action === 'reindex' || action === 'recover') { - path = `sys/replication/${action}`; - } else { - path = 
`sys/replication/${replicationMode}/${clusterMode}/${action}`; - } - return path; -}; - -export default Route.extend({ - store: service(), - model() { - const store = this.get('store'); - const model = this.modelFor('vault.cluster.replication.mode'); - - const replicationMode = this.paramsFor('vault.cluster.replication.mode').replication_mode; - const clusterMode = model.get(replicationMode).get('modeForUrl'); - const actions = replicationActionForMode([replicationMode, clusterMode]); - return all( - actions.map(action => { - return store.findRecord('capabilities', pathForAction(action)).then(capability => { - model.set(`can${camelize(action)}`, capability.get('canUpdate')); - }); - }) - ).then(() => { - return model; - }); - }, - - beforeModel() { - const model = this.modelFor('vault.cluster.replication.mode'); - const replicationMode = this.paramsFor('vault.cluster.replication.mode').replication_mode; - if ( - model.get(replicationMode).get('replicationDisabled') || - model.get(replicationMode).get('replicationUnsupported') - ) { - return this.transitionTo('vault.cluster.replication.mode', replicationMode); - } - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/mode/secondaries.js b/ui/app/routes/vault/cluster/replication/mode/secondaries.js deleted file mode 100644 index d952f773b3c6..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/secondaries.js +++ /dev/null @@ -1,35 +0,0 @@ -import { setProperties } from '@ember/object'; -import { hash } from 'rsvp'; -import Route from '@ember/routing/route'; - -export default Route.extend({ - model() { - const replicationMode = this.paramsFor('vault.cluster.replication.mode').replication_mode; - - return hash({ - cluster: this.modelFor('vault.cluster.replication.mode'), - canAddSecondary: this.store - .findRecord('capabilities', `sys/replication/${replicationMode}/primary/secondary-token`) - .then(c => c.get('canUpdate')), - canRevokeSecondary: this.store - .findRecord('capabilities', 
`sys/replication/${replicationMode}/primary/revoke-secondary`) - .then(c => c.get('canUpdate')), - }).then(({ cluster, canAddSecondary, canRevokeSecondary }) => { - setProperties(cluster, { - canRevokeSecondary, - canAddSecondary, - }); - return cluster; - }); - }, - afterModel(model) { - const replicationMode = this.paramsFor('vault.cluster.replication.mode').replication_mode; - if ( - !model.get(`${replicationMode}.isPrimary`) || - model.get(`${replicationMode}.replicationDisabled`) || - model.get(`${replicationMode}.replicationUnsupported`) - ) { - return this.transitionTo('vault.cluster.replication.mode', replicationMode); - } - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/mode/secondaries/add.js b/ui/app/routes/vault/cluster/replication/mode/secondaries/add.js deleted file mode 100644 index 04843f225be1..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/secondaries/add.js +++ /dev/null @@ -1,27 +0,0 @@ -import { hash } from 'rsvp'; -import Base from '../../replication-base'; - -export default Base.extend({ - model() { - return hash({ - cluster: this.modelFor('vault.cluster.replication.mode.secondaries'), - mounts: this.fetchMounts(), - }); - }, - - redirect(model) { - const replicationMode = this.get('replicationMode'); - if (!model.cluster.get(`${replicationMode}.isPrimary`) || !model.cluster.get('canAddSecondary')) { - return this.transitionTo('vault.cluster.replication.mode', model.cluster.get('name'), replicationMode); - } - }, - - setupController(controller, model) { - controller.set('model', model.cluster); - controller.set('mounts', model.mounts); - }, - - resetController(controller) { - controller.reset(); - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/mode/secondaries/config-edit.js b/ui/app/routes/vault/cluster/replication/mode/secondaries/config-edit.js deleted file mode 100644 index f2fd220c126f..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/secondaries/config-edit.js +++ /dev/null @@ 
-1,27 +0,0 @@ -import { hash } from 'rsvp'; -import Base from '../../replication-base'; - -export default Base.extend({ - modelPath: 'model.config', - - model(params) { - return hash({ - cluster: this.modelFor('vault.cluster.replication.mode.secondaries'), - config: this.store.findRecord('mount-filter-config', params.secondary_id), - mounts: this.fetchMounts(), - }); - }, - - redirect(model) { - const cluster = model.cluster; - const replicationMode = this.get('replicationMode'); - if ( - !this.get('version.hasPerfReplication') || - replicationMode !== 'performance' || - !cluster.get(`${replicationMode}.isPrimary`) || - !cluster.get('canAddSecondary') - ) { - return this.transitionTo('vault.cluster.replication.mode', cluster.get('name'), replicationMode); - } - }, -}); diff --git a/ui/app/routes/vault/cluster/replication/mode/secondaries/config-show.js b/ui/app/routes/vault/cluster/replication/mode/secondaries/config-show.js deleted file mode 100644 index de7b644d3fbd..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/secondaries/config-show.js +++ /dev/null @@ -1,32 +0,0 @@ -import { hash, resolve } from 'rsvp'; -import Base from '../../replication-base'; - -export default Base.extend({ - modelPath: 'model.config', - - model(params) { - const id = params.secondary_id; - return hash({ - cluster: this.modelFor('vault.cluster.replication'), - config: this.store.findRecord('mount-filter-config', id).catch(e => { - if (e.httpStatus === 404) { - // return an empty obj to let them nav to create - return resolve({ id }); - } else { - throw e; - } - }), - }); - }, - redirect(model) { - const cluster = model.cluster; - const replicationMode = this.paramsFor('vault.cluster.replication.mode').replication_mode; - if ( - !this.get('version.hasPerfReplication') || - replicationMode !== 'performance' || - !cluster.get(`${replicationMode}.isPrimary`) - ) { - return this.transitionTo('vault.cluster.replication.mode', cluster.get('name'), replicationMode); - } - }, 
-}); diff --git a/ui/app/routes/vault/cluster/replication/mode/secondaries/revoke.js b/ui/app/routes/vault/cluster/replication/mode/secondaries/revoke.js deleted file mode 100644 index 041a5913f2a3..000000000000 --- a/ui/app/routes/vault/cluster/replication/mode/secondaries/revoke.js +++ /dev/null @@ -1,18 +0,0 @@ -import Base from '../../replication-base'; - -export default Base.extend({ - model() { - return this.modelFor('vault.cluster.replication.mode.secondaries'); - }, - - redirect(model) { - const replicationMode = this.get('replicationMode'); - if (!model.get(`${replicationMode}.isPrimary`) || !model.get('canRevokeSecondary')) { - return this.transitionTo('vault.cluster.replication', model.get('name')); - } - }, - - resetController(controller) { - controller.reset(); - }, -}); diff --git a/ui/app/routes/vault/cluster/requests.js b/ui/app/routes/vault/cluster/requests.js new file mode 100644 index 000000000000..0fb4b7e58244 --- /dev/null +++ b/ui/app/routes/vault/cluster/requests.js @@ -0,0 +1,7 @@ +import ClusterRouteBase from './cluster-route-base'; + +export default ClusterRouteBase.extend({ + model() { + return this.store.queryRecord('requests', {}); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend.js b/ui/app/routes/vault/cluster/secrets/backend.js index 9003842e013f..1dafeda49a2a 100644 --- a/ui/app/routes/vault/cluster/secrets/backend.js +++ b/ui/app/routes/vault/cluster/secrets/backend.js @@ -2,9 +2,11 @@ import { inject as service } from '@ember/service'; import Route from '@ember/routing/route'; export default Route.extend({ flashMessages: service(), + secretMountPath: service(), oldModel: null, model(params) { let { backend } = params; + this.secretMountPath.update(backend); return this.store .query('secret-engine', { path: backend, diff --git a/ui/app/routes/vault/cluster/secrets/backend/credentials.js b/ui/app/routes/vault/cluster/secrets/backend/credentials.js index 73e4df6100e3..1b8d4efafb93 100644 --- 
a/ui/app/routes/vault/cluster/secrets/backend/credentials.js +++ b/ui/app/routes/vault/cluster/secrets/backend/credentials.js @@ -1,6 +1,5 @@ import { resolve } from 'rsvp'; import Route from '@ember/routing/route'; -import { getOwner } from '@ember/application'; import { inject as service } from '@ember/service'; const SUPPORTED_DYNAMIC_BACKENDS = ['ssh', 'aws', 'pki']; @@ -19,8 +18,7 @@ export default Route.extend({ return; } let modelType = 'ssh-otp-credential'; - let owner = getOwner(this); - return this.pathHelp.getNewModel(modelType, owner, backend); + return this.pathHelp.getNewModel(modelType, backend); }, model(params) { diff --git a/ui/app/routes/vault/cluster/secrets/backend/list.js b/ui/app/routes/vault/cluster/secrets/backend/list.js index c0d0025885e2..7a671ff5aa7e 100644 --- a/ui/app/routes/vault/cluster/secrets/backend/list.js +++ b/ui/app/routes/vault/cluster/secrets/backend/list.js @@ -1,7 +1,6 @@ import { set } from '@ember/object'; import { hash, all } from 'rsvp'; import Route from '@ember/routing/route'; -import { getOwner } from '@ember/application'; import { supportedSecretBackends } from 'vault/helpers/supported-secret-backends'; import { inject as service } from '@ember/service'; import { normalizePath } from 'vault/utils/path-encoding-helpers'; @@ -34,7 +33,6 @@ export default Route.extend({ }, beforeModel() { - let owner = getOwner(this); let secret = this.secretParam(); let backend = this.enginePathParam(); let { tab } = this.paramsFor('vault.cluster.secrets.backend'); @@ -47,7 +45,7 @@ export default Route.extend({ return this.replaceWith('vault.cluster.secrets.backend.list', secret + '/'); } let modelType = this.getModelType(backend, tab); - return this.pathHelp.getNewModel(modelType, owner, backend).then(() => { + return this.pathHelp.getNewModel(modelType, backend).then(() => { this.store.unloadAll('capabilities'); }); }, diff --git a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js 
b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js index d7b539267d66..4e3599ca527a 100644 --- a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js +++ b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js @@ -4,7 +4,6 @@ import { inject as service } from '@ember/service'; import DS from 'ember-data'; import Route from '@ember/routing/route'; import utils from 'vault/lib/key-utils'; -import { getOwner } from '@ember/application'; import UnloadModelRoute from 'vault/mixins/unload-model-route'; import { encodePath, normalizePath } from 'vault/utils/path-encoding-helpers'; @@ -63,8 +62,7 @@ export default Route.extend(UnloadModelRoute, { if (['secret', 'secret-v2'].includes(modelType)) { return resolve(); } - let owner = getOwner(this); - return this.pathHelp.getNewModel(modelType, owner, backend); + return this.pathHelp.getNewModel(modelType, backend); }, modelType(backend, secret) { diff --git a/ui/app/routes/vault/cluster/settings/auth/configure/index.js b/ui/app/routes/vault/cluster/settings/auth/configure/index.js index e8db754ab5dc..2388eb55cf40 100644 --- a/ui/app/routes/vault/cluster/settings/auth/configure/index.js +++ b/ui/app/routes/vault/cluster/settings/auth/configure/index.js @@ -4,8 +4,8 @@ import { tabsForAuthSection } from 'vault/helpers/tabs-for-auth-section'; export default Route.extend({ beforeModel() { - const type = this.modelFor('vault.cluster.settings.auth.configure').get('type'); - const section = get(tabsForAuthSection([type]), 'firstObject.routeParams.lastObject'); + const model = this.modelFor('vault.cluster.settings.auth.configure'); + const section = get(tabsForAuthSection([model]), 'firstObject.routeParams.lastObject'); return this.transitionTo('vault.cluster.settings.auth.configure.section', section); }, }); diff --git a/ui/app/routes/vault/cluster/settings/auth/configure/section.js b/ui/app/routes/vault/cluster/settings/auth/configure/section.js index ea08cdd526ea..c80082884c32 100644 --- 
a/ui/app/routes/vault/cluster/settings/auth/configure/section.js +++ b/ui/app/routes/vault/cluster/settings/auth/configure/section.js @@ -4,7 +4,6 @@ import Route from '@ember/routing/route'; import RSVP from 'rsvp'; import DS from 'ember-data'; import UnloadModelRoute from 'vault/mixins/unload-model-route'; -import { getOwner } from '@ember/application'; export default Route.extend(UnloadModelRoute, { modelPath: 'model.model', @@ -36,8 +35,7 @@ export default Route.extend(UnloadModelRoute, { const { method } = this.paramsFor('vault.cluster.settings.auth.configure'); const backend = this.modelFor('vault.cluster.settings.auth.configure'); const modelType = this.modelType(backend.type, section_name); - let owner = getOwner(this); - return this.pathHelp.getNewModel(modelType, owner, method); + return this.pathHelp.getNewModel(modelType, method, backend.apiPath); }, model(params) { diff --git a/ui/app/serializers/application.js b/ui/app/serializers/application.js index d19241e28f5d..517bb377483c 100644 --- a/ui/app/serializers/application.js +++ b/ui/app/serializers/application.js @@ -15,7 +15,13 @@ export default DS.JSONSerializer.extend({ return key; } let pk = this.get('primaryKey') || 'id'; - return { [pk]: key }; + let model = { [pk]: key }; + // if we've added _requestQuery in the adapter, we want + // attach it to the individual models + if (payload._requestQuery) { + model = { ...model, ...payload._requestQuery }; + } + return model; }); return models; } @@ -37,10 +43,15 @@ export default DS.JSONSerializer.extend({ normalizeResponse(store, primaryModelClass, payload, id, requestType) { const responseJSON = this.normalizeItems(payload, requestType); + delete payload._requestQuery; if (id && !responseJSON.id) { responseJSON.id = id; } - return this._super(store, primaryModelClass, responseJSON, id, requestType); + let jsonAPIRepresentation = this._super(store, primaryModelClass, responseJSON, id, requestType); + if (primaryModelClass.relatedCapabilities) { + 
jsonAPIRepresentation = primaryModelClass.relatedCapabilities(jsonAPIRepresentation); + } + return jsonAPIRepresentation; }, serializeAttribute(snapshot, json, key, attributes) { diff --git a/ui/app/serializers/transit-key.js b/ui/app/serializers/transit-key.js index 684f78906c2e..bf455162e526 100644 --- a/ui/app/serializers/transit-key.js +++ b/ui/app/serializers/transit-key.js @@ -16,6 +16,12 @@ export default DS.RESTSerializer.extend({ } assign(payload, payload.data); delete payload.data; + // timestamps for these two are in seconds... + if (payload.type === 'aes256-gcm96' || payload.type === 'chacha20-poly1305') { + for (let version in payload.keys) { + payload.keys[version] = payload.keys[version] * 1000; + } + } return [payload]; }, diff --git a/ui/app/services/auth.js b/ui/app/services/auth.js index f9704de33903..c757b0c7da6f 100644 --- a/ui/app/services/auth.js +++ b/ui/app/services/auth.js @@ -1,9 +1,10 @@ import Ember from 'ember'; -import { resolve } from 'rsvp'; +import { resolve, reject } from 'rsvp'; import { assign } from '@ember/polyfills'; -import $ from 'jquery'; import { isArray } from '@ember/array'; import { computed, get } from '@ember/object'; + +import fetch from 'fetch'; import { getOwner } from '@ember/application'; import Service, { inject as service } from '@ember/service'; import getStorage from '../lib/token-storage'; @@ -86,7 +87,18 @@ export default Service.extend({ if (namespace) { defaults.headers['X-Vault-Namespace'] = namespace; } - return $.ajax(assign(defaults, options)); + let opts = assign(defaults, options); + + return fetch(url, { + method: opts.method || 'GET', + headers: opts.headers || {}, + }).then(response => { + if (response.status >= 200 && response.status < 300) { + return resolve(response.json()); + } else { + return reject(); + } + }); }, renewCurrentToken() { diff --git a/ui/app/services/csp-event.js b/ui/app/services/csp-event.js index df1a5779d20c..21a39477792a 100644 --- a/ui/app/services/csp-event.js +++ 
b/ui/app/services/csp-event.js @@ -1,6 +1,5 @@ /*eslint-disable no-constant-condition*/ import { computed } from '@ember/object'; -import { filterBy } from '@ember/object/computed'; import Service from '@ember/service'; import { task, waitForEvent } from 'ember-concurrency'; @@ -9,7 +8,9 @@ export default Service.extend({ events: computed(function() { return []; }), - connectionViolations: filterBy('events', 'violatedDirective', 'connect-src'), + connectionViolations: computed('events.[].violatedDirective', function() { + return this.get('events').filter(e => e.violatedDirective.startsWith('connect-src')); + }), attach() { this.monitor.perform(); diff --git a/ui/app/services/path-help.js b/ui/app/services/path-help.js index ba90c2d63f9a..16bf86628aac 100644 --- a/ui/app/services/path-help.js +++ b/ui/app/services/path-help.js @@ -4,11 +4,17 @@ has less (or no) information about. */ import Service from '@ember/service'; - +import DS from 'ember-data'; +import { encodePath } from 'vault/utils/path-encoding-helpers'; import { getOwner } from '@ember/application'; +import { capitalize } from '@ember/string'; +import { assign } from '@ember/polyfills'; import { expandOpenApiProps, combineAttributes } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; import { resolve } from 'rsvp'; +import { debug } from '@ember/debug'; +import generatedItemAdapter from 'vault/adapters/generated-item-list'; export function sanitizePath(path) { //remove whitespace + remove trailing and leading slashes return path.trim().replace(/^\/+|\/+$/g, ''); @@ -24,37 +30,235 @@ export default Service.extend({ }); }, + getNewModel(modelType, backend, apiPath, itemType) { + let owner = getOwner(this); + const modelName = `model:${modelType}`; + const modelFactory = owner.factoryFor(modelName); + let newModel, helpUrl; + //if we have a factory, we need to take the existing model into account + if (modelFactory) { + debug(`Model factory found for 
${modelType}`); + newModel = modelFactory.class; + const modelProto = newModel.proto(); + if (newModel.merged || modelProto.useOpenAPI !== true) { + return resolve(); + } + helpUrl = modelProto.getHelpUrl(backend); + return this.registerNewModelWithProps(helpUrl, backend, newModel, modelName); + } else { + debug(`Creating new Model for ${modelType}`); + newModel = DS.Model.extend({}); + //use paths to dynamically create our openapi help url + //if we have a brand new model + return this.getPaths(apiPath, backend, itemType).then(paths => { + const adapterFactory = owner.factoryFor(`adapter:${modelType}`); + //if we have an adapter already use that, otherwise create one + if (!adapterFactory) { + debug(`Creating new adapter for ${modelType}`); + const adapter = this.getNewAdapter(backend, paths, itemType); + owner.register(`adapter:${modelType}`, adapter); + } + //if we have an item we want the create info for that itemType + let tag, path; + if (itemType) { + const createPath = paths.create.find(path => path.path.includes(itemType)); + tag = createPath.tag; //tag is for type of backend, e.g. 
auth or secret + path = createPath.path; + path = path.slice(0, path.indexOf('{') - 1) + '/example'; + } else { + //we need the mount config + tag = paths.configPath[0].tag; + path = paths.configPath[0].path; + } + helpUrl = `/v1/${tag}/${backend}${path}?help=true`; + return this.registerNewModelWithProps(helpUrl, backend, newModel, modelName); + }); + } + }, + + reducePaths(paths, currentPath) { + const pathName = currentPath[0]; + const pathInfo = currentPath[1]; + //config is a get/post endpoint that doesn't take route params + //and isn't also a list endpoint + if ( + pathInfo.post && + pathInfo.get && + (pathInfo['x-vault-displayAttrs'] && pathInfo['x-vault-displayAttrs'].action === 'Configure') + ) { + paths.configPath.push({ path: pathName, tag: pathInfo.get.tags[0] }); + return paths; //config path should only be config path + } + + //list endpoints all have { name: "list" } in their get parameters + if (pathInfo.get && pathInfo.get.parameters && pathInfo.get.parameters[0].name === 'list') { + paths.list.push({ path: pathName, tag: pathInfo.get.tags[0] }); + } + + if (pathInfo.delete) { + paths.delete.push({ path: pathName, tag: pathInfo.delete.tags[0] }); + } + + //create endpoints have path an action (e.g. 
"Create" or "Generate") + if (pathInfo.post && pathInfo['x-vault-displayAttrs'] && pathInfo['x-vault-displayAttrs'].action) { + paths.create.push({ + path: pathName, + tag: pathInfo.post.tags[0], + action: pathInfo['x-vault-displayAttrs'].action, + }); + } + + if (pathInfo['x-vault-displayAttrs'] && pathInfo['x-vault-displayAttrs'].navigation) { + paths.navPaths.push({ path: pathName }); + } + + return paths; + }, + + getPaths(apiPath, backend) { + debug(`Fetching relevant paths for ${backend} from ${apiPath}`); + return this.ajax(`/v1/${apiPath}?help=1`, backend).then(help => { + const pathInfo = help.openapi.paths; + let paths = Object.entries(pathInfo); + + return paths.reduce(this.reducePaths, { + apiPath: [], + configPath: [], + list: [], + create: [], + delete: [], + navPaths: [], + }); + }); + }, + //Makes a call to grab the OpenAPI document. //Returns relevant information from OpenAPI //as determined by the expandOpenApiProps util getProps(helpUrl, backend) { + debug(`Fetching schema properties for ${backend} from ${helpUrl}`); + return this.ajax(helpUrl, backend).then(help => { - let path = Object.keys(help.openapi.paths)[0]; - let props = help.openapi.paths[path].post.requestBody.content['application/json'].schema.properties; - return expandOpenApiProps(props); + //paths is an array but it will have a single entry + // for the scope we're in + const path = Object.keys(help.openapi.paths)[0]; + const pathInfo = help.openapi.paths[path]; + const params = pathInfo.parameters; + let paramProp = {}; + + //include url params + if (params) { + const { name, schema, description } = params[0]; + let label = capitalize(name); + if (label.toLowerCase() !== 'name') { + label += ' name'; + } + paramProp[name] = { + 'x-vault-displayAttrs': { + name: name, + group: 'default', + }, + label: label, + type: schema.type, + description: description, + isId: true, + }; + } + + //TODO: handle post endpoints without requestBody + const props = 
pathInfo.post.requestBody.content['application/json'].schema.properties; + //put url params (e.g. {name}, {role}) + //at the front of the props list + const newProps = assign({}, paramProp, props); + return expandOpenApiProps(newProps); }); }, - getNewModel(modelType, owner, backend) { - let name = `model:${modelType}`; - let newModel = owner.factoryFor(name).class; - let modelProto = newModel.proto(); - if (newModel.merged || modelProto.useOpenAPI !== true) { - return resolve(); - } - let helpUrl = modelProto.getHelpUrl(backend); + getNewAdapter(backend, paths, itemType) { + //we need list and create paths to set the correct urls for actions + const { list, create } = paths; + const createPath = create.find(path => path.path.includes(itemType)); + const listPath = list.find(pathInfo => pathInfo.path.includes(itemType)); + const deletePath = paths.delete.find(path => path.path.includes(itemType)); + return generatedItemAdapter.extend({ + urlForItem(method, id) { + let { tag, path } = listPath; + let url = `${this.buildURL()}/${tag}/${backend}${path}/`; + if (id) { + url = url + encodePath(id); + } + return url; + }, + + urlForFindRecord(id, modelName, snapshot) { + return this.urlForItem(modelName, id, snapshot); + }, + + urlForUpdateRecord(id) { + let { tag, path } = createPath; + path = path.slice(0, path.indexOf('{') - 1); + return `${this.buildURL()}/${tag}/${backend}${path}/${id}`; + }, + urlForCreateRecord(modelType, snapshot) { + const { id } = snapshot; + let { tag, path } = createPath; + path = path.slice(0, path.indexOf('{') - 1); + return `${this.buildURL()}/${tag}/${backend}${path}/${id}`; + }, + + urlForDeleteRecord(id) { + let { tag, path } = deletePath; + path = path.slice(0, path.indexOf('{') - 1); + return `${this.buildURL()}/${tag}/${backend}${path}/${id}`; + }, + }); + }, + + registerNewModelWithProps(helpUrl, backend, newModel, modelName) { return this.getProps(helpUrl, backend).then(props => { - if (owner.hasRegistration(name) && 
!newModel.merged) { - let { attrs, newFields } = combineAttributes(newModel.attributes, props); - newModel = newModel.extend(attrs, { newFields }); - } else { - //generate a whole new model + const { attrs, newFields } = combineAttributes(newModel.attributes, props); + let owner = getOwner(this); + newModel = newModel.extend(attrs, { newFields }); + //if our newModel doesn't have fieldGroups already + //we need to create them + try { + let fieldGroups = newModel.proto().fieldGroups; + if (!fieldGroups) { + debug(`Constructing fieldGroups for ${backend}`); + fieldGroups = this.getFieldGroups(newModel); + newModel = newModel.extend({ fieldGroups }); + } + } catch (err) { + //eat the error, fieldGroups is computed in the model definition } - newModel.reopenClass({ merged: true }); - owner.unregister(name); - owner.register(name, newModel); + owner.unregister(modelName); + owner.register(modelName, newModel); }); }, + getFieldGroups(newModel) { + let groups = { + default: [], + }; + let fieldGroups = []; + newModel.attributes.forEach(attr => { + //if the attr comes in with a fieldGroup from OpenAPI, + //add it to that group + if (attr.options.fieldGroup) { + if (groups[attr.options.fieldGroup]) { + groups[attr.options.fieldGroup].push(attr.name); + } else { + groups[attr.options.fieldGroup] = [attr.name]; + } + } else { + //otherwise just add that attr to the default group + groups.default.push(attr.name); + } + }); + for (let group in groups) { + fieldGroups.push({ [group]: groups[group] }); + } + return fieldToAttrs(newModel, fieldGroups); + }, }); diff --git a/ui/app/services/permissions.js b/ui/app/services/permissions.js index 5e3de8b7df95..929cc4d8c184 100644 --- a/ui/app/services/permissions.js +++ b/ui/app/services/permissions.js @@ -28,6 +28,9 @@ const API_PATHS = { license: 'sys/license', seal: 'sys/seal', }, + metrics: { + requests: 'sys/internal/counters/requests', + }, }; const API_PATHS_TO_ROUTE_PARAMS = { diff --git a/ui/app/services/secret-mount-path.js 
b/ui/app/services/secret-mount-path.js new file mode 100644 index 000000000000..4d6caa83096b --- /dev/null +++ b/ui/app/services/secret-mount-path.js @@ -0,0 +1,14 @@ +import Service from '@ember/service'; + +// this service tracks the path of the currently viewed secret mount +// so that we can access that inside of engines where parent route params +// are not accessible +export default Service.extend({ + currentPath: null, + update(path) { + this.set('currentPath', path); + }, + get() { + return this.currentPath; + }, +}); diff --git a/ui/app/styles/components/auth-form.scss b/ui/app/styles/components/auth-form.scss index 4bdc2ab92c6d..cd7752493d7c 100644 --- a/ui/app/styles/components/auth-form.scss +++ b/ui/app/styles/components/auth-form.scss @@ -18,3 +18,19 @@ justify-content: center; align-items: center; } + +.toolbar-namespace-picker { + padding: 0 $spacing-s; + + .field { + width: 100%; + } + + .field-label { + margin-right: $spacing-s; + } + + .is-label { + color: $grey; + } +} diff --git a/ui/app/styles/components/confirm.scss b/ui/app/styles/components/confirm.scss index 105dc5d11524..f85f7f87677f 100644 --- a/ui/app/styles/components/confirm.scss +++ b/ui/app/styles/components/confirm.scss @@ -21,3 +21,41 @@ text-align: left; } } + +.popup-menu-content .confirm-action-message { + margin: 0; + + .message { + border: 0; + font-size: $size-8; + line-height: 1.33; + margin: 0; + } + + .message-title { + font-size: 1rem; + } + + .hs-icon { + color: $yellow; + } + + p { + font-weight: $font-weight-semibold; + margin-left: $spacing-l; + padding-left: $spacing-xxs; + padding-top: 0; + } + + .confirm-action-options { + border-top: $light-border; + display: flex; + padding: $spacing-xxs; + + .link { + flex: 1; + text-align: center; + width: auto; + } + } +} diff --git a/ui/app/styles/components/console-ui-panel.scss b/ui/app/styles/components/console-ui-panel.scss index 26523f76c115..170af92dec9b 100644 --- a/ui/app/styles/components/console-ui-panel.scss +++ 
b/ui/app/styles/components/console-ui-panel.scss @@ -106,7 +106,7 @@ margin-left: calc(#{$console-spacing} - 0.33rem); position: relative; - .icon { + .hs-icon { position: absolute; left: 0; top: 0; diff --git a/ui/app/styles/components/empty-state.scss b/ui/app/styles/components/empty-state.scss index 2cb489aae09f..b023403cfadb 100644 --- a/ui/app/styles/components/empty-state.scss +++ b/ui/app/styles/components/empty-state.scss @@ -2,7 +2,7 @@ align-items: center; color: $grey; display: flex; - background: $ui-gray-050; + background: $ui-gray-010; justify-content: center; padding: $spacing-xxl $spacing-s; box-shadow: 0 -2px 0 -1px $ui-gray-300; diff --git a/ui/app/styles/components/env-banner.scss b/ui/app/styles/components/env-banner.scss index d2851dcf6965..607abd2447fb 100644 --- a/ui/app/styles/components/env-banner.scss +++ b/ui/app/styles/components/env-banner.scss @@ -1,10 +1,24 @@ .env-banner { - &, - &:not(:last-child):not(:last-child) { + align-self: center; + border-radius: 3rem; + background: linear-gradient(135deg, $blue, $purple); + animation: env-banner-color-rotate 8s infinite linear alternate; + color: $white; + margin-top: -20px; + + .hs-icon { margin: 0; } - .level-item { - padding: $size-10 $size-8; + .notification { + background-color: transparent; + line-height: 1.66; + padding: 0 $spacing-s; + } +} + +@keyframes env-banner-color-rotate { + 100% { + filter: hue-rotate(105deg); } } diff --git a/ui/app/styles/components/hs-icon.scss b/ui/app/styles/components/hs-icon.scss new file mode 100644 index 000000000000..904fd9d2be64 --- /dev/null +++ b/ui/app/styles/components/hs-icon.scss @@ -0,0 +1,41 @@ +.hs-icon { + flex: 0 0 auto; + display: inline-flex; + justify-content: center; + align-items: flex-start; + vertical-align: middle; + width: 16px; + height: 16px; + margin: 2px 4px; +} + +.hs-icon svg { + fill: currentColor; + flex: 1 1 0; +} + +.hs-icon-button-right { + margin-left: 0.25rem; + margin-right: -0.5rem; + align-items: center; +} + 
+.hs-icon-s { + width: 12px; + height: 12px; +} + +.hs-icon-l { + width: 20px; + height: 20px; +} + +.hs-icon-xl { + width: 28px; + height: 28px; +} + +.hs-icon-xxl { + width: 32px; + height: 32px; +} diff --git a/ui/app/styles/components/http-requests-bar-chart.scss b/ui/app/styles/components/http-requests-bar-chart.scss new file mode 100644 index 000000000000..62570d1f9b7a --- /dev/null +++ b/ui/app/styles/components/http-requests-bar-chart.scss @@ -0,0 +1,67 @@ +.http-requests-bar-chart-container { + margin-top: $spacing-s; + margin-bottom: $spacing-m; + display: flex; +} + +.http-requests-bar-chart { + margin: auto; + overflow: inherit; + + .tick { + line { + stroke: $light-grey; + } + + text { + fill: $grey; + font-size: $size-8; + } + } + + .gridlines { + .domain { + stroke: unset; + } + + line { + stroke-dasharray: 5 5; + } + } + + .x-axis, + .y-axis { + .domain, + line { + stroke: $grey-light; + } + } +} + +.d3-tooltip { + line-height: 1.25; + padding: $spacing-s; + background: $grey; + color: $ui-gray-010; + border-radius: 2px; + pointer-events: none !important; +} + +/* Creates a small triangle extender for the tooltip */ +.d3-tooltip::after { + box-sizing: border-box; + display: inline; + font-size: 10px; + width: 100%; + color: $grey; + content: '\25BC'; + position: absolute; + text-align: center; +} + +/* Style northward tooltips differently */ +.d3-tooltip.n::after { + margin-top: -4px; + top: 100%; + left: 0; +} diff --git a/ui/app/styles/components/http-requests-table.scss b/ui/app/styles/components/http-requests-table.scss new file mode 100644 index 000000000000..76e87f593184 --- /dev/null +++ b/ui/app/styles/components/http-requests-table.scss @@ -0,0 +1,41 @@ +.http-requests-table { + & .is-collapsed { + visibility: collapse; + } + + & th, + td { + padding: $spacing-s; + } + + & th { + color: $grey-dark; + font-weight: 500; + font-size: $size-8; + } + + & tbody th { + font-size: $size-7; + } + + & tr { + border-bottom: 1px solid $grey-light; + } 
+ + & td { + color: $grey-darkest; + } + + & .percent-change { + font-weight: 500; + font-size: $size-7; + } + + & .arrow-up { + transform: rotate(45deg); + } + + & .arrow-down { + transform: rotate(-45deg); + } +} diff --git a/ui/app/styles/components/info-table-row.scss b/ui/app/styles/components/info-table-row.scss index 75e1e822779e..bca18fd24b7c 100644 --- a/ui/app/styles/components/info-table-row.scss +++ b/ui/app/styles/components/info-table-row.scss @@ -9,7 +9,7 @@ &.thead { box-shadow: 0 1px 0 $grey-light, 0 -1px 0 $grey-light; margin: 0; - padding: 0 $size-6; + padding: 0 $size-6 0 0; .column { padding: 0.5rem 0.75rem; @@ -27,9 +27,15 @@ } } - .icon { + .hs-icon { margin-right: 0.25rem; } + .icon-true { + color: $green-500; + } + .icon-false { + color: $ui-gray-300; + } } .info-table-row:not(.is-mobile) .column { diff --git a/ui/app/styles/components/navigate-input.scss b/ui/app/styles/components/navigate-input.scss new file mode 100644 index 000000000000..471e851770ac --- /dev/null +++ b/ui/app/styles/components/navigate-input.scss @@ -0,0 +1,5 @@ +.search-icon { + position: absolute; + top: 6px; + left: 2px; +} diff --git a/ui/app/styles/components/popup-menu.scss b/ui/app/styles/components/popup-menu.scss index b7c5c1191abd..8f624e2483aa 100644 --- a/ui/app/styles/components/popup-menu.scss +++ b/ui/app/styles/components/popup-menu.scss @@ -112,10 +112,16 @@ .popup-menu-content .level-left { flex-shrink: 1; } -.popup-menu-trigger { - height: 2rem; - min-width: 0; - padding: 0 $size-10; + +.list-item-row, +.info-table-row, +.wizard-dismiss-menu { + .popup-menu-trigger { + height: 2.5rem; + min-width: 0; + padding: 0; + width: 2.5rem; + } } .status-menu-content { diff --git a/ui/app/styles/components/search-select.scss b/ui/app/styles/components/search-select.scss index fe117126d91d..cd764ae96bec 100644 --- a/ui/app/styles/components/search-select.scss +++ b/ui/app/styles/components/search-select.scss @@ -122,3 +122,9 @@ animation: drop-fade-above 0.15s; 
} } + +.search-select .search-icon { + position: absolute; + width: 20px; + top: 5px; +} diff --git a/ui/app/styles/components/secret-control-bar.scss b/ui/app/styles/components/secret-control-bar.scss deleted file mode 100644 index 739f139ba768..000000000000 --- a/ui/app/styles/components/secret-control-bar.scss +++ /dev/null @@ -1,24 +0,0 @@ -.secret-control-bar { - margin: 0; - padding: $size-10 $size-9; - background: $grey-lighter; - box-shadow: 0 1px 0 $grey-light, 0 -1px 0 $grey-light; - display: flex; - justify-content: flex-end; - .control { - flex: 0 1 auto; - padding: 0 $size-10; - font-size: $size-8; - height: 1.8rem; - line-height: 1.8rem; - display: flex; - flex-direction: column; - justify-content: center; - .switch[type='checkbox'].is-small + label::before { - top: 0.5rem; - } - .switch[type='checkbox'].is-small + label::after { - top: 0.6rem; - } - } -} diff --git a/ui/app/styles/components/tabs.scss b/ui/app/styles/components/tabs.scss index ad96326996d4..fc7851fd35fa 100644 --- a/ui/app/styles/components/tabs.scss +++ b/ui/app/styles/components/tabs.scss @@ -1,3 +1,7 @@ +.page-header + .tabs-container { + box-shadow: none; +} + .tabs { box-shadow: inset 0 -1px 0 $grey-light; @@ -15,25 +19,25 @@ border-color: $blue; color: $blue; } - &:first-child a, - &:first-child .tab { - margin-left: $size-5; - } } a, .tab { - color: $grey-dark; + color: $grey; font-weight: $font-weight-semibold; text-decoration: none; padding: $size-6 $size-8 $size-8; border-bottom: 2px solid transparent; - transition: border-color $speed; + transition: background-color $speed, border-color $speed; &:hover, &:active { border-color: $grey-light; } + + &:hover { + background-color: $ui-gray-050; + } } .ember-basic-dropdown-trigger { diff --git a/ui/app/styles/components/tool-tip.scss b/ui/app/styles/components/tool-tip.scss index 13591c3970f6..36ff3991812d 100644 --- a/ui/app/styles/components/tool-tip.scss +++ b/ui/app/styles/components/tool-tip.scss @@ -50,18 +50,6 @@ 
.ember-basic-dropdown-content--above.tool-tip { margin-top: -2px; } -.tool-tip-trigger { - border: none; - border-radius: 20px; - height: 18px; - width: 18px; - outline: none; - box-shadow: none; - cursor: pointer; - padding: 0; - color: $grey-dark; - margin-left: 8px; -} .b-checkbox .tool-tip-trigger { position: relative; diff --git a/ui/app/styles/components/toolbar.scss b/ui/app/styles/components/toolbar.scss new file mode 100644 index 000000000000..aa96bfc78905 --- /dev/null +++ b/ui/app/styles/components/toolbar.scss @@ -0,0 +1,108 @@ +.tabs-container + .toolbar { + border-top: 0; +} + +.toolbar { + background-color: $ui-gray-010; + border: 1px solid $ui-gray-100; + border-bottom-color: $ui-gray-300; + border-top-color: $ui-gray-300; + position: relative; + + &::after { + background: linear-gradient(to right, $ui-gray-010, rgba($ui-gray-010, 0)); + bottom: 0; + content: ''; + position: absolute; + right: 0; + top: 0; + width: $spacing-xxs; + z-index: 2; + } + + .input, + .select { + min-width: 190px; + } +} + +.toolbar-scroller { + align-items: center; + display: flex; + height: 44px; + justify-content: space-between; + overflow-x: auto; + width: 100%; + + @include from($mobile) { + padding: 0 $spacing-xxs; + } + + &::-webkit-scrollbar { + border-bottom: $base-border; + height: $spacing-xxs; + } + + &::-webkit-scrollbar-thumb { + background: $ui-gray-300; + border-radius: $spacing-xxs; + } +} + +.toolbar-filters, +.toolbar-actions { + align-items: center; + display: flex; + flex: 1; + white-space: nowrap; +} + +.toolbar-filters + .toolbar-actions { + @include until($mobile) { + border-left: $base-border; + margin-left: $spacing-xs; + padding-left: $spacing-xs; + } +} + +.toolbar-actions { + @include from($mobile) { + justify-content: flex-end; + } +} + +.toolbar-link { + @extend .button; + @extend .is-ghost; + @extend .has-icon-right; + border: 0; + color: $black; + transition: background-color $speed; + + &:hover { + background-color: $ui-gray-100; + border: 
0; + color: $blue; + } + + &:active { + box-shadow: none; + } + + &.popup-menu-trigger { + height: 2.5rem; + padding: $size-10 $size-8; + } +} + +.toolbar-label { + padding: $spacing-xs; + color: $grey; +} + +.toolbar-separator { + border-right: $light-border; + height: 32px; + margin: 0 $spacing-xs; + width: 0; +} diff --git a/ui/app/styles/components/ui-wizard.scss b/ui/app/styles/components/ui-wizard.scss index dbc2a659fb87..186bfadbe820 100644 --- a/ui/app/styles/components/ui-wizard.scss +++ b/ui/app/styles/components/ui-wizard.scss @@ -70,7 +70,7 @@ .wizard-header { border-bottom: $light-border; - padding: 0 $size-4 $size-8 2rem; + padding: 0 $size-4 $size-8 0; margin: $size-4 0; position: relative; @@ -78,12 +78,6 @@ margin-top: 0; padding-top: 0; } - - .title .icon { - left: 0; - position: absolute; - top: 0; - } } .wizard-dismiss-menu { @@ -117,10 +111,6 @@ .title { color: $white; - - .icon { - top: -0.1rem; - } } .wizard-header { @@ -154,12 +144,6 @@ } } -.wizard-section .title .icon { - height: auto; - margin-right: $size-11; - width: auto; -} - .wizard-section:last-of-type { margin-bottom: $size-5; } @@ -236,6 +220,7 @@ transform: translate(-50%, -50%); width: $wizard-progress-check-size; z-index: 10; + margin: 0 !important; } .feature-progress-container .feature-check { @@ -258,10 +243,10 @@ } } -.incomplete-check svg { - fill: $ui-gray-200; +.incomplete-check { + color: $ui-gray-200; } -.completed-check svg { - fill: $green; +.completed-check { + color: $green; } diff --git a/ui/app/styles/components/upgrade-overlay.scss b/ui/app/styles/components/upgrade-overlay.scss deleted file mode 100644 index f5770064acfe..000000000000 --- a/ui/app/styles/components/upgrade-overlay.scss +++ /dev/null @@ -1,63 +0,0 @@ -.upgrade-overlay { - font-size: 1rem; - opacity: 0; - text-align: left; - transition: opacity $speed-slow; - will-change: opacity; - z-index: 300; - - &.is-animated { - opacity: 1; - } - - .modal-background { - background-image: 
url('/ui/vault-hex.svg'), linear-gradient(90deg, #191a1c, #1b212d); - opacity: 0.97; - } - - .modal-content { - overflow: auto; - overflow-x: hidden; - transform: translateY(20%) scale(0.9); - transition: transform $speed-slow; - will-change: transform; - } - - &.is-animated { - .modal-content { - transform: translateY(0) scale(1); - } - } - - .upgrade-overlay-title { - border-bottom: 1px solid $grey; - padding-bottom: $size-10; - - .icon { - width: 32px; - - #edition-enterprise-hexagon { - fill: $white; - } - } - } - - .columns { - margin-bottom: $size-4; - margin-top: $size-4; - } - - .column { - display: flex; - - .box { - border-radius: $radius; - box-shadow: inset 0 0 0 1px $grey; - width: 100%; - } - } - - li { - list-style: inside disc; - } -} diff --git a/ui/app/styles/components/vlt-radio.scss b/ui/app/styles/components/vlt-radio.scss new file mode 100644 index 000000000000..bda8e560d9a5 --- /dev/null +++ b/ui/app/styles/components/vlt-radio.scss @@ -0,0 +1,39 @@ +.vlt-radio { + position: relative; + input[type='radio'] { + position: absolute; + z-index: 1; + opacity: 0; + } + + input[type='radio'] + label { + content: ''; + border: 1px solid $grey-light; + border-radius: 50%; + cursor: pointer; + display: inline-block; + margin: 0.25rem 0; + height: 1rem; + width: 1rem; + flex-shrink: 0; + flex-grow: 0; + position: relative; + left: 0; + top: 0.3rem; + } + + input[type='radio']:checked + label { + content: ''; + background: $blue; + border: 1px solid $blue; + box-shadow: inset 0 0 0 0.15rem $white; + position: relative; + left: 0; + } + input[type='radio']:focus + label { + content: ''; + box-shadow: 0 0 10px 1px rgba($blue, 0.4), inset 0 0 0 0.15rem $white; + position: relative; + left: 0; + } +} diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss index c6237839085f..3a4c90ecbb3b 100644 --- a/ui/app/styles/core.scss +++ b/ui/app/styles/core.scss @@ -55,6 +55,8 @@ @import './components/form-section'; @import './components/global-flash'; @import 
'./components/hover-copy-button'; +@import './components/http-requests-table'; +@import './components/http-requests-bar-chart'; @import './components/init-illustration'; @import './components/info-table-row'; @import './components/input-hint'; @@ -66,20 +68,24 @@ @import './components/masked-input'; @import './components/namespace-picker'; @import './components/namespace-reminder'; +@import './components/navigate-input'; @import './components/page-header'; @import './components/popup-menu'; @import './components/radial-progress'; @import './components/role-item'; @import './components/search-select'; -@import './components/secret-control-bar'; @import './components/shamir-progress'; @import './components/sidebar'; @import './components/splash-page'; @import './components/status-menu'; @import './components/tabs'; @import './components/token-expire-warning'; +@import './components/toolbar'; @import './components/tool-tip'; @import './components/unseal-warning'; -@import './components/upgrade-overlay'; @import './components/ui-wizard'; @import './components/vault-loading'; +@import './components/vlt-radio'; + +// bulma-free-zone +@import './components/hs-icon'; diff --git a/ui/app/styles/core/buttons.scss b/ui/app/styles/core/buttons.scss index 92ad01ba42b3..8f696d408cc2 100644 --- a/ui/app/styles/core/buttons.scss +++ b/ui/app/styles/core/buttons.scss @@ -158,20 +158,17 @@ $button-box-shadow-standard: 0 3px 1px 0 rgba($black, 0.12); .has-text-info & { font-weight: $font-weight-semibold; - - .icon { - vertical-align: middle; - } } &.tool-tip-trigger { - color: $black; + color: $grey-dark; min-width: auto; + padding: 0; } &.has-icon-left, &.has-icon-right { - .icon { + .hs-icon { height: 16px; min-width: auto; width: 16px; @@ -179,7 +176,7 @@ $button-box-shadow-standard: 0 3px 1px 0 rgba($black, 0.12); } &.has-icon-left { - .icon { + .hs-icon { &, &:first-child:last-child { position: relative; @@ -189,11 +186,11 @@ $button-box-shadow-standard: 0 3px 1px 0 rgba($black, 
0.12); } &.has-icon-right { - .icon { + .hs-icon { &, &:first-child:last-child { - margin-left: $size-11; - margin-right: -$size-10; + margin-left: $spacing-xxs; + margin-right: -$spacing-xxs; } } } diff --git a/ui/app/styles/core/message.scss b/ui/app/styles/core/message.scss index 7e8932b91180..f1a60248eae2 100644 --- a/ui/app/styles/core/message.scss +++ b/ui/app/styles/core/message.scss @@ -35,6 +35,9 @@ border: 0; margin-top: $spacing-xxs; } + .message-body.pre { + white-space: pre-wrap; + } p { font-size: $size-8; @@ -101,10 +104,8 @@ display: flex; margin: 0 0 $spacing-l; - .icon { - flex: 0; + .hs-icon { margin: 0 $spacing-xxs 0 0; - min-width: fit-content; } .p { diff --git a/ui/app/styles/core/navbar.scss b/ui/app/styles/core/navbar.scss index 7f6f79e86a83..a981fcf12b10 100644 --- a/ui/app/styles/core/navbar.scss +++ b/ui/app/styles/core/navbar.scss @@ -159,7 +159,6 @@ font-size: 1rem; height: auto; justify-content: flex-start; - padding: 0 $spacing-xxs; text-align: left; width: 100%; diff --git a/ui/app/styles/core/tables.scss b/ui/app/styles/core/tables.scss index 1d816a499c78..6d6d6c1203d9 100644 --- a/ui/app/styles/core/tables.scss +++ b/ui/app/styles/core/tables.scss @@ -1,16 +1,14 @@ .table { thead, .thead { - background: $grey-lighter; box-shadow: 0 1px 0 0 $grey-light, 0 -1px 0 0 $grey-light; th, .th { - text-transform: uppercase; font-size: $size-8; - color: $grey-dark; - font-weight: normal; - padding: 0.5rem 1.5rem; + color: $grey; + font-weight: $font-weight-semibold; + padding: 1rem 1.5rem 0; border-width: 0 0 1px 0; border-color: $grey-light; } diff --git a/ui/app/templates/components/alert-banner.hbs b/ui/app/templates/components/alert-banner.hbs deleted file mode 100644 index 5287fa0184b0..000000000000 --- a/ui/app/templates/components/alert-banner.hbs +++ /dev/null @@ -1,31 +0,0 @@ -
-
-
- -
- {{#if yieldWithoutColumn}} - {{yield}} - {{else}} -
-
- {{or title alertType.text}} -
- {{#if message}} -

- {{message}} -

- {{/if}} - {{#if hasBlock}} -

- {{yield}} -

- {{/if}} -
- {{/if}} -
-
diff --git a/ui/app/templates/components/alert-inline.hbs b/ui/app/templates/components/alert-inline.hbs deleted file mode 100644 index 4b841feca4c1..000000000000 --- a/ui/app/templates/components/alert-inline.hbs +++ /dev/null @@ -1,8 +0,0 @@ - -

- {{@message}} -

diff --git a/ui/app/templates/components/alert-popup.hbs b/ui/app/templates/components/alert-popup.hbs index 3a018dd48e21..c6d73c1fa9ef 100644 --- a/ui/app/templates/components/alert-popup.hbs +++ b/ui/app/templates/components/alert-popup.hbs @@ -1,19 +1,24 @@
- +
{{type.text}}
{{#if message}} -

- {{message}} -

+

{{message}}

{{/if}}
diff --git a/ui/app/templates/components/auth-config-form/config.hbs b/ui/app/templates/components/auth-config-form/config.hbs index 69b43f0d46ba..a03f76f13e4a 100644 --- a/ui/app/templates/components/auth-config-form/config.hbs +++ b/ui/app/templates/components/auth-config-form/config.hbs @@ -7,7 +7,7 @@ {{form-field data-test-field attr=attr model=model}} {{/each}} {{else if model.fieldGroups}} - {{form-field-groups model=model}} + {{form-field-groups model=model mode=mode}} {{/if}}
diff --git a/ui/app/templates/components/auth-form.hbs b/ui/app/templates/components/auth-form.hbs index 8d1f204d3958..bbdbaba435d0 100644 --- a/ui/app/templates/components/auth-form.hbs +++ b/ui/app/templates/components/auth-form.hbs @@ -1,30 +1,30 @@
{{#if showLoading}}
- {{partial 'svg/vault-loading'}} +
{{/if}} {{#if hasMethodsWithPath}} - + {{/if}}
{{/if}} +
  • + +
  • +
  • + + Copy token + +
  • {{#if (is-before (now interval=1000) auth.tokenExpirationDate)}} {{#if auth.authData.renewable}}
  • @@ -20,50 +30,32 @@
  • - {{#confirm-action - onConfirmAction=(action "revokeToken") - confirmMessage=(concat "Are you sure you want to revoke the token for " (get auth 'authData.displayName') "?") - confirmButtonText="Revoke" - confirmButtonClasses="button is-primary" - buttonClasses="button link" - showConfirm=shouldRevoke - class=(if shouldRevoke "message is-block is-warning is-outline") - containerClasses="message-body is-block" - messageClasses="is-block" - }} + Revoke token - {{/confirm-action}} +
  • {{else}}
  • - {{#confirm-action - onConfirmAction=(action "revokeToken") - confirmMessage=(concat "Are you sure you want to revoke the token for " (get auth 'authData.displayName') "?") - confirmButtonText="Revoke" - confirmButtonClasses="button is-primary" - buttonClasses="button link" - showConfirm=shouldRevoke - class=(if shouldRevoke "message is-block is-warning is-outline") - containerClasses="message-body is-block" - messageClasses="is-block" - }} + Revoke token - {{/confirm-action}} +
  • {{/if}} {{/if}}
  • - - Copy token - -
  • -
  • - -
  • -
  • - {{#link-to "vault.cluster.logout" activeClusterName id="logout" invokeAction=onLinkClick}} + {{#link-to "vault.cluster.logout" activeClusterName id="logout" class="is-destroy" invokeAction=onLinkClick}} Sign out {{/link-to}}
  • diff --git a/ui/app/templates/components/auth-jwt.hbs b/ui/app/templates/components/auth-jwt.hbs index 3621d8b89409..6ffa96ad2e18 100644 --- a/ui/app/templates/components/auth-jwt.hbs +++ b/ui/app/templates/components/auth-jwt.hbs @@ -10,6 +10,7 @@ placeholder="Default" oninput={{perform this.fetchRole value="target.value"}} autocomplete="off" + spellcheck="false" name="role" id="role" class="input" @@ -29,6 +30,7 @@ name="jwt" class="input" autocomplete="off" + spellcheck="false" data-test-jwt=true }}
    diff --git a/ui/app/templates/components/config-pki-ca.hbs b/ui/app/templates/components/config-pki-ca.hbs index 276fa424ac03..95af999df5d0 100644 --- a/ui/app/templates/components/config-pki-ca.hbs +++ b/ui/app/templates/components/config-pki-ca.hbs @@ -54,14 +54,14 @@
    {{#if model.canDeleteRoot}} - {{#confirm-action - buttonClasses="button" - onConfirmAction=(action "deleteCA") - confirmMessage="Are you sure you want to delete the root CA key?" - cancelButtonText="Cancel" - }} - Delete - {{/confirm-action}} + + Delete + {{/if}}
    diff --git a/ui/app/templates/components/console/command-input.hbs b/ui/app/templates/components/console/command-input.hbs index be3302212541..eab334904dfb 100644 --- a/ui/app/templates/components/console/command-input.hbs +++ b/ui/app/templates/components/console/command-input.hbs @@ -2,12 +2,12 @@ {{#if isRunning}}
    {{else}} - {{i-con glyph="chevron-right" size=12 }} + {{/if}} {{#tool-tip horizontalPosition="auto-right" verticalPosition=(if isFullscreen "above" "below") as |d|}} {{#d.trigger tagName="button" type="button" class=(concat "button is-compact" (if isFullscreen " active")) click=(action "fullscreen") data-test-tool-tip-trigger=true}} - {{i-con glyph=(if isFullscreen "fullscreen-close" "fullscreen-open") aria-hidden="true" size=16}} + {{/d.trigger}} {{#d.content class="tool-tip"}}
    diff --git a/ui/app/templates/components/console/log-command.hbs b/ui/app/templates/components/console/log-command.hbs index 39b9e7d1f28c..cc446877d8dd 100644 --- a/ui/app/templates/components/console/log-command.hbs +++ b/ui/app/templates/components/console/log-command.hbs @@ -1 +1,2 @@ -
    {{i-con glyph="chevron-right" size=12}}{{content}}
    +{{!-- using Icon here instead of Chevron because two nested tagless components results in a rendered line break between the tags breaking the layout in the
     --}}
    +
    diff --git a/ui/app/templates/components/console/log-error-with-html.hbs b/ui/app/templates/components/console/log-error-with-html.hbs index 625365907fa9..45e0c4540d24 100644 --- a/ui/app/templates/components/console/log-error-with-html.hbs +++ b/ui/app/templates/components/console/log-error-with-html.hbs @@ -1,5 +1,5 @@ {{! template-lint-disable no-triple-curlies}}
    - {{i-con glyph="close-circled" aria-hidden="true" size=12}} +
    diff --git a/ui/app/templates/components/console/log-error.hbs b/ui/app/templates/components/console/log-error.hbs index add86ec49224..11cd2647b2a5 100644 --- a/ui/app/templates/components/console/log-error.hbs +++ b/ui/app/templates/components/console/log-error.hbs @@ -1,4 +1,4 @@
    - {{i-con glyph="close-circled" aria-hidden="true" size=12}} +
    diff --git a/ui/app/templates/components/console/log-help.hbs b/ui/app/templates/components/console/log-help.hbs index 09472f01acf9..6d9aa8419fcd 100644 --- a/ui/app/templates/components/console/log-help.hbs +++ b/ui/app/templates/components/console/log-help.hbs @@ -1,5 +1,5 @@
    -{{i-con glyph="information-circled" aria-hidden="true" size=12}} +
    diff --git a/ui/app/templates/components/console/log-success.hbs b/ui/app/templates/components/console/log-success.hbs index e16ae924c55e..d5bcaa7b05b4 100644 --- a/ui/app/templates/components/console/log-success.hbs +++ b/ui/app/templates/components/console/log-success.hbs @@ -1,4 +1,4 @@
    - {{i-con glyph="checkmark-circled" aria-hidden="true" size=12}} +
    diff --git a/ui/app/templates/components/console/ui-panel.hbs b/ui/app/templates/components/console/ui-panel.hbs index 369f0a733b00..bc3dc043b426 100644 --- a/ui/app/templates/components/console/ui-panel.hbs +++ b/ui/app/templates/components/console/ui-panel.hbs @@ -1,5 +1,5 @@ -
    diff --git a/ui/app/templates/components/control-group-success.hbs b/ui/app/templates/components/control-group-success.hbs index a5aca003aeb1..891a1a411262 100644 --- a/ui/app/templates/components/control-group-success.hbs +++ b/ui/app/templates/components/control-group-success.hbs @@ -1,6 +1,6 @@ {{#if (and controlGroupResponse.token controlGroupResponse.uiParams.url)}}
    - You have been granted access to {{model.requestPath}}. Be careful, you can only access this data once. + You have been granted access to {{model.requestPath}}. Be careful, you can only access this data once. If you need access again in the future you will need to get authorized again.
    @@ -26,7 +26,7 @@
    {{#link-to 'vault.cluster.access.control-groups' class="button"}} - Back + Back {{/link-to}}
    {{else}} @@ -40,7 +40,7 @@ Token to access data
    - {{input data-test-token-input class="input" autocomplete="off" name="token" value=token}} + {{input data-test-token-input class="input" autocomplete="off" spellcheck="false" name="token" value=token}}
    -
    - {{#if cancelLinkParams}} -
    - {{#link-to params=cancelLinkParams class="button"}} - Cancel - {{/link-to}} -
    - {{/if}} -
    - {{#if model.canDelete}} - - - {{deleteButtonText}} - - - {{/if}} -
    - diff --git a/ui/app/templates/components/edition-badge.hbs b/ui/app/templates/components/edition-badge.hbs deleted file mode 100644 index 55f9ddb1ea3a..000000000000 --- a/ui/app/templates/components/edition-badge.hbs +++ /dev/null @@ -1,4 +0,0 @@ -{{#if icon}} - -{{/if}} -{{edition}} diff --git a/ui/app/templates/components/empty-action-namespaces.hbs b/ui/app/templates/components/empty-action-namespaces.hbs deleted file mode 100644 index 99298fb60627..000000000000 --- a/ui/app/templates/components/empty-action-namespaces.hbs +++ /dev/null @@ -1,7 +0,0 @@ -{{#link-to "vault.cluster.access.namespaces.create"}} - Create Namespace -{{/link-to}} - - - Learn more - diff --git a/ui/app/templates/components/flex-table-column.hbs b/ui/app/templates/components/flex-table-column.hbs deleted file mode 100644 index 210430eda849..000000000000 --- a/ui/app/templates/components/flex-table-column.hbs +++ /dev/null @@ -1,14 +0,0 @@ -
    -
    - {{header}} -
    -
    -
    -
    -
    - - {{content}} - -
    -
    -
    diff --git a/ui/app/templates/components/form-field.hbs b/ui/app/templates/components/form-field.hbs deleted file mode 100644 index 934fc182cfd8..000000000000 --- a/ui/app/templates/components/form-field.hbs +++ /dev/null @@ -1,228 +0,0 @@ -{{#unless - (or - (eq attr.type "boolean") - (contains - attr.options.editType - (array - "boolean" - "searchSelect" - "mountAccessor" - "kv" - "file" - "ttl" - "stringArray" - "json" - ) - ) - ) -}} - -{{/unless}} -{{#if attr.options.possibleValues}} -
    -
    - -
    -
    -{{else if (and (eq attr.type "string") (eq attr.options.editType "boolean"))}} -
    - - - -
    -{{else if (eq attr.options.editType "searchSelect")}} -
    - - -
    -{{else if (eq attr.options.editType "mountAccessor")}} - {{mount-accessor-select - name=attr.name - label=labelString - warning=attr.options.warning - helpText=attr.options.helpText - value=(get model valuePath) - onChange=(action "setAndBroadcast" valuePath) - }} -{{else if (eq attr.options.editType "kv")}} - {{kv-object-editor - value=(get model valuePath) - onChange=(action "setAndBroadcast" valuePath) - label=labelString - warning=attr.options.warning - helpText=attr.options.helpText - }} -{{else if (eq attr.options.editType "file")}} - {{text-file - index="" - file=file - onChange=(action "setFile") - warning=attr.options.warning - label=labelString - }} -{{else if (eq attr.options.editType "ttl")}} - {{ttl-picker - data-test-input=attr.name - initialValue=(or (get model valuePath) attr.options.defaultValue) - labelText=labelString - warning=attr.options.warning - setDefaultValue=(or attr.options.setDefault false) - onChange=(action (action "setAndBroadcast" valuePath)) - }} -{{else if (eq attr.options.editType "stringArray")}} - {{string-list - data-test-input=attr.name - label=labelString - warning=attr.options.warning - helpText=attr.options.helpText - inputValue=(get model valuePath) - onChange=(action (action "setAndBroadcast" valuePath)) - }} -{{else if (eq attr.options.sensitive true)}} - - -{{else if (or (eq attr.type "number") (eq attr.type "string"))}} -
    - {{#if (eq attr.options.editType "textarea")}} - - {{else if (eq attr.options.editType "json")}} - - {{json-editor - value=(if - (get model valuePath) (stringify (jsonify (get model valuePath))) - ) - valueUpdated=(action "codemirrorUpdated" attr.name "string") - }} - {{else}} - - - {{#if attr.options.validationAttr}} - {{#if - (and - (get model valuePath) (not (get model attr.options.validationAttr)) - ) - }} - - - {{/if}} - {{/if}} - {{/if}} -
    -{{else if (eq attr.type "boolean")}} -
    - - - -
    -{{else if (eq attr.type "object")}} - {{json-editor - value=(if (get model valuePath) (stringify (get model valuePath)) emptyData) - valueUpdated=(action "codemirrorUpdated" attr.name false) - }} -{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/generated-item-list.hbs b/ui/app/templates/components/generated-item-list.hbs new file mode 100644 index 000000000000..1838ae873863 --- /dev/null +++ b/ui/app/templates/components/generated-item-list.hbs @@ -0,0 +1,80 @@ + + + {{#key-value-header path="vault.cluster.access.methods"}} +
  • + + / + + {{#link-to "vault.cluster.access.methods"}} + methods + {{/link-to}} +
  • + {{/key-value-header}} +
    + +

    + {{method}} +

    +
    +
    +{{section-tabs model "authShow" paths}} + + + + Create + {{itemType}} + + + + + {{#if list.empty}} + + {{#link-to + "vault.cluster.access.method.item.create" + (pluralize itemType) + class="link" + }} + Create {{itemType}} + {{/link-to}} + + {{else if list.item}} + + + {{list.item.id}} + + +
  • + {{#link-to "vault.cluster.access.method.item.show" list.item.id class="is-block"}} + View {{itemType}} + {{/link-to}} +
  • +
  • + {{#link-to "vault.cluster.access.method.item.edit" list.item.id class="is-block"}} + Edit {{itemType}} + {{/link-to}} +
  • +
  • + + Delete + {{itemType}} + +
  • +
    +
    + {{/if}} +
    \ No newline at end of file diff --git a/ui/app/templates/components/generated-item.hbs b/ui/app/templates/components/generated-item.hbs new file mode 100644 index 000000000000..8a16e0ada00a --- /dev/null +++ b/ui/app/templates/components/generated-item.hbs @@ -0,0 +1,78 @@ + + + + + + {{#if (eq mode "show")}} +

    + {{model.id}} +

    + {{else}} +

    + {{capitalize mode}} + {{singularize itemType}} + {{#if (eq mode "edit")}} + {{model.id}} + {{/if}} +

    + {{/if}} +
    +
    +{{#if (eq mode "show")}} + + + + Delete + {{itemType}} + + + Edit {{itemType}} + + + +{{/if}} +{{#if (eq mode "show")}} + +{{else}} +
    +
    + + + +
    +
    +
    + + {{#if (eq mode "create")}} + {{#link-to 'vault.cluster.access.method.item.list' class="button" data-test-cancel-link=true}} + Cancel + {{/link-to}} + {{else}} + {{#link-to 'vault.cluster.access.method.item.show' model.id class="button" data-test-cancel-link=true}} + Cancel + {{/link-to}} + {{/if}} +
    +
    +
    +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/home-link.hbs b/ui/app/templates/components/home-link.hbs index 53690327c88a..c9aa090927e9 100644 --- a/ui/app/templates/components/home-link.hbs +++ b/ui/app/templates/components/home-link.hbs @@ -1,5 +1,5 @@ - {{#if hasBlock}} + {{#if (has-block)}} {{yield}} {{else}} {{text}} diff --git a/ui/app/templates/components/hover-copy-button.hbs b/ui/app/templates/components/hover-copy-button.hbs index 0de95fbb86e2..6899d066c57f 100644 --- a/ui/app/templates/components/hover-copy-button.hbs +++ b/ui/app/templates/components/hover-copy-button.hbs @@ -2,11 +2,11 @@ - diff --git a/ui/app/templates/components/http-requests-bar-chart.hbs b/ui/app/templates/components/http-requests-bar-chart.hbs new file mode 100644 index 000000000000..59458a68bd9c --- /dev/null +++ b/ui/app/templates/components/http-requests-bar-chart.hbs @@ -0,0 +1,19 @@ +{{! template-lint-disable no-inline-styles}} + + + + + + + + + + + + + + + + + + diff --git a/ui/app/templates/components/http-requests-container.hbs b/ui/app/templates/components/http-requests-container.hbs new file mode 100644 index 000000000000..453bf826e906 --- /dev/null +++ b/ui/app/templates/components/http-requests-container.hbs @@ -0,0 +1,10 @@ +{{#if (gt counters.length 1) }} + + + + + +{{/if}} + + + diff --git a/ui/app/templates/components/http-requests-dropdown.hbs b/ui/app/templates/components/http-requests-dropdown.hbs new file mode 100644 index 000000000000..97432a5a7058 --- /dev/null +++ b/ui/app/templates/components/http-requests-dropdown.hbs @@ -0,0 +1,12 @@ + +
    + +
    diff --git a/ui/app/templates/components/http-requests-table.hbs b/ui/app/templates/components/http-requests-table.hbs new file mode 100644 index 000000000000..ed3dfb85928d --- /dev/null +++ b/ui/app/templates/components/http-requests-table.hbs @@ -0,0 +1,32 @@ +
    + + + + + + + {{#if (gt counters.length 1)}} + + {{/if}} + + + + {{#each (reverse countersWithChange) as |c|}} + + + + {{#if (gt counters.length 1)}} + + {{/if}} + + {{/each}} + +
    MonthRequestsChange
    {{ format-utc c.start_time '%B %Y' }}{{format-number c.total}} + {{#if c.percentChange}} + + {{c.percentChange}}% + {{else}} + + {{/if}} +
    +
    diff --git a/ui/app/templates/components/identity/edit-form.hbs b/ui/app/templates/components/identity/edit-form.hbs index c0ca88971aa4..f4fe37cccedf 100644 --- a/ui/app/templates/components/identity/edit-form.hbs +++ b/ui/app/templates/components/identity/edit-form.hbs @@ -1,3 +1,17 @@ +{{#if (and (eq mode "edit") model.canDelete)}} + + + + Delete {{model.identityType}} + + + +{{/if}} +
    @@ -34,15 +48,5 @@ {{/if}}
    - {{#if (and (eq mode "edit") model.canDelete)}} - {{#confirm-action - buttonClasses="button is-ghost" - onConfirmAction=(action "deleteItem" model) - confirmMessage=(concat "Are you sure you want to delete " model.id "?") - data-test-entity-item-delete=true - }} - Delete - {{/confirm-action}} - {{/if}}
    diff --git a/ui/app/templates/components/identity/entity-nav.hbs b/ui/app/templates/components/identity/entity-nav.hbs index 17d31f71826a..786a7759ed34 100644 --- a/ui/app/templates/components/identity/entity-nav.hbs +++ b/ui/app/templates/components/identity/entity-nav.hbs @@ -4,20 +4,8 @@ {{capitalize (pluralize identityType)}} - - {{#if (eq identityType "entity")}} - {{#link-to "vault.cluster.access.identity.merge" (pluralize identityType) class="button has-icon-right is-ghost is-compact" data-test-entity-merge-link=true}} - Merge {{pluralize identityType}} - {{i-con glyph="chevron-right" size=11}} - {{/link-to}} - {{/if}} - {{#link-to "vault.cluster.access.identity.create" (pluralize identityType) class="button has-icon-right is-ghost is-compact" data-test-entity-create-link=true}} - Create {{identityType}} - {{i-con glyph="chevron-right" size=11}} - {{/link-to}} - -
    +
    -{{#if model.meta.total}} -
    -
    -
    - {{identity/lookup-input type=identityType}} -
    -
    -
    -{{/if}} + + {{#if model.meta.total}} + + {{identity/lookup-input type=identityType}} + + {{/if}} + + {{#if (eq identityType "entity")}} + + Merge {{pluralize identityType}} + + {{/if}} + + Create {{identityType}} + + + diff --git a/ui/app/templates/components/identity/item-aliases.hbs b/ui/app/templates/components/identity/item-aliases.hbs index 405b5b6dfd19..74dce5421028 100644 --- a/ui/app/templates/components/identity/item-aliases.hbs +++ b/ui/app/templates/components/identity/item-aliases.hbs @@ -9,11 +9,10 @@
    {{#link-to "vault.cluster.access.identity.aliases.show" item.id "details" class="has-text-black has-text-weight-semibold" - }}{{i-con - glyph='role' - size=14 - class="has-text-grey-light" - }}{{item.name}}{{/link-to}} + }}{{item.name}}{{/link-to}}
    {{item.id}}
    diff --git a/ui/app/templates/components/identity/item-groups.hbs b/ui/app/templates/components/identity/item-groups.hbs index eb44ed08c05a..dfc32018b190 100644 --- a/ui/app/templates/components/identity/item-groups.hbs +++ b/ui/app/templates/components/identity/item-groups.hbs @@ -2,11 +2,11 @@ {{#each model.directGroupIds as |gid|}} {{#link-to "vault.cluster.access.identity.show" "groups" gid "details" class="list-item-row" - }}{{i-con - glyph='folder' - size=14 + }}{{gid}} + {{/link-to}} {{/each}} {{#each model.inheritedGroupIds as |gid|}} {{#linked-block @@ -15,11 +15,10 @@ }} {{#link-to "vault.cluster.access.identity.show" "groups" gid "details" class="has-text-black" - }}{{i-con - glyph='folder' - size=14 + }}{{gid}} {{/link-to}} inherited {{/linked-block}} diff --git a/ui/app/templates/components/identity/item-members.hbs b/ui/app/templates/components/identity/item-members.hbs index 4b5485a72720..0b5e44e1fee2 100644 --- a/ui/app/templates/components/identity/item-members.hbs +++ b/ui/app/templates/components/identity/item-members.hbs @@ -9,13 +9,12 @@ }}
    - {{#link-to "vault.cluster.access.identity.show" "groups" gid "details" + {{#link-to "vault.cluster.access.identity.show" "groups" gid "details" class="is-block has-text-black has-text-weight-semibold" - }}{{i-con - glyph='folder' - size=14 + }}{{gid}}{{/link-to}}
    {{#if model.canEdit}} @@ -37,11 +36,10 @@
    {{#link-to "vault.cluster.access.identity.show" "entities" gid "details" class="is-block has-text-black has-text-weight-semibold" - }}{{i-con - glyph='role' - size=14 - class="has-text-grey-light" - }}{{gid}}{{/link-to}} + }}{{gid}}{{/link-to}}
    {{#if model.canEdit}} diff --git a/ui/app/templates/components/identity/item-parent-groups.hbs b/ui/app/templates/components/identity/item-parent-groups.hbs index a7ceeaf0ab78..56f10b6c216b 100644 --- a/ui/app/templates/components/identity/item-parent-groups.hbs +++ b/ui/app/templates/components/identity/item-parent-groups.hbs @@ -11,11 +11,11 @@
    {{#link-to "vault.cluster.access.identity.show" "groups" gid "details" class="is-block has-text-black has-text-weight-semibold" - }}{{i-con - glyph='folder' - size=14 + }}{{gid}} + {{/link-to}}
    diff --git a/ui/app/templates/components/identity/lookup-input.hbs b/ui/app/templates/components/identity/lookup-input.hbs index 527df5676774..92eb9d1d3159 100644 --- a/ui/app/templates/components/identity/lookup-input.hbs +++ b/ui/app/templates/components/identity/lookup-input.hbs @@ -1,5 +1,5 @@
    -
    +
    + {{else if (eq attr.options.editType "json")}} + + {{json-editor + value=(if + (get model valuePath) (stringify (jsonify (get model valuePath))) + ) + valueUpdated=(action "codemirrorUpdated" attr.name "string") + }} + {{else}} + + + {{#if attr.options.validationAttr}} + {{#if + (and + (get model valuePath) (not (get model attr.options.validationAttr)) + ) + }} + + + {{/if}} + {{/if}} + {{/if}} +
    +{{else if (eq attr.type "boolean")}} +
    + + + +
    +{{else if (eq attr.type "object")}} + {{json-editor + value=(if (get model valuePath) (stringify (get model valuePath)) emptyData) + valueUpdated=(action "codemirrorUpdated" attr.name false) + }} +{{/if}} \ No newline at end of file diff --git a/ui/lib/core/addon/templates/components/icon.hbs b/ui/lib/core/addon/templates/components/icon.hbs new file mode 100644 index 000000000000..84b9ea8e8bbb --- /dev/null +++ b/ui/lib/core/addon/templates/components/icon.hbs @@ -0,0 +1,3 @@ + + {{svg-jar @glyph}} + \ No newline at end of file diff --git a/ui/lib/core/addon/templates/components/info-table-row.hbs b/ui/lib/core/addon/templates/components/info-table-row.hbs new file mode 100644 index 000000000000..f852967f1298 --- /dev/null +++ b/ui/lib/core/addon/templates/components/info-table-row.hbs @@ -0,0 +1,28 @@ +{{#if (or alwaysRender value)}} +
    + {{label}} +
    +
    + {{#if (has-block)}} + {{yield}} + {{else if valueIsBoolean}} + {{#if value}} +
    +{{/if}} diff --git a/ui/lib/core/addon/templates/components/info-tooltip.hbs b/ui/lib/core/addon/templates/components/info-tooltip.hbs new file mode 100644 index 000000000000..e725a577b773 --- /dev/null +++ b/ui/lib/core/addon/templates/components/info-tooltip.hbs @@ -0,0 +1,13 @@ +{{#tool-tip as |d|}} + {{#d.trigger tagName="button" type="button" class="tool-tip-trigger button is-ghost is-compact" data-test-tool-tip-trigger=true}} + + {{/d.trigger}} + {{#d.content class="tool-tip"}} +
    + {{yield}} +
    + {{/d.content}} +{{/tool-tip}} \ No newline at end of file diff --git a/ui/lib/core/addon/templates/components/layout-loading.hbs b/ui/lib/core/addon/templates/components/layout-loading.hbs new file mode 100644 index 000000000000..b144ff68bf68 --- /dev/null +++ b/ui/lib/core/addon/templates/components/layout-loading.hbs @@ -0,0 +1,9 @@ +
    +
    +
    +
    + +
    +
    +
    +
    diff --git a/ui/app/templates/components/list-item.hbs b/ui/lib/core/addon/templates/components/list-item.hbs similarity index 92% rename from ui/app/templates/components/list-item.hbs rename to ui/lib/core/addon/templates/components/list-item.hbs index fa760d3c567c..08e224b46212 100644 --- a/ui/app/templates/components/list-item.hbs +++ b/ui/lib/core/addon/templates/components/list-item.hbs @@ -1,7 +1,7 @@ {{#if componentName}} {{component componentName item=item}} {{else if linkParams}} - +
    {{#link-to params=linkParams class="has-text-weight-semibold has-text-black is-display-flex is-flex-1 is-no-underline"}} diff --git a/ui/app/templates/components/page-header-level-left.hbs b/ui/lib/core/addon/templates/components/list-item/content.hbs similarity index 100% rename from ui/app/templates/components/page-header-level-left.hbs rename to ui/lib/core/addon/templates/components/list-item/content.hbs diff --git a/ui/app/templates/components/list-item/popup-menu.hbs b/ui/lib/core/addon/templates/components/list-item/popup-menu.hbs similarity index 100% rename from ui/app/templates/components/list-item/popup-menu.hbs rename to ui/lib/core/addon/templates/components/list-item/popup-menu.hbs diff --git a/ui/app/templates/components/list-pagination.hbs b/ui/lib/core/addon/templates/components/list-pagination.hbs similarity index 93% rename from ui/app/templates/components/list-pagination.hbs rename to ui/lib/core/addon/templates/components/list-pagination.hbs index b156e7675a39..dc9aad255e48 100644 --- a/ui/app/templates/components/list-pagination.hbs +++ b/ui/lib/core/addon/templates/components/list-pagination.hbs @@ -8,14 +8,14 @@ ) class="pagination-previous" }} - {{i-con glyph="chevron-left" size=10}} + Previous {{/link-to}} {{else}} {{/if}} {{#if segmentLinks }} diff --git a/ui/lib/core/addon/templates/components/list-view.hbs b/ui/lib/core/addon/templates/components/list-view.hbs new file mode 100644 index 000000000000..76d779ec31e7 --- /dev/null +++ b/ui/lib/core/addon/templates/components/list-view.hbs @@ -0,0 +1,23 @@ +{{#if + (or + (and items.meta items.meta.total) + items.length + ) +}} +
    + {{#each items as |item|}} + {{yield (hash item=item)}} + {{else}} + {{yield}} + {{/each}} + {{#if showPagination}} + + {{/if}} +
    +{{else}} + {{yield (hash empty=(component "empty-state" title=this.emptyTitle message=this.emptyMessage))}} +{{/if}} diff --git a/ui/lib/core/addon/templates/components/masked-input.hbs b/ui/lib/core/addon/templates/components/masked-input.hbs new file mode 100644 index 000000000000..660260f158f2 --- /dev/null +++ b/ui/lib/core/addon/templates/components/masked-input.hbs @@ -0,0 +1,21 @@ +
    + {{#if displayOnly}} +
    {{displayValue}}
    + {{else}} +