From 5cb9a8a5faec2e270876140d820478d289905576 Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Mon, 11 Feb 2019 13:37:55 -0800
Subject: [PATCH 01/31] Create alias and command for OIDC (#6206)
---
command/commands.go | 2 +
helper/builtinplugins/registry.go | 1 +
.../vault-plugin-auth-jwt/Gopkg.lock | 201 ++++---
.../vault-plugin-auth-jwt/backend.go | 13 +-
.../hashicorp/vault-plugin-auth-jwt/claims.go | 65 +++
.../hashicorp/vault-plugin-auth-jwt/cli.go | 502 ++++++++++++++++++
.../vault-plugin-auth-jwt/path_config.go | 59 +-
.../vault-plugin-auth-jwt/path_login.go | 263 +++++----
.../vault-plugin-auth-jwt/path_oidc.go | 303 +++++++++++
.../vault-plugin-auth-jwt/path_role.go | 188 ++++---
.../vault-plugin-auth-jwt/path_ui.go | 37 ++
.../vault-plugin-auth-jwt/test_ui.html | 37 ++
.../mitchellh/pointerstructure/README.md | 74 +++
.../mitchellh/pointerstructure/delete.go | 112 ++++
.../mitchellh/pointerstructure/get.go | 91 ++++
.../mitchellh/pointerstructure/parse.go | 57 ++
.../mitchellh/pointerstructure/pointer.go | 123 +++++
.../mitchellh/pointerstructure/set.go | 122 +++++
.../mitchellh/pointerstructure/sort.go | 42 ++
vendor/vendor.json | 14 +-
20 files changed, 2043 insertions(+), 263 deletions(-)
create mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-jwt/claims.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html
create mode 100644 vendor/github.com/mitchellh/pointerstructure/README.md
create mode 100644 vendor/github.com/mitchellh/pointerstructure/delete.go
create mode 100644 vendor/github.com/mitchellh/pointerstructure/get.go
create mode 100644 vendor/github.com/mitchellh/pointerstructure/parse.go
create mode 100644 vendor/github.com/mitchellh/pointerstructure/pointer.go
create mode 100644 vendor/github.com/mitchellh/pointerstructure/set.go
create mode 100644 vendor/github.com/mitchellh/pointerstructure/sort.go
diff --git a/command/commands.go b/command/commands.go
index 395b47a07e20..e219decf8926 100644
--- a/command/commands.go
+++ b/command/commands.go
@@ -27,6 +27,7 @@ import (
credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud"
credCentrify "github.com/hashicorp/vault-plugin-auth-centrify"
credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin"
+ credOIDC "github.com/hashicorp/vault-plugin-auth-jwt"
credAws "github.com/hashicorp/vault/builtin/credential/aws"
credCert "github.com/hashicorp/vault/builtin/credential/cert"
credGitHub "github.com/hashicorp/vault/builtin/credential/github"
@@ -177,6 +178,7 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
"gcp": &credGcp.CLIHandler{},
"github": &credGitHub.CLIHandler{},
"ldap": &credLdap.CLIHandler{},
+ "oidc": &credOIDC.CLIHandler{},
"okta": &credOkta.CLIHandler{},
"radius": &credUserpass.CLIHandler{
DefaultMount: "radius",
diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go
index 2ab5985c0d2d..1183f7626680 100644
--- a/helper/builtinplugins/registry.go
+++ b/helper/builtinplugins/registry.go
@@ -73,6 +73,7 @@ func newRegistry() *registry {
"jwt": credJWT.Factory,
"kubernetes": credKube.Factory,
"ldap": credLdap.Factory,
+ "oidc": credJWT.Factory,
"okta": credOkta.Factory,
"radius": credRadius.Factory,
"userpass": credUserpass.Factory,
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
index c936288aaab1..9ae1a539743a 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
@@ -15,12 +15,12 @@
version = "1.1"
[[projects]]
- branch = "master"
- digest = "1:6bf6d532e503d9526d46e69aff04d11632c8c1e28b847dbd226babc1689aa723"
+ digest = "1:c47f4964978e211c6e566596ec6246c329912ea92e9bb99c00798bb4564c5b09"
name = "github.com/armon/go-radix"
packages = ["."]
pruneopts = "UT"
- revision = "7fddfc383310abc091d79a27f116d30cf0424032"
+ revision = "1a2de0c21c94309923825da3df33a4381872c795"
+ version = "v1.0.0"
[[projects]]
digest = "1:f6e5e1bc64c2908167e6aa9a1fe0c084d515132a1c63ad5b6c84036aa06dc0c1"
@@ -39,7 +39,7 @@
version = "v1.0.1"
[[projects]]
- digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
+ digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
name = "github.com/golang/protobuf"
packages = [
"proto",
@@ -49,8 +49,8 @@
"ptypes/timestamp",
]
pruneopts = "UT"
- revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
- version = "v1.1.0"
+ revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+ version = "v1.2.0"
[[projects]]
branch = "master"
@@ -62,106 +62,108 @@
[[projects]]
branch = "master"
- digest = "1:d1971637b21871ec2033a44ca87c99c5608a7340cb34ec75fab8d2ab503276c9"
+ digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b"
name = "github.com/hashicorp/errwrap"
packages = ["."]
pruneopts = "UT"
- revision = "d6c0cd88035724dd42e0f335ae30161c20575ecc"
+ revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
[[projects]]
branch = "master"
- digest = "1:77cb3be9b21ba7f1a4701e870c84ea8b66e7d74c7c8951c58155fdadae9414ec"
+ digest = "1:f47d6109c2034cb16bd62b220e18afd5aa9d5a1630fe5d937ad96a4fb7cbb277"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
pruneopts = "UT"
- revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
+ revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18"
[[projects]]
branch = "master"
- digest = "1:e8d99882caa8c74d68f340ddb9bba3f7e433117ce57c3e52501edfa7e195d2c7"
+ digest = "1:0876aeb6edb07e20b6b0ce1d346655cb63dbe0a26ccfb47b68a9b7697709777b"
name = "github.com/hashicorp/go-hclog"
packages = ["."]
pruneopts = "UT"
- revision = "ff2cf002a8dd750586d91dddd4470c341f981fe1"
+ revision = "4783caec6f2e5cdd47fab8b2bb47ce2ce5c546b7"
[[projects]]
- branch = "master"
- digest = "1:2394f5a25132b3868eff44599cc28d44bdd0330806e34c495d754dd052df612b"
+ digest = "1:2be5a35f0c5b35162c41bb24971e5dcf6ce825403296ee435429cdcc4e1e847e"
name = "github.com/hashicorp/go-immutable-radix"
packages = ["."]
pruneopts = "UT"
- revision = "7f3cd4390caab3250a57f30efdb2a65dd7649ecf"
+ revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5"
+ version = "v1.0.0"
[[projects]]
- branch = "master"
- digest = "1:46fb6a9f1b9667f32ac93e08b1da118b2c666991424ea12e848b05d4fe5155ef"
+ digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
pruneopts = "UT"
- revision = "3d5d8f294aa03d8e98859feac328afbdf1ae0703"
+ revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
+ version = "v1.0.0"
[[projects]]
branch = "master"
- digest = "1:20f78c1cf1b6fe6c55ba1407350d6fc7dc77d1591f8106ba693c28014a1a1b37"
+ digest = "1:77a6108b8eb3cd0feac4eeb3e032f36c8fdfe9497671952fd9eb682b9c503158"
name = "github.com/hashicorp/go-plugin"
- packages = ["."]
+ packages = [
+ ".",
+ "internal/proto",
+ ]
pruneopts = "UT"
- revision = "a4620f9913d19f03a6bf19b2f304daaaf83ea130"
+ revision = "362c99b11937c6a84686ee5726a8170e921ab406"
[[projects]]
- branch = "master"
- digest = "1:183f00c472fb9b2446659618eebf4899872fa267b92f926539411abdc8b941df"
+ digest = "1:d260503602063d71718eb21f85c02133ad5eac894c2a6f0e0546b7dc017dc97e"
name = "github.com/hashicorp/go-retryablehttp"
packages = ["."]
pruneopts = "UT"
- revision = "e651d75abec6fbd4f2c09508f72ae7af8a8b7171"
+ revision = "73489d0a1476f0c9e6fb03f9c39241523a496dfd"
+ version = "v0.5.2"
[[projects]]
- branch = "master"
- digest = "1:45aad874d3c7d5e8610427c81870fb54970b981692930ec2a319ce4cb89d7a00"
+ digest = "1:a54ada9beb59fdc35b69322979e870ff0b780e03f4dc309c4c8674b94927df75"
name = "github.com/hashicorp/go-rootcerts"
packages = ["."]
pruneopts = "UT"
- revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
+ revision = "63503fb4e1eca22f9ae0f90b49c5d5538a0e87eb"
+ version = "v1.0.0"
[[projects]]
branch = "master"
- digest = "1:14f2005c31ddf99c4a0f36fc440f8d1ac43224194c7c4a904b3c8f4ba5654d0b"
+ digest = "1:3c4c27026ab6a3218dbde897568f651c81062e2ee6e617e57ae46ca95bb1db6b"
name = "github.com/hashicorp/go-sockaddr"
packages = ["."]
pruneopts = "UT"
- revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9"
+ revision = "3aed17b5ee41761cc2b04f2a94c7107d428967e5"
[[projects]]
- branch = "master"
- digest = "1:354978aad16c56c27f57e5b152224806d87902e4935da3b03e18263d82ae77aa"
+ digest = "1:f14364057165381ea296e49f8870a9ffce2b8a95e34d6ae06c759106aaef428c"
name = "github.com/hashicorp/go-uuid"
packages = ["."]
pruneopts = "UT"
- revision = "27454136f0364f2d44b1276c552d69105cf8c498"
+ revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43"
+ version = "v1.0.1"
[[projects]]
- branch = "master"
- digest = "1:32c0e96a63bd093eccf37db757fb314be5996f34de93969321c2cbef893a7bd6"
+ digest = "1:950caca7dfcf796419232ba996c9c3539d09f26af27ba848c4508e604c13efbb"
name = "github.com/hashicorp/go-version"
packages = ["."]
pruneopts = "UT"
- revision = "270f2f71b1ee587f3b609f00f422b76a6b28f348"
+ revision = "d40cf49b3a77bba84a7afdbd7f1dc295d114efb1"
+ version = "v1.1.0"
[[projects]]
- branch = "master"
- digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db"
+ digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "UT"
- revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
+ revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
+ version = "v0.5.0"
[[projects]]
- branch = "master"
- digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240"
+ digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
name = "github.com/hashicorp/hcl"
packages = [
".",
@@ -175,11 +177,12 @@
"json/token",
]
pruneopts = "UT"
- revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
+ revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
+ version = "v1.0.0"
[[projects]]
branch = "master"
- digest = "1:d00de8725219a569ffbb5dd1042e4ced1f3b5ccee2b07218371f71026cc7609a"
+ digest = "1:f5bdd7b0d06bfa965cefa9c52af7f556bd079ff4328d67c89f6afdf4be7eabbe"
name = "github.com/hashicorp/vault"
packages = [
"api",
@@ -187,9 +190,11 @@
"helper/cidrutil",
"helper/compressutil",
"helper/consts",
+ "helper/cryptoutil",
"helper/errutil",
"helper/hclutil",
"helper/jsonutil",
+ "helper/license",
"helper/locksutil",
"helper/logging",
"helper/mlock",
@@ -209,39 +214,47 @@
"version",
]
pruneopts = "UT"
- revision = "8655d167084028d627f687ddc25d0c71307eb5be"
+ revision = "b16527d791ba46f74a608527b328957618aa0ae6"
[[projects]]
branch = "master"
- digest = "1:89658943622e6bc5e76b4da027ee9583fa0b321db0c797bd554edab96c1ca2b1"
+ digest = "1:a4826c308e84f5f161b90b54a814f0be7d112b80164b9b884698a6903ea47ab3"
name = "github.com/hashicorp/yamux"
packages = ["."]
pruneopts = "UT"
- revision = "3520598351bb3500a49ae9563f5539666ae0a27c"
+ revision = "2f1d1f20f75d5404f53b9edf6b53ed5505508675"
[[projects]]
- branch = "master"
- digest = "1:c7354463195544b1ab3c1f1fadb41430947f5d28dfbf2cdbd38268c5717a5a03"
+ digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "UT"
- revision = "58046073cbffe2f25d425fe1331102f55cf719de"
+ revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
+ version = "v1.1.0"
[[projects]]
- branch = "master"
- digest = "1:cae1afe858922bd10e9573b87130f730a6e4183a00eba79920d6656629468bfa"
+ digest = "1:42eb1f52b84a06820cedc9baec2e710bfbda3ee6dac6cdb97f8b9a5066134ec6"
name = "github.com/mitchellh/go-testing-interface"
packages = ["."]
pruneopts = "UT"
- revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
+ revision = "6d0b8010fcc857872e42fc6c931227569016843c"
+ version = "v1.0.0"
[[projects]]
- branch = "master"
- digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355"
+ digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "UT"
- revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
+ revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
+ version = "v1.1.2"
+
+[[projects]]
+ branch = "master"
+ digest = "1:302de3c669b04a566d4e99760d6fb35a22177fc14c7a9284e8b3cf6e9fe3f28a"
+ name = "github.com/mitchellh/pointerstructure"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "f2329fcfa9e280bdb5a3f2544aec815a508ad72f"
[[projects]]
digest = "1:9ec6cf1df5ad1d55cf41a43b6b1e7e118a91bade4f68ff4303379343e40c0e25"
@@ -251,6 +264,25 @@
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
version = "v1.0.0"
+[[projects]]
+ digest = "1:808cdddf087fb64baeae67b8dfaee2069034d9704923a3cb8bd96a995421a625"
+ name = "github.com/patrickmn/go-cache"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
+ version = "v2.1.0"
+
+[[projects]]
+ digest = "1:c7a5e79396b6eb570159df7a1d487ce5775bf43b7907976fbef6de544ea160ad"
+ name = "github.com/pierrec/lz4"
+ packages = [
+ ".",
+ "internal/xxh32",
+ ]
+ pruneopts = "UT"
+ revision = "473cd7ce01a1113208073166464b98819526150e"
+ version = "v2.0.8"
+
[[projects]]
branch = "master"
digest = "1:bd9efe4e0b0f768302a1e2f0c22458149278de533e521206e5ddc71848c269a0"
@@ -263,28 +295,29 @@
revision = "1555304b9b35fdd2b425bccf1a5613677705e7d0"
[[projects]]
- digest = "1:0e792eea6c96ec55ff302ef33886acbaa5006e900fefe82689e88d96439dcd84"
+ digest = "1:6baa565fe16f8657cf93469b2b8a6c61a277827734400d27e44d589547297279"
name = "github.com/ryanuber/go-glob"
packages = ["."]
pruneopts = "UT"
- revision = "572520ed46dbddaed19ea3d9541bdd0494163693"
- version = "v0.1"
+ revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53"
+ version = "v1.0.0"
[[projects]]
branch = "master"
- digest = "1:b8fa1ff0fc20983395978b3f771bb10438accbfe19326b02e236c1d4bf1c91b2"
+ digest = "1:5bce6a1c0d1492cef01d74084ddbac09c4bbc4cbc1db3fdd0c138ed9bc945bf8"
name = "golang.org/x/crypto"
packages = [
+ "blake2b",
"ed25519",
"ed25519/internal/edwards25519",
"pbkdf2",
]
pruneopts = "UT"
- revision = "de0752318171da717af4ce24d0a2e8626afaeb11"
+ revision = "193df9c0f06f8bb35fba505183eaf0acc0136505"
[[projects]]
branch = "master"
- digest = "1:3c4175c2711d67096567fc2d84a83464d6ff58119af3efc89983339d64144cb0"
+ digest = "1:9d2f08c64693fbe7177b5980f80c35672c80f12be79bb3bc86948b934d70e4ee"
name = "golang.org/x/net"
packages = [
"context",
@@ -297,26 +330,29 @@
"trace",
]
pruneopts = "UT"
- revision = "aaf60122140d3fcf75376d319f0554393160eb50"
+ revision = "65e2d4e15006aab9813ff8769e768bbf4bb667a0"
[[projects]]
branch = "master"
- digest = "1:af19f6e6c369bf51ef226e989034cd88a45083173c02ac4d7ab74c9a90d356b7"
+ digest = "1:e007b54f54cbd4214aa6d97a67d57bc2539991adb4e22ea92c482bbece8de469"
name = "golang.org/x/oauth2"
packages = [
".",
"internal",
]
pruneopts = "UT"
- revision = "3d292e4d0cdc3a0113e6d207bb137145ef1de42f"
+ revision = "99b60b757ec124ebb7d6b7e97f153b19c10ce163"
[[projects]]
branch = "master"
- digest = "1:05662433b3a13c921587a6e622b5722072edff83211efd1cd79eeaeedfd83f07"
+ digest = "1:c9e49928119661a681af4037236af47654d6bd421c0af184962c890d0a61e0fb"
name = "golang.org/x/sys"
- packages = ["unix"]
+ packages = [
+ "cpu",
+ "unix",
+ ]
pruneopts = "UT"
- revision = "1c9583448a9c3aa0f9a6a5241bf73c0bd8aafded"
+ revision = "3b5209105503162ded1863c307ac66fec31120dd"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
@@ -343,14 +379,14 @@
[[projects]]
branch = "master"
- digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
+ digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "UT"
- revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
+ revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
[[projects]]
- digest = "1:328b5e4f197d928c444a51a75385f4b978915c0e75521f0ad6a3db976c97a7d3"
+ digest = "1:6f3bd49ddf2e104e52062774d797714371fac1b8bddfd8e124ce78e6b2264a10"
name = "google.golang.org/appengine"
packages = [
"internal",
@@ -362,8 +398,8 @@
"urlfetch",
]
pruneopts = "UT"
- revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
- version = "v1.1.0"
+ revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
+ version = "v1.4.0"
[[projects]]
branch = "master"
@@ -371,19 +407,21 @@
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
- revision = "d0a8f471bba2dbb160885b0000d814ee5d559bad"
+ revision = "4b09977fb92221987e99d190c8f88f2c92727a29"
[[projects]]
- digest = "1:047efbc3c9a51f3002b0002f92543857d372654a676fb6b01931982cd80467dd"
+ digest = "1:a887a56d0ff92cf05b4bb6004b46fc6e64d3fb6aca4eaeb1466bdce183ba5004"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
+ "binarylog/grpc_binarylog_v1",
"codes",
"connectivity",
"credentials",
+ "credentials/internal",
"encoding",
"encoding/proto",
"grpclog",
@@ -391,9 +429,12 @@
"health/grpc_health_v1",
"internal",
"internal/backoff",
+ "internal/binarylog",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
+ "internal/grpcsync",
+ "internal/syscall",
"internal/transport",
"keepalive",
"metadata",
@@ -407,11 +448,11 @@
"tap",
]
pruneopts = "UT"
- revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
- version = "v1.14.0"
+ revision = "a02b0774206b209466313a0b525d2c738fe407eb"
+ version = "v1.18.0"
[[projects]]
- digest = "1:b57bb9a6a2a03558d63166f1afc3c0c4f91ad137f63bf2bee995e9baeb976a9c"
+ digest = "1:a4cde1eec9a17eb2399a50c6e1a9fe3fde039994de058f9dbf6592d157bfe97b"
name = "gopkg.in/square/go-jose.v2"
packages = [
".",
@@ -420,8 +461,8 @@
"jwt",
]
pruneopts = "UT"
- revision = "8254d6c783765f38c8675fae4427a1fe73fbd09d"
- version = "v2.1.8"
+ revision = "e94fb177d3668d35ab39c61cbb2f311550557e83"
+ version = "v2.2.2"
[solve-meta]
analyzer-name = "dep"
@@ -433,6 +474,8 @@
"github.com/hashicorp/go-cleanhttp",
"github.com/hashicorp/go-hclog",
"github.com/hashicorp/go-sockaddr",
+ "github.com/hashicorp/go-uuid",
+ "github.com/hashicorp/vault/api",
"github.com/hashicorp/vault/helper/certutil",
"github.com/hashicorp/vault/helper/cidrutil",
"github.com/hashicorp/vault/helper/logging",
@@ -443,6 +486,8 @@
"github.com/hashicorp/vault/logical",
"github.com/hashicorp/vault/logical/framework",
"github.com/hashicorp/vault/logical/plugin",
+ "github.com/mitchellh/pointerstructure",
+ "github.com/patrickmn/go-cache",
"golang.org/x/oauth2",
"gopkg.in/square/go-jose.v2",
"gopkg.in/square/go-jose.v2/jwt",
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go
index c906bd8c8ead..c1d328bdf29e 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go
@@ -3,10 +3,12 @@ package jwtauth
import (
"context"
"sync"
+ "time"
oidc "github.com/coreos/go-oidc"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
+ cache "github.com/patrickmn/go-cache"
)
const (
@@ -16,7 +18,7 @@ const (
// Factory is used by framework
func Factory(ctx context.Context, c *logical.BackendConfig) (logical.Backend, error) {
- b := backend(c)
+ b := backend()
if err := b.Setup(ctx, c); err != nil {
return nil, err
}
@@ -29,14 +31,16 @@ type jwtAuthBackend struct {
l sync.RWMutex
provider *oidc.Provider
cachedConfig *jwtConfig
+ oidcStates *cache.Cache
providerCtx context.Context
providerCtxCancel context.CancelFunc
}
-func backend(c *logical.BackendConfig) *jwtAuthBackend {
+func backend() *jwtAuthBackend {
b := new(jwtAuthBackend)
b.providerCtx, b.providerCtxCancel = context.WithCancel(context.Background())
+ b.oidcStates = cache.New(oidcStateTimeout, 1*time.Minute)
b.Backend = &framework.Backend{
AuthRenew: b.pathLoginRenew,
@@ -46,6 +50,9 @@ func backend(c *logical.BackendConfig) *jwtAuthBackend {
PathsSpecial: &logical.Paths{
Unauthenticated: []string{
"login",
+ "oidc/auth_url",
+ "oidc/callback",
+ "ui", // TODO: remove when Vault UI is ready
},
SealWrapStorage: []string{
"config",
@@ -57,7 +64,9 @@ func backend(c *logical.BackendConfig) *jwtAuthBackend {
pathRoleList(b),
pathRole(b),
pathConfig(b),
+ pathUI(b), // TODO: remove when Vault UI is ready
},
+ pathOIDC(b),
),
Clean: b.cleanup,
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/claims.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/claims.go
new file mode 100644
index 000000000000..473349bc3ab2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/claims.go
@@ -0,0 +1,65 @@
+package jwtauth
+
+import (
+ "fmt"
+ "strings"
+
+ log "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/pointerstructure"
+)
+
+// getClaim returns a claim value from allClaims given a provided claim string.
+// If this string is a valid JSONPointer, it will be interpreted as such to locate
+// the claim. Otherwise, the claim string will be used directly.
+func getClaim(logger log.Logger, allClaims map[string]interface{}, claim string) interface{} {
+ var val interface{}
+ var err error
+
+ if !strings.HasPrefix(claim, "/") {
+ val = allClaims[claim]
+ } else {
+ val, err = pointerstructure.Get(allClaims, claim)
+ if err != nil {
+ logger.Warn(fmt.Sprintf("unable to locate %s in claims: %s", claim, err.Error()))
+ return nil
+ }
+ }
+
+ // The claims unmarshalled by go-oidc don't use UseNumber, so there will
+ // be mismatches if they're coming in as float64 since Vault's config will
+ // be represented as json.Number. If the operator can coerce claims data to
+ // be in string form, there is no problem. Alternatively, we could try to
+ // intelligently convert float64 to json.Number, e.g.:
+ //
+ // switch v := val.(type) {
+ // case float64:
+ // val = json.Number(strconv.Itoa(int(v)))
+ // }
+ //
+ // Or we fork and/or PR go-oidc.
+
+ return val
+}
+
+// extractMetadata builds a metadata map from a set of claims and claims mappings.
+// The referenced claims must be strings and the claims mappings must be of the structure:
+//
+// {
+// "/some/claim/pointer": "metadata_key1",
+// "another_claim": "metadata_key2",
+// ...
+// }
+func extractMetadata(logger log.Logger, allClaims map[string]interface{}, claimMappings map[string]string) (map[string]string, error) {
+ metadata := make(map[string]string)
+ for source, target := range claimMappings {
+ if value := getClaim(logger, allClaims, source); value != nil {
+ strValue, ok := value.(string)
+ if !ok {
+ return nil, fmt.Errorf("error converting claim '%s' to string", source)
+ }
+
+ metadata[target] = strValue
+ }
+ }
+ return metadata, nil
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
new file mode 100644
index 000000000000..a8b221d261a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
@@ -0,0 +1,502 @@
+package jwtauth
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/exec"
+ "os/signal"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+)
+
+const defaultMount = "oidc"
+const defaultPort = "8300"
+
+type CLIHandler struct{}
+
+type loginResp struct {
+ secret *api.Secret
+ err error
+}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+ // handle ctrl-c while waiting for the callback
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, os.Interrupt)
+ defer signal.Stop(ch)
+
+ doneCh := make(chan loginResp)
+
+ mount, ok := m["mount"]
+ if !ok {
+ mount = defaultMount
+ }
+
+ port, ok := m["port"]
+ if !ok {
+ port = defaultPort
+ }
+
+ role := m["role"]
+ if role == "" {
+ return nil, errors.New("a 'role' must be specified")
+ }
+
+ secret, err := fetchAuthURL(c, role, mount, port)
+ if err != nil {
+ return nil, err
+ }
+
+ authURL := secret.Data["auth_url"].(string)
+ if authURL == "" {
+ return nil, fmt.Errorf("Unable to authorize role %q. Check Vault logs for more information.", role)
+ }
+
+ fmt.Fprintf(os.Stderr, "Complete the login via your OIDC provider. Launching browser to:\n\n %s\n\n\n", authURL)
+ if err := openURL(authURL); err != nil {
+ fmt.Fprintf(os.Stderr, "Error attempting to automatically open browser: '%s'.\nPlease visit the authorization URL manually.", err)
+ }
+
+ // Set up callback handler
+ http.HandleFunc(fmt.Sprintf("/v1/auth/%s/oidc/callback", mount), func(w http.ResponseWriter, req *http.Request) {
+ var response string
+
+ query := req.URL.Query()
+ code := query.Get("code")
+ state := query.Get("state")
+ data := map[string][]string{
+ "code": {code},
+ "state": {state},
+ }
+
+ secret, err := c.Logical().ReadWithData(fmt.Sprintf("auth/%s/oidc/callback", mount), data)
+ if err != nil {
+ summary, detail := parseError(err)
+ response = errorHTML(summary, detail)
+ } else {
+ response = successHTML
+ }
+
+ w.Write([]byte(response))
+ doneCh <- loginResp{secret, err}
+ })
+
+ // Start local server
+ go func() {
+ if err := http.ListenAndServe(":"+port, nil); err != nil && err != http.ErrServerClosed {
+ fmt.Fprintf(os.Stderr, "Error listening for callback: %v\n\n", err.Error())
+ }
+ }()
+
+ // Wait for either the callback to finish or SIGINT to be received
+ select {
+ case s := <-doneCh:
+ return s.secret, s.err
+ case <-ch:
+ return nil, errors.New("interrupted")
+ }
+}
+
+func fetchAuthURL(c *api.Client, role, mount, port string) (*api.Secret, error) {
+ data := map[string]interface{}{
+ "role": role,
+ "redirect_uri": fmt.Sprintf("http://localhost:%s/v1/auth/%s/oidc/callback", port, mount),
+ }
+
+ return c.Logical().Write(fmt.Sprintf("auth/%s/oidc/auth_url", mount), data)
+}
+
+// openURL opens the specified URL in the default browser of the user.
+// Source: https://stackoverflow.com/a/39324149/453290
+func openURL(url string) error {
+ var cmd string
+ var args []string
+
+ switch runtime.GOOS {
+ case "windows":
+ cmd = "cmd"
+ args = []string{"/c", "start"}
+ case "darwin":
+ cmd = "open"
+ default: // "linux", "freebsd", "openbsd", "netbsd"
+ cmd = "xdg-open"
+ }
+ args = append(args, url)
+ return exec.Command(cmd, args...).Start()
+}
+
+// parseError converts an error from the API into summary and detail portions.
+func parseError(err error) (string, string) {
+ headers := []string{errNoResponse, errLoginFailed, errTokenVerification}
+ summary := "Login error"
+ detail := ""
+
+ re := regexp.MustCompile(`(?s)Errors:.*\* *(.*)`)
+
+ errorParts := re.FindStringSubmatch(err.Error())
+ switch len(errorParts) {
+ case 0:
+ summary = ""
+ case 1:
+ detail = errorParts[0]
+ case 2:
+ for _, h := range headers {
+ if strings.HasPrefix(errorParts[1], h) {
+ summary = h
+ detail = strings.TrimSpace(errorParts[1][len(h):])
+ break
+ }
+ }
+ if detail == "" {
+ detail = errorParts[1]
+ }
+ }
+
+ return summary, detail
+
+}
+
+func errorHTML(summary, detail string) string {
+ const html = `
+
+
+
+
+
+
+
+HashiCorp Vault
+
+
+
+
+
+
+
+`
+ return fmt.Sprintf(html, summary, detail)
+}
+
+// Help returns usage help text for the OIDC CLI auth handler.
+func (h *CLIHandler) Help() string {
+ help := `
+Usage: vault login -method=oidc [CONFIG K=V...]
+
+ The OIDC auth method allows users to authenticate using an OIDC provider.
+ The provider must be configured as part of a role by the operator.
+
+ Authenticate using role "engineering":
+
+ $ vault login -method=oidc role=engineering
+ Complete the login via your OIDC provider. Launching browser to:
+
+ https://accounts.google.com/o/oauth2/v2/...
+
+ The default browser will be opened for the user to complete the login. Alternatively,
+ the user may visit the provided URL directly.
+
+Configuration:
+
+ role=
+ Vault role of type "OIDC" to use for authentication.
+
+ port=
+ Optional localhost port to use for OIDC callback (default: 8300).
+`
+
+ return strings.TrimSpace(help)
+}
+
+const successHTML = `
+
+
+
+
+
+ Vault Authentication Succeeded
+
+
+
+
+
+
+`
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
index 5019cbd603de..3fc200df4cc2 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
@@ -4,6 +4,7 @@ import (
"crypto/tls"
"crypto/x509"
"errors"
+ "fmt"
"net/http"
"context"
@@ -29,19 +30,44 @@ func pathConfig(b *jwtAuthBackend) *framework.Path {
Type: framework.TypeString,
Description: "The CA certificate or chain of certificates, in PEM format, to use to validate conections to the OIDC Discovery URL. If not set, system certificates are used.",
},
+ "oidc_client_id": {
+ Type: framework.TypeString,
+ Description: "The OAuth Client ID configured with your OIDC provider.",
+ },
+ "oidc_client_secret": {
+ Type: framework.TypeString,
+ Description: "The OAuth Client Secret configured with your OIDC provider.",
+ DisplaySensitive: true,
+ },
+ "default_role": {
+ Type: framework.TypeString,
+ Description: "The default role to use if none is provided during login. If not set, a role is required during login.",
+ },
"jwt_validation_pubkeys": {
Type: framework.TypeCommaStringSlice,
Description: `A list of PEM-encoded public keys to use to authenticate signatures locally. Cannot be used with "oidc_discovery_url".`,
},
+ "jwt_supported_algs": {
+ Type: framework.TypeCommaStringSlice,
+ Description: `A list of supported signing algorithms. Defaults to RS256.`,
+ },
"bound_issuer": {
Type: framework.TypeString,
Description: "The value against which to match the 'iss' claim in a JWT. Optional.",
},
},
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConfigRead,
- logical.UpdateOperation: b.pathConfigWrite,
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathConfigRead,
+ Summary: "Read the current JWT authentication backend configuration.",
+ },
+
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathConfigWrite,
+ Summary: "Configure the JWT authentication backend.",
+ Description: confHelpDesc,
+ },
},
HelpSynopsis: confHelpSyn,
@@ -98,7 +124,11 @@ func (b *jwtAuthBackend) pathConfigRead(ctx context.Context, req *logical.Reques
Data: map[string]interface{}{
"oidc_discovery_url": config.OIDCDiscoveryURL,
"oidc_discovery_ca_pem": config.OIDCDiscoveryCAPEM,
+ "oidc_client_id": config.OIDCClientID,
+ "oidc_client_secret": config.OIDCClientSecret,
+ "default_role": config.DefaultRole,
"jwt_validation_pubkeys": config.JWTValidationPubKeys,
+ "jwt_supported_algs": config.JWTSupportedAlgs,
"bound_issuer": config.BoundIssuer,
},
}
@@ -110,7 +140,11 @@ func (b *jwtAuthBackend) pathConfigWrite(ctx context.Context, req *logical.Reque
config := &jwtConfig{
OIDCDiscoveryURL: d.Get("oidc_discovery_url").(string),
OIDCDiscoveryCAPEM: d.Get("oidc_discovery_ca_pem").(string),
+ OIDCClientID: d.Get("oidc_client_id").(string),
+ OIDCClientSecret: d.Get("oidc_client_secret").(string),
+ DefaultRole: d.Get("default_role").(string),
JWTValidationPubKeys: d.Get("jwt_validation_pubkeys").([]string),
+ JWTSupportedAlgs: d.Get("jwt_supported_algs").([]string),
BoundIssuer: d.Get("bound_issuer").(string),
}
@@ -120,12 +154,19 @@ func (b *jwtAuthBackend) pathConfigWrite(ctx context.Context, req *logical.Reque
config.OIDCDiscoveryURL != "" && len(config.JWTValidationPubKeys) != 0:
return logical.ErrorResponse("exactly one of 'oidc_discovery_url' and 'jwt_validation_pubkeys' must be set"), nil
+ case config.OIDCClientID != "" && config.OIDCClientSecret == "",
+ config.OIDCClientID == "" && config.OIDCClientSecret != "":
+ return logical.ErrorResponse("both 'oidc_client_id' and 'oidc_client_secret' must be set for OIDC"), nil
+
case config.OIDCDiscoveryURL != "":
_, err := b.createProvider(config)
if err != nil {
return logical.ErrorResponse(errwrap.Wrapf("error checking discovery URL: {{err}}", err).Error()), nil
}
+ case config.OIDCClientID != "" && config.OIDCDiscoveryURL == "":
+ return logical.ErrorResponse("'oidc_discovery_url' must be set for OIDC"), nil
+
case len(config.JWTValidationPubKeys) != 0:
for _, v := range config.JWTValidationPubKeys {
if _, err := certutil.ParsePublicKeyPEM([]byte(v)); err != nil {
@@ -137,6 +178,14 @@ func (b *jwtAuthBackend) pathConfigWrite(ctx context.Context, req *logical.Reque
return nil, errors.New("unknown condition")
}
+ for _, a := range config.JWTSupportedAlgs {
+ switch a {
+ case oidc.RS256, oidc.RS384, oidc.RS512, oidc.ES256, oidc.ES384, oidc.ES512, oidc.PS256, oidc.PS384, oidc.PS512:
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("Invalid supported algorithm: %s", a)), nil
+ }
+ }
+
entry, err := logical.StorageEntryJSON(configPath, config)
if err != nil {
return nil, err
@@ -181,8 +230,12 @@ func (b *jwtAuthBackend) createProvider(config *jwtConfig) (*oidc.Provider, erro
type jwtConfig struct {
OIDCDiscoveryURL string `json:"oidc_discovery_url"`
OIDCDiscoveryCAPEM string `json:"oidc_discovery_ca_pem"`
+ OIDCClientID string `json:"oidc_client_id"`
+ OIDCClientSecret string `json:"oidc_client_secret"`
JWTValidationPubKeys []string `json:"jwt_validation_pubkeys"`
+ JWTSupportedAlgs []string `json:"jwt_supported_algs"`
BoundIssuer string `json:"bound_issuer"`
+ DefaultRole string `json:"default_role"`
ParsedJWTPubKeys []interface{} `json:"-"`
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go
index 868fe9d094e4..70d7e943ad30 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go
@@ -29,9 +29,14 @@ func pathLogin(b *jwtAuthBackend) *framework.Path {
},
},
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- logical.AliasLookaheadOperation: b.pathLogin,
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathLogin,
+ Summary: pathLoginHelpSyn,
+ },
+ logical.AliasLookaheadOperation: &framework.PathOperation{
+ Callback: b.pathLogin,
+ },
},
HelpSynopsis: pathLoginHelpSyn,
@@ -40,13 +45,19 @@ func pathLogin(b *jwtAuthBackend) *framework.Path {
}
func (b *jwtAuthBackend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- token := d.Get("jwt").(string)
- if len(token) == 0 {
- return logical.ErrorResponse("missing token"), nil
+ config, err := b.config(ctx, req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return logical.ErrorResponse("could not load configuration"), nil
}
roleName := d.Get("role").(string)
- if len(roleName) == 0 {
+ if roleName == "" {
+ roleName = config.DefaultRole
+ }
+ if roleName == "" {
return logical.ErrorResponse("missing role"), nil
}
@@ -55,19 +66,16 @@ func (b *jwtAuthBackend) pathLogin(ctx context.Context, req *logical.Request, d
return nil, err
}
if role == nil {
- return logical.ErrorResponse("role could not be found"), nil
+ return logical.ErrorResponse("role %q could not be found", roleName), nil
}
- if req.Connection != nil && !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, role.BoundCIDRs) {
- return logical.ErrorResponse("request originated from invalid CIDR"), nil
+ token := d.Get("jwt").(string)
+ if len(token) == 0 {
+ return logical.ErrorResponse("missing token"), nil
}
- config, err := b.config(ctx, req.Storage)
- if err != nil {
- return nil, err
- }
- if config == nil {
- return logical.ErrorResponse("could not load configuration"), nil
+ if req.Connection != nil && !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, role.BoundCIDRs) {
+ return logical.ErrorResponse("request originated from invalid CIDR"), nil
}
// Here is where things diverge. If it is using OIDC Discovery, validate
@@ -130,118 +138,37 @@ func (b *jwtAuthBackend) pathLogin(ctx context.Context, req *logical.Request, d
}
case config.OIDCDiscoveryURL != "":
- provider, err := b.getProvider(ctx, config)
+ allClaims, err = b.verifyToken(ctx, config, role, token)
if err != nil {
- return nil, errwrap.Wrapf("error getting provider for login operation: {{err}}", err)
- }
-
- verifier := provider.Verifier(&oidc.Config{
- SkipClientIDCheck: true,
- })
-
- idToken, err := verifier.Verify(ctx, token)
- if err != nil {
- return logical.ErrorResponse(errwrap.Wrapf("error validating signature: {{err}}", err).Error()), nil
- }
-
- if err := idToken.Claims(&allClaims); err != nil {
- return logical.ErrorResponse(errwrap.Wrapf("unable to successfully parse all claims from token: {{err}}", err).Error()), nil
- }
-
- if role.BoundSubject != "" && role.BoundSubject != idToken.Subject {
- return logical.ErrorResponse("sub claim does not match bound subject"), nil
- }
- if len(role.BoundAudiences) != 0 {
- var found bool
- for _, v := range role.BoundAudiences {
- if strutil.StrListContains(idToken.Audience, v) {
- found = true
- break
- }
- }
- if !found {
- return logical.ErrorResponse("aud claim does not match any bound audience"), nil
- }
+ return logical.ErrorResponse(err.Error()), nil
}
default:
return nil, errors.New("unhandled case during login")
}
- userClaimRaw, ok := allClaims[role.UserClaim]
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("%q claim not found in token", role.UserClaim)), nil
- }
- userName, ok := userClaimRaw.(string)
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("%q claim could not be converted to string", role.UserClaim)), nil
+ alias, groupAliases, err := b.createIdentity(allClaims, role)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
}
- var groupAliases []*logical.Alias
- if role.GroupsClaim != "" {
- mapPath, err := parseClaimWithDelimiters(role.GroupsClaim, role.GroupsClaimDelimiterPattern)
- if err != nil {
- return logical.ErrorResponse(errwrap.Wrapf("error parsing delimiters for groups claim: {{err}}", err).Error()), nil
- }
- if len(mapPath) < 1 {
- return logical.ErrorResponse("unexpected length 0 of claims path after parsing groups claim against delimiters"), nil
- }
- var claimKey string
- claimMap := allClaims
- for i, key := range mapPath {
- if i == len(mapPath)-1 {
- claimKey = key
- break
- }
- nextMapRaw, ok := claimMap[key]
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("map via key %q not found while navigating group claim delimiters", key)), nil
- }
- nextMap, ok := nextMapRaw.(map[string]interface{})
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("key %q does not reference a map while navigating group claim delimiters", key)), nil
- }
- claimMap = nextMap
- }
-
- groupsClaimRaw, ok := claimMap[claimKey]
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("%q claim not found in token", role.GroupsClaim)), nil
- }
- groups, ok := groupsClaimRaw.([]interface{})
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("%q claim could not be converted to string list", role.GroupsClaim)), nil
- }
- for _, groupRaw := range groups {
- group, ok := groupRaw.(string)
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("value %v in groups claim could not be parsed as string", groupRaw)), nil
- }
- if group == "" {
- continue
- }
- groupAliases = append(groupAliases, &logical.Alias{
- Name: group,
- })
- }
+ tokenMetadata := map[string]string{"role": roleName}
+ for k, v := range alias.Metadata {
+ tokenMetadata[k] = v
}
resp := &logical.Response{
Auth: &logical.Auth{
- Policies: role.Policies,
- DisplayName: userName,
- Period: role.Period,
- NumUses: role.NumUses,
- Alias: &logical.Alias{
- Name: userName,
- },
+ Policies: role.Policies,
+ DisplayName: alias.Name,
+ Period: role.Period,
+ NumUses: role.NumUses,
+ Alias: alias,
GroupAliases: groupAliases,
InternalData: map[string]interface{}{
"role": roleName,
},
- Metadata: map[string]string{
- "role": roleName,
- },
+ Metadata: tokenMetadata,
LeaseOptions: logical.LeaseOptions{
Renewable: true,
TTL: role.TTL,
@@ -276,6 +203,120 @@ func (b *jwtAuthBackend) pathLoginRenew(ctx context.Context, req *logical.Reques
return resp, nil
}
+func (b *jwtAuthBackend) verifyToken(ctx context.Context, config *jwtConfig, role *jwtRole, rawToken string) (map[string]interface{}, error) {
+ allClaims := make(map[string]interface{})
+
+ provider, err := b.getProvider(ctx, config)
+ if err != nil {
+ return nil, errwrap.Wrapf("error getting provider for login operation: {{err}}", err)
+ }
+
+ oidcConfig := &oidc.Config{
+ SupportedSigningAlgs: config.JWTSupportedAlgs,
+ }
+
+ if role.RoleType == "oidc" {
+ oidcConfig.ClientID = config.OIDCClientID
+ } else {
+ oidcConfig.SkipClientIDCheck = true
+ }
+ verifier := provider.Verifier(oidcConfig)
+
+ idToken, err := verifier.Verify(ctx, rawToken)
+ if err != nil {
+ return nil, errwrap.Wrapf("error validating signature: {{err}}", err)
+ }
+
+ if err := idToken.Claims(&allClaims); err != nil {
+ return nil, errwrap.Wrapf("unable to successfully parse all claims from token: {{err}}", err)
+ }
+
+ if role.BoundSubject != "" && role.BoundSubject != idToken.Subject {
+ return nil, errors.New("sub claim does not match bound subject")
+ }
+ if len(role.BoundAudiences) > 0 {
+ var found bool
+ for _, v := range role.BoundAudiences {
+ if strutil.StrListContains(idToken.Audience, v) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, errors.New("aud claim does not match any bound audience")
+ }
+ }
+
+ if len(role.BoundClaims) > 0 {
+ for claim, expValue := range role.BoundClaims {
+ actValue := getClaim(b.Logger(), allClaims, claim)
+ if actValue == nil {
+ return nil, fmt.Errorf("claim is missing: %s", claim)
+ }
+
+ if expValue != actValue {
+ return nil, fmt.Errorf("claim '%s' does not match associated bound claim", claim)
+ }
+ }
+ }
+
+ return allClaims, nil
+}
+
+// createIdentity creates an alias and set of group aliases based on the role
+// definition and received claims.
+func (b *jwtAuthBackend) createIdentity(allClaims map[string]interface{}, role *jwtRole) (*logical.Alias, []*logical.Alias, error) {
+ userClaimRaw, ok := allClaims[role.UserClaim]
+ if !ok {
+ return nil, nil, fmt.Errorf("claim %q not found in token", role.UserClaim)
+ }
+ userName, ok := userClaimRaw.(string)
+ if !ok {
+ return nil, nil, fmt.Errorf("claim %q could not be converted to string", role.UserClaim)
+ }
+
+ metadata, err := extractMetadata(b.Logger(), allClaims, role.ClaimMappings)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ alias := &logical.Alias{
+ Name: userName,
+ Metadata: metadata,
+ }
+
+ var groupAliases []*logical.Alias
+
+ if role.GroupsClaim == "" {
+ return alias, groupAliases, nil
+ }
+
+ groupsClaimRaw := getClaim(b.Logger(), allClaims, role.GroupsClaim)
+
+ if groupsClaimRaw == nil {
+ return nil, nil, fmt.Errorf("%q claim not found in token", role.GroupsClaim)
+ }
+ groups, ok := groupsClaimRaw.([]interface{})
+
+ if !ok {
+ return nil, nil, fmt.Errorf("%q claim could not be converted to string list", role.GroupsClaim)
+ }
+ for _, groupRaw := range groups {
+ group, ok := groupRaw.(string)
+ if !ok {
+ return nil, nil, fmt.Errorf("value %v in groups claim could not be parsed as string", groupRaw)
+ }
+ if group == "" {
+ continue
+ }
+ groupAliases = append(groupAliases, &logical.Alias{
+ Name: group,
+ })
+ }
+
+ return alias, groupAliases, nil
+}
+
const (
pathLoginHelpSyn = `
Authenticates to Vault using a JWT (or OIDC) token.
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go
new file mode 100644
index 000000000000..b437ed6e5474
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go
@@ -0,0 +1,303 @@
+package jwtauth
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ oidc "github.com/coreos/go-oidc"
+ "github.com/hashicorp/errwrap"
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "golang.org/x/oauth2"
+)
+
+var oidcStateTimeout = 10 * time.Minute
+
+// OIDC error prefixes. These are searched for specifically by the UI, so any
+// changes to them must be aligned with a UI change.
+const errLoginFailed = "Vault login failed."
+const errNoResponse = "No response from provider."
+const errTokenVerification = "Token verification failed."
+
+// oidcState is created when an authURL is requested. The state identifier is
+// passed throughout the OAuth process.
+type oidcState struct {
+ rolename string
+ nonce string
+ redirectURI string
+}
+
+func pathOIDC(b *jwtAuthBackend) []*framework.Path {
+ return []*framework.Path{
+ {
+ Pattern: `oidc/callback`,
+ Fields: map[string]*framework.FieldSchema{
+ "state": {
+ Type: framework.TypeString,
+ },
+ "code": {
+ Type: framework.TypeString,
+ },
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathCallback,
+ Summary: "Callback endpoint to complete an OIDC login.",
+ },
+ },
+ },
+ {
+ Pattern: `oidc/auth_url`,
+ Fields: map[string]*framework.FieldSchema{
+ "role": {
+ Type: framework.TypeLowerCaseString,
+ Description: "The role to issue an OIDC authorization URL against.",
+ },
+ "redirect_uri": {
+ Type: framework.TypeString,
+ Description: "The OAuth redirect_uri to use in the authorization URL.",
+ },
+ },
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.authURL,
+ Summary: "Request an authorization URL to start an OIDC login flow.",
+ },
+ },
+ },
+ }
+}
+
+func (b *jwtAuthBackend) pathCallback(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ state := b.verifyState(d.Get("state").(string))
+ if state == nil {
+ return logical.ErrorResponse(errLoginFailed + " Expired or missing OAuth state."), nil
+ }
+
+ roleName := state.rolename
+ role, err := b.role(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(errLoginFailed + " Role could not be found"), nil
+ }
+
+ config, err := b.config(ctx, req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return logical.ErrorResponse(errLoginFailed + " Could not load configuration"), nil
+ }
+
+ provider, err := b.getProvider(ctx, config)
+ if err != nil {
+ return nil, errwrap.Wrapf(errLoginFailed+" Error getting provider for login operation: {{err}}", err)
+ }
+
+ var oauth2Config = oauth2.Config{
+ ClientID: config.OIDCClientID,
+ ClientSecret: config.OIDCClientSecret,
+ RedirectURL: state.redirectURI,
+ Endpoint: provider.Endpoint(),
+ Scopes: []string{oidc.ScopeOpenID},
+ }
+
+ code := d.Get("code").(string)
+ if code == "" {
+ return logical.ErrorResponse(errLoginFailed + " OAuth code parameter not provided"), nil
+ }
+
+ oauth2Token, err := oauth2Config.Exchange(ctx, code)
+ if err != nil {
+ return logical.ErrorResponse(errLoginFailed+" Error exchanging oidc code: %q.", err.Error()), nil
+ }
+
+ // Extract the ID Token from OAuth2 token.
+ rawToken, ok := oauth2Token.Extra("id_token").(string)
+ if !ok {
+ return logical.ErrorResponse(errTokenVerification + " No id_token found in response."), nil
+ }
+
+ // Parse and verify ID Token payload.
+ allClaims, err := b.verifyToken(ctx, config, role, rawToken)
+ if err != nil {
+ return logical.ErrorResponse("%s %s", errTokenVerification, err.Error()), nil
+ }
+
+ // Attempt to fetch information from the /userinfo endpoint and merge it with
+ // the existing claims data. A failure to fetch additional information from this
+ // endpoint will not invalidate the authorization flow.
+ if userinfo, err := provider.UserInfo(ctx, oauth2.StaticTokenSource(oauth2Token)); err == nil {
+ _ = userinfo.Claims(&allClaims)
+ } else {
+ logFunc := b.Logger().Warn
+ if strings.Contains(err.Error(), "user info endpoint is not supported") {
+ logFunc = b.Logger().Info
+ }
+ logFunc("error reading /userinfo endpoint", "error", err)
+ }
+
+ if allClaims["nonce"] != state.nonce {
+ return logical.ErrorResponse(errTokenVerification + " Invalid ID token nonce."), nil
+ }
+ delete(allClaims, "nonce")
+
+ alias, groupAliases, err := b.createIdentity(allClaims, role)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ tokenMetadata := map[string]string{"role": roleName}
+ for k, v := range alias.Metadata {
+ tokenMetadata[k] = v
+ }
+
+ resp := &logical.Response{
+ Auth: &logical.Auth{
+ Policies: role.Policies,
+ DisplayName: alias.Name,
+ Period: role.Period,
+ NumUses: role.NumUses,
+ Alias: alias,
+ GroupAliases: groupAliases,
+ InternalData: map[string]interface{}{
+ "role": roleName,
+ },
+ Metadata: tokenMetadata,
+ LeaseOptions: logical.LeaseOptions{
+ Renewable: true,
+ TTL: role.TTL,
+ MaxTTL: role.MaxTTL,
+ },
+ BoundCIDRs: role.BoundCIDRs,
+ },
+ }
+
+ return resp, nil
+}
+
+// authURL returns a URL used for redirection to receive an authorization code.
+// This path requires a role name, or that a default_role has been configured.
+// Because this endpoint is unauthenticated, the response to invalid or non-OIDC
+// roles is intentionally non-descriptive and will simply be an empty string.
+func (b *jwtAuthBackend) authURL(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ logger := b.Logger()
+
+ // default response for most error/invalid conditions
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "auth_url": "",
+ },
+ }
+
+ config, err := b.config(ctx, req.Storage)
+ if err != nil {
+ logger.Warn("error loading configuration", "error", err)
+ return resp, nil
+ }
+
+ if config == nil {
+ logger.Warn("nil configuration")
+ return resp, nil
+ }
+
+ roleName := d.Get("role").(string)
+ if roleName == "" {
+ roleName = config.DefaultRole
+ if roleName == "" {
+ return logical.ErrorResponse("missing role"), nil
+ }
+ }
+
+ redirectURI := d.Get("redirect_uri").(string)
+ if redirectURI == "" {
+ return logical.ErrorResponse("missing redirect_uri"), nil
+ }
+
+ role, err := b.role(ctx, req.Storage, roleName)
+ if err != nil {
+ logger.Warn("error loading role", "error", err)
+ return resp, nil
+ }
+
+ if role == nil || role.RoleType != "oidc" {
+ logger.Warn("invalid role type", "role type", role)
+ return resp, nil
+ }
+
+ if !strutil.StrListContains(role.AllowedRedirectURIs, redirectURI) {
+ logger.Warn("unauthorized redirect_uri", "redirect_uri", redirectURI)
+ return resp, nil
+ }
+
+ provider, err := b.getProvider(ctx, config)
+ if err != nil {
+ logger.Warn("error getting provider for login operation", "error", err)
+ return resp, nil
+ }
+
+ // "openid" is a required scope for OpenID Connect flows
+ scopes := append([]string{oidc.ScopeOpenID}, role.OIDCScopes...)
+
+ // Configure an OpenID Connect aware OAuth2 client
+ oauth2Config := oauth2.Config{
+ ClientID: config.OIDCClientID,
+ ClientSecret: config.OIDCClientSecret,
+ RedirectURL: redirectURI,
+ Endpoint: provider.Endpoint(),
+ Scopes: scopes,
+ }
+
+ stateID, nonce, err := b.createState(roleName, redirectURI)
+ if err != nil {
+ logger.Warn("error generating OAuth state", "error", err)
+ return resp, nil
+ }
+
+ resp.Data["auth_url"] = oauth2Config.AuthCodeURL(stateID, oidc.Nonce(nonce))
+
+ return resp, nil
+}
+
+// createState makes an expiring state object, associated with a random state ID
+// that is passed throughout the OAuth process. A nonce is also included in the
+// auth process, and for simplicity will be identical in length/format to the state ID.
+func (b *jwtAuthBackend) createState(rolename, redirectURI string) (string, string, error) {
+ // Get enough bytes for 2 160-bit IDs (per rfc6749#section-10.10)
+ bytes, err := uuid.GenerateRandomBytes(2 * 20)
+ if err != nil {
+ return "", "", err
+ }
+
+ stateID := fmt.Sprintf("%x", bytes[:20])
+ nonce := fmt.Sprintf("%x", bytes[20:])
+
+ b.oidcStates.SetDefault(stateID, &oidcState{
+ rolename: rolename,
+ nonce: nonce,
+ redirectURI: redirectURI,
+ })
+
+ return stateID, nonce, nil
+}
+
+// verifyState tests whether the provided state ID is valid and returns the
+// associated state object if so. A nil state is returned if the ID is not found
+// or expired. The state should only ever be retrieved once and is deleted as
+// part of this request.
+func (b *jwtAuthBackend) verifyState(stateID string) *oidcState {
+ defer b.oidcStates.Delete(stateID)
+
+ if stateRaw, ok := b.oidcStates.Get(stateID); ok {
+ return stateRaw.(*oidcState)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go
index 9ca1fec415f8..6eb701601c4e 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go
@@ -7,19 +7,25 @@ import (
"strings"
"time"
- "github.com/hashicorp/errwrap"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/policyutil"
+ "github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
+var reservedMetadata = []string{"role"}
+
func pathRoleList(b *jwtAuthBackend) *framework.Path {
return &framework.Path{
Pattern: "role/?",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ListOperation: &framework.PathOperation{
+ Callback: b.pathRoleList,
+ Summary: strings.TrimSpace(roleHelp["role-list"][0]),
+ Description: strings.TrimSpace(roleHelp["role-list"][1]),
+ },
},
HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]),
HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]),
@@ -35,6 +41,10 @@ func pathRole(b *jwtAuthBackend) *framework.Path {
Type: framework.TypeLowerCaseString,
Description: "Name of the role.",
},
+ "role_type": {
+ Type: framework.TypeString,
+ Description: "Type of the role, either 'jwt' or 'oidc'.",
+ },
"policies": {
Type: framework.TypeCommaStringSlice,
Description: "List of policies on the role.",
@@ -68,6 +78,14 @@ TTL will be set to the value of this parameter.`,
Type: framework.TypeCommaStringSlice,
Description: `Comma-separated list of 'aud' claims that are valid for login; any match is sufficient`,
},
+ "bound_claims": {
+ Type: framework.TypeMap,
+ Description: `Map of claims/values which must match for login`,
+ },
+ "claim_mappings": {
+ Type: framework.TypeKVPairs,
+ Description: `Mappings of claims (key) that will be copied to a metadata field (value)`,
+ },
"user_claim": {
Type: framework.TypeString,
Description: `The claim to use for the Identity entity alias name`,
@@ -76,22 +94,43 @@ TTL will be set to the value of this parameter.`,
Type: framework.TypeString,
Description: `The claim to use for the Identity group alias names`,
},
- "groups_claim_delimiter_pattern": {
- Type: framework.TypeString,
- Description: `A pattern of delimiters used to allow the groups_claim to live outside of the top-level JWT structure. For instance, a "groups_claim" of "meta/user.name/groups" with this field set to "//" will expect nested structures named "meta", "user.name", and "groups". If this field was set to "/./" the groups information would expect to be via nested structures of "meta", "user", "name", and "groups".`,
- },
"bound_cidrs": {
Type: framework.TypeCommaStringSlice,
Description: `Comma-separated list of IP CIDRS that are allowed to
authenticate against this role`,
},
+ "oidc_scopes": {
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated list of OIDC scopes`,
+ },
+ "allowed_redirect_uris": {
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated list of allowed values for redirect_uri`,
+ },
},
ExistenceCheck: b.pathRoleExistenceCheck,
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathRoleCreateUpdate,
- logical.UpdateOperation: b.pathRoleCreateUpdate,
- logical.ReadOperation: b.pathRoleRead,
- logical.DeleteOperation: b.pathRoleDelete,
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathRoleRead,
+ Summary: "Read an existing role.",
+ },
+
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathRoleCreateUpdate,
+ Summary: strings.TrimSpace(roleHelp["role"][0]),
+ Description: strings.TrimSpace(roleHelp["role"][1]),
+ },
+
+ logical.CreateOperation: &framework.PathOperation{
+ Callback: b.pathRoleCreateUpdate,
+ Summary: strings.TrimSpace(roleHelp["role"][0]),
+ Description: strings.TrimSpace(roleHelp["role"][1]),
+ },
+
+ logical.DeleteOperation: &framework.PathOperation{
+ Callback: b.pathRoleDelete,
+ Summary: "Delete an existing role.",
+ },
},
HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]),
HelpDescription: strings.TrimSpace(roleHelp["role"][1]),
@@ -99,6 +138,8 @@ authenticate against this role`,
}
type jwtRole struct {
+ RoleType string `json:"role_type"`
+
// Policies that are to be required by the token to access this role
Policies []string `json:"policies"`
@@ -119,12 +160,15 @@ type jwtRole struct {
Period time.Duration `json:"period"`
// Role binding properties
- BoundAudiences []string `json:"bound_audiences"`
- BoundSubject string `json:"bound_subject"`
- BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
- UserClaim string `json:"user_claim"`
- GroupsClaim string `json:"groups_claim"`
- GroupsClaimDelimiterPattern string `json:"groups_claim_delimiter_pattern"`
+ BoundAudiences []string `json:"bound_audiences"`
+ BoundSubject string `json:"bound_subject"`
+ BoundClaims map[string]interface{} `json:"bound_claims"`
+ ClaimMappings map[string]string `json:"claim_mappings"`
+ BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
+ UserClaim string `json:"user_claim"`
+ GroupsClaim string `json:"groups_claim"`
+ OIDCScopes []string `json:"oidc_scopes"`
+ AllowedRedirectURIs []string `json:"allowed_redirect_uris"`
}
// role takes a storage backend and the name and returns the role's storage
@@ -182,17 +226,20 @@ func (b *jwtAuthBackend) pathRoleRead(ctx context.Context, req *logical.Request,
// Create a map of data to be returned
resp := &logical.Response{
Data: map[string]interface{}{
- "policies": role.Policies,
- "num_uses": role.NumUses,
- "period": int64(role.Period.Seconds()),
- "ttl": int64(role.TTL.Seconds()),
- "max_ttl": int64(role.MaxTTL.Seconds()),
- "bound_audiences": role.BoundAudiences,
- "bound_subject": role.BoundSubject,
- "bound_cidrs": role.BoundCIDRs,
- "user_claim": role.UserClaim,
- "groups_claim": role.GroupsClaim,
- "groups_claim_delimiter_pattern": role.GroupsClaimDelimiterPattern,
+ "role_type": role.RoleType,
+ "policies": role.Policies,
+ "num_uses": role.NumUses,
+ "period": int64(role.Period.Seconds()),
+ "ttl": int64(role.TTL.Seconds()),
+ "max_ttl": int64(role.MaxTTL.Seconds()),
+ "bound_audiences": role.BoundAudiences,
+ "bound_subject": role.BoundSubject,
+ "bound_cidrs": role.BoundCIDRs,
+ "bound_claims": role.BoundClaims,
+ "claim_mappings": role.ClaimMappings,
+ "user_claim": role.UserClaim,
+ "groups_claim": role.GroupsClaim,
+ "allowed_redirect_uris": role.AllowedRedirectURIs,
},
}
@@ -236,6 +283,15 @@ func (b *jwtAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.
role = new(jwtRole)
}
+ roleType := data.Get("role_type").(string)
+ if roleType == "" {
+ roleType = "jwt"
+ }
+ if roleType != "jwt" && roleType != "oidc" {
+ return logical.ErrorResponse("invalid 'role_type': %s", roleType), nil
+ }
+ role.RoleType = roleType
+
if policiesRaw, ok := data.GetOk("policies"); ok {
role.Policies = policyutil.ParsePolicies(policiesRaw)
}
@@ -287,6 +343,29 @@ func (b *jwtAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.
role.BoundCIDRs = parsedCIDRs
}
+ if boundClaimsRaw, ok := data.GetOk("bound_claims"); ok {
+ role.BoundClaims = boundClaimsRaw.(map[string]interface{})
+ }
+
+ if claimMappingsRaw, ok := data.GetOk("claim_mappings"); ok {
+ claimMappings := claimMappingsRaw.(map[string]string)
+
+ // sanity check mappings for duplicates and collision with reserved names
+ targets := make(map[string]bool)
+ for _, metadataKey := range claimMappings {
+ if strutil.StrListContains(reservedMetadata, metadataKey) {
+ return logical.ErrorResponse("metadata key '%s' is reserved and may not be a mapping destination", metadataKey), nil
+ }
+
+ if targets[metadataKey] {
+ return logical.ErrorResponse("multiple keys are mapped to metadata key '%s'", metadataKey), nil
+ }
+ targets[metadataKey] = true
+ }
+
+ role.ClaimMappings = claimMappings
+ }
+
if userClaim, ok := data.GetOk("user_claim"); ok {
role.UserClaim = userClaim.(string)
}
@@ -298,21 +377,24 @@ func (b *jwtAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.
role.GroupsClaim = groupsClaim.(string)
}
- if groupsClaimDelimiterPattern, ok := data.GetOk("groups_claim_delimiter_pattern"); ok {
- role.GroupsClaimDelimiterPattern = groupsClaimDelimiterPattern.(string)
+ if oidcScopes, ok := data.GetOk("oidc_scopes"); ok {
+ role.OIDCScopes = oidcScopes.([]string)
}
- // Validate claim/delims
- if role.GroupsClaim != "" {
- if _, err := parseClaimWithDelimiters(role.GroupsClaim, role.GroupsClaimDelimiterPattern); err != nil {
- return logical.ErrorResponse(errwrap.Wrapf("error validating delimiters for groups claim: {{err}}", err).Error()), nil
- }
+ allowedRedirectURIs := data.Get("allowed_redirect_uris").([]string)
+ if roleType == "oidc" && len(allowedRedirectURIs) == 0 {
+ return logical.ErrorResponse("'allowed_redirect_uris' must be set"), nil
}
+ role.AllowedRedirectURIs = allowedRedirectURIs
- if len(role.BoundAudiences) == 0 &&
- len(role.BoundCIDRs) == 0 &&
- role.BoundSubject == "" {
- return logical.ErrorResponse("must have at least one bound constraint when creating/updating a role"), nil
+	// OIDC verification will enforce that the audience matches the configured client_id.
+ // For other methods, require at least one bound constraint.
+ if roleType != "oidc" {
+ if len(role.BoundAudiences) == 0 &&
+ len(role.BoundCIDRs) == 0 &&
+ role.BoundSubject == "" {
+ return logical.ErrorResponse("must have at least one bound constraint when creating/updating a role"), nil
+ }
}
// Check that the TTL value provided is less than the MaxTTL.
@@ -340,32 +422,6 @@ func (b *jwtAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.
return resp, nil
}
-// parseClaimWithDelimiters parses a given claim string and ensures that we can
-// separate it out into a "map path"
-func parseClaimWithDelimiters(claim, delimiters string) ([]string, error) {
- if delimiters == "" {
- return []string{claim}, nil
- }
- var ret []string
- for _, runeVal := range delimiters {
- idx := strings.IndexRune(claim, runeVal)
- switch idx {
- case -1:
- return nil, fmt.Errorf("could not find instance of %q delimiter in claim", string(runeVal))
- case 0:
- return nil, fmt.Errorf("instance of %q delimiter in claim is at beginning of claim string", string(runeVal))
- case len(claim) - 1:
- return nil, fmt.Errorf("instance of %q delimiter in claim is at end of claim string", string(runeVal))
- default:
- ret = append(ret, claim[:idx])
- claim = claim[idx+1:]
- }
- }
- ret = append(ret, claim)
-
- return ret, nil
-}
-
// roleStorageEntry stores all the options that are set on an role
var roleHelp = map[string][2]string{
"role-list": {
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go
new file mode 100644
index 000000000000..7930f9ef8be5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go
@@ -0,0 +1,37 @@
+// A throwaway file for super simple testing via a UI
+package jwtauth
+
+import (
+ "context"
+ "io/ioutil"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathUI(b *jwtAuthBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: `ui$`,
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathUI,
+ },
+ }
+}
+
+func (b *jwtAuthBackend) pathUI(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ data, err := ioutil.ReadFile("test_ui.html")
+ if err != nil {
+ panic(err)
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ logical.HTTPStatusCode: 200,
+ logical.HTTPRawBody: string(data),
+ logical.HTTPContentType: "text/html",
+ },
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html
new file mode 100644
index 000000000000..dd6502ed73f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html
@@ -0,0 +1,37 @@
+
+
+
+Role:
+
+Login
+
+
+
+
+
diff --git a/vendor/github.com/mitchellh/pointerstructure/README.md b/vendor/github.com/mitchellh/pointerstructure/README.md
new file mode 100644
index 000000000000..13e3358557f3
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/README.md
@@ -0,0 +1,74 @@
+# pointerstructure [![GoDoc](https://godoc.org/github.com/mitchellh/pointerstructure?status.svg)](https://godoc.org/github.com/mitchellh/pointerstructure)
+
+pointerstructure is a Go library for identifying a specific value within
+any Go structure using a string syntax.
+
+pointerstructure is based on
+[JSON Pointer (RFC 6901)](https://tools.ietf.org/html/rfc6901), but
+reimplemented for Go.
+
+The goal of pointerstructure is to provide a single, well-known format
+for addressing a specific value. This can be useful for user provided
+input on structures, diffs of structures, etc.
+
+## Features
+
+ * Get the value for an address
+
+ * Set the value for an address within an existing structure
+
+ * Delete the value at an address
+
+ * Sorting a list of addresses
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/pointerstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/pointerstructure).
+
+A quick code example is shown below:
+
+```go
+complex := map[string]interface{}{
+ "alice": 42,
+ "bob": []interface{}{
+ map[string]interface{}{
+ "name": "Bob",
+ },
+ },
+}
+
+value, err := pointerstructure.Get(complex, "/bob/0/name")
+if err != nil {
+ panic(err)
+}
+
+fmt.Printf("%s", value)
+// Output:
+// Bob
+```
+
+Continuing the example above, you can also set values:
+
+```go
+value, err = pointerstructure.Set(complex, "/bob/0/name", "Alice")
+if err != nil {
+ panic(err)
+}
+
+value, err = pointerstructure.Get(complex, "/bob/0/name")
+if err != nil {
+ panic(err)
+}
+
+fmt.Printf("%s", value)
+// Output:
+// Alice
+```
diff --git a/vendor/github.com/mitchellh/pointerstructure/delete.go b/vendor/github.com/mitchellh/pointerstructure/delete.go
new file mode 100644
index 000000000000..5ed6b4bffc39
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/delete.go
@@ -0,0 +1,112 @@
+package pointerstructure
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Delete deletes the value specified by the pointer p in structure s.
+//
+// When deleting a slice index, all other elements will be shifted to
+// the left. This is specified in RFC6902 (JSON Patch) and not RFC6901 since
+// RFC6901 doesn't specify operations on pointers. If you don't want to
+// shift elements, you should use Set to set the slice index to the zero value.
+//
+// The structure s must have non-zero values set up to this pointer.
+// For example, if deleting "/bob/0/name", then "/bob/0" must be set already.
+//
+// The returned value is potentially a new value if this pointer represents
+// the root document. Otherwise, the returned value will always be s.
+func (p *Pointer) Delete(s interface{}) (interface{}, error) {
+ // if we represent the root doc, we've deleted everything
+ if len(p.Parts) == 0 {
+ return nil, nil
+ }
+
+ // Save the original since this is going to be our return value
+ originalS := s
+
+ // Get the parent value
+ var err error
+ s, err = p.Parent().Get(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // Map for lookup of getter to call for type
+ funcMap := map[reflect.Kind]deleteFunc{
+ reflect.Array: p.deleteSlice,
+ reflect.Map: p.deleteMap,
+ reflect.Slice: p.deleteSlice,
+ }
+
+ val := reflect.ValueOf(s)
+ for val.Kind() == reflect.Interface {
+ val = val.Elem()
+ }
+
+ for val.Kind() == reflect.Ptr {
+ val = reflect.Indirect(val)
+ }
+
+ f, ok := funcMap[val.Kind()]
+ if !ok {
+ return nil, fmt.Errorf("delete %s: invalid value kind: %s", p, val.Kind())
+ }
+
+ result, err := f(originalS, val)
+ if err != nil {
+ return nil, fmt.Errorf("delete %s: %s", p, err)
+ }
+
+ return result, nil
+}
+
+type deleteFunc func(interface{}, reflect.Value) (interface{}, error)
+
+func (p *Pointer) deleteMap(root interface{}, m reflect.Value) (interface{}, error) {
+ part := p.Parts[len(p.Parts)-1]
+ key, err := coerce(reflect.ValueOf(part), m.Type().Key())
+ if err != nil {
+ return root, err
+ }
+
+ // Delete the key
+ var elem reflect.Value
+ m.SetMapIndex(key, elem)
+ return root, nil
+}
+
+func (p *Pointer) deleteSlice(root interface{}, s reflect.Value) (interface{}, error) {
+ // Coerce the key to an int
+ part := p.Parts[len(p.Parts)-1]
+ idxVal, err := coerce(reflect.ValueOf(part), reflect.TypeOf(42))
+ if err != nil {
+ return root, err
+ }
+ idx := int(idxVal.Int())
+
+ // Verify we're within bounds
+ if idx < 0 || idx >= s.Len() {
+ return root, fmt.Errorf(
+ "index %d is out of range (length = %d)", idx, s.Len())
+ }
+
+ // Mimicking the following with reflection to do this:
+ //
+ // copy(a[i:], a[i+1:])
+ // a[len(a)-1] = nil // or the zero value of T
+ // a = a[:len(a)-1]
+
+ // copy(a[i:], a[i+1:])
+ reflect.Copy(s.Slice(idx, s.Len()), s.Slice(idx+1, s.Len()))
+
+ // a[len(a)-1] = nil // or the zero value of T
+ s.Index(s.Len() - 1).Set(reflect.Zero(s.Type().Elem()))
+
+ // a = a[:len(a)-1]
+ s = s.Slice(0, s.Len()-1)
+
+ // set the slice back on the parent
+ return p.Parent().Set(root, s.Interface())
+}
diff --git a/vendor/github.com/mitchellh/pointerstructure/get.go b/vendor/github.com/mitchellh/pointerstructure/get.go
new file mode 100644
index 000000000000..15137c1570ef
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/get.go
@@ -0,0 +1,91 @@
+package pointerstructure
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Get reads the value out of the total value v.
+func (p *Pointer) Get(v interface{}) (interface{}, error) {
+ // fast-path the empty address case to avoid reflect.ValueOf below
+ if len(p.Parts) == 0 {
+ return v, nil
+ }
+
+ // Map for lookup of getter to call for type
+ funcMap := map[reflect.Kind]func(string, reflect.Value) (reflect.Value, error){
+ reflect.Array: p.getSlice,
+ reflect.Map: p.getMap,
+ reflect.Slice: p.getSlice,
+ }
+
+ currentVal := reflect.ValueOf(v)
+ for i, part := range p.Parts {
+ for currentVal.Kind() == reflect.Interface {
+ currentVal = currentVal.Elem()
+ }
+
+ for currentVal.Kind() == reflect.Ptr {
+ currentVal = reflect.Indirect(currentVal)
+ }
+
+ f, ok := funcMap[currentVal.Kind()]
+ if !ok {
+ return nil, fmt.Errorf(
+ "%s: at part %d, invalid value kind: %s", p, i, currentVal.Kind())
+ }
+
+ var err error
+ currentVal, err = f(part, currentVal)
+ if err != nil {
+ return nil, fmt.Errorf("%s at part %d: %s", p, i, err)
+ }
+ }
+
+ return currentVal.Interface(), nil
+}
+
+func (p *Pointer) getMap(part string, m reflect.Value) (reflect.Value, error) {
+ var zeroValue reflect.Value
+
+ // Coerce the string part to the correct key type
+ key, err := coerce(reflect.ValueOf(part), m.Type().Key())
+ if err != nil {
+ return zeroValue, err
+ }
+
+ // Verify that the key exists
+ found := false
+ for _, k := range m.MapKeys() {
+ if k.Interface() == key.Interface() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return zeroValue, fmt.Errorf("couldn't find key %#v", key.Interface())
+ }
+
+ // Get the key
+ return m.MapIndex(key), nil
+}
+
+func (p *Pointer) getSlice(part string, v reflect.Value) (reflect.Value, error) {
+ var zeroValue reflect.Value
+
+ // Coerce the key to an int
+ idxVal, err := coerce(reflect.ValueOf(part), reflect.TypeOf(42))
+ if err != nil {
+ return zeroValue, err
+ }
+ idx := int(idxVal.Int())
+
+ // Verify we're within bounds
+ if idx < 0 || idx >= v.Len() {
+ return zeroValue, fmt.Errorf(
+ "index %d is out of range (length = %d)", idx, v.Len())
+ }
+
+ // Get the key
+ return v.Index(idx), nil
+}
diff --git a/vendor/github.com/mitchellh/pointerstructure/parse.go b/vendor/github.com/mitchellh/pointerstructure/parse.go
new file mode 100644
index 000000000000..c34e8d465fd7
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/parse.go
@@ -0,0 +1,57 @@
+package pointerstructure
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Parse parses a pointer from the input string. The input string
+// is expected to follow the format specified by RFC 6901: '/'-separated
+// parts. Each part can contain escape codes to contain '/' or '~'.
+func Parse(input string) (*Pointer, error) {
+ // Special case the empty case
+ if input == "" {
+ return &Pointer{}, nil
+ }
+
+ // We expect the first character to be "/"
+ if input[0] != '/' {
+ return nil, fmt.Errorf(
+ "parse Go pointer %q: first char must be '/'", input)
+ }
+
+ // Trim out the first slash so we don't have to +1 every index
+ input = input[1:]
+
+ // Parse out all the parts
+ var parts []string
+ lastSlash := -1
+ for i, r := range input {
+ if r == '/' {
+ parts = append(parts, input[lastSlash+1:i])
+ lastSlash = i
+ }
+ }
+
+ // Add last part
+ parts = append(parts, input[lastSlash+1:])
+
+ // Process each part for string replacement
+ for i, p := range parts {
+ // Replace ~1 followed by ~0 as specified by the RFC
+ parts[i] = strings.Replace(
+ strings.Replace(p, "~1", "/", -1), "~0", "~", -1)
+ }
+
+ return &Pointer{Parts: parts}, nil
+}
+
+// MustParse is like Parse but panics if the input cannot be parsed.
+func MustParse(input string) *Pointer {
+ p, err := Parse(input)
+ if err != nil {
+ panic(err)
+ }
+
+ return p
+}
diff --git a/vendor/github.com/mitchellh/pointerstructure/pointer.go b/vendor/github.com/mitchellh/pointerstructure/pointer.go
new file mode 100644
index 000000000000..3a8f88b918fb
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/pointer.go
@@ -0,0 +1,123 @@
+// Package pointerstructure provides functions for identifying a specific
+// value within any Go structure using a string syntax.
+//
+// The syntax used is based on JSON Pointer (RFC 6901).
+package pointerstructure
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+// Pointer represents a pointer to a specific value. You can construct
+// a pointer manually or use Parse.
+type Pointer struct {
+ // Parts are the pointer parts. No escape codes are processed here.
+ // The values are expected to be exact. If you have escape codes, use
+ // the Parse functions.
+ Parts []string
+}
+
+// Get reads the value at the given pointer.
+//
+// This is a shorthand for calling Parse on the pointer and then calling Get
+// on that result. An error will be returned if the value cannot be found or
+// there is an error with the format of pointer.
+func Get(value interface{}, pointer string) (interface{}, error) {
+ p, err := Parse(pointer)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.Get(value)
+}
+
+// Set sets the value at the given pointer.
+//
+// This is a shorthand for calling Parse on the pointer and then calling Set
+// on that result. An error will be returned if the value cannot be found or
+// there is an error with the format of pointer.
+//
+// Set returns the complete document, which might change if the pointer value
+// points to the root ("").
+func Set(doc interface{}, pointer string, value interface{}) (interface{}, error) {
+ p, err := Parse(pointer)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.Set(doc, value)
+}
+
+// String returns the string value that can be sent back to Parse to get
+// the same Pointer result.
+func (p *Pointer) String() string {
+ if len(p.Parts) == 0 {
+ return ""
+ }
+
+ // Copy the parts so we can convert back the escapes
+ result := make([]string, len(p.Parts))
+ copy(result, p.Parts)
+ for i, p := range p.Parts {
+ result[i] = strings.Replace(
+ strings.Replace(p, "~", "~0", -1), "/", "~1", -1)
+
+ }
+
+ return "/" + strings.Join(result, "/")
+}
+
+// Parent returns a pointer to the parent element of this pointer.
+//
+// If Pointer represents the root (empty parts), a pointer representing
+// the root is returned. Therefore, to check for the root, IsRoot() should be
+// called.
+func (p *Pointer) Parent() *Pointer {
+ // If this is root, then we just return a new root pointer. We allocate
+ // a new one though so this can still be modified.
+ if p.IsRoot() {
+ return &Pointer{}
+ }
+
+ parts := make([]string, len(p.Parts)-1)
+ copy(parts, p.Parts[:len(p.Parts)-1])
+ return &Pointer{
+ Parts: parts,
+ }
+}
+
+// IsRoot returns true if this pointer represents the root document.
+func (p *Pointer) IsRoot() bool {
+ return len(p.Parts) == 0
+}
+
+// coerce is a helper to coerce a value to a specific type if it must
+// and if it's possible. If it isn't possible, an error is returned.
+func coerce(value reflect.Value, to reflect.Type) (reflect.Value, error) {
+ // If the value is already assignable to the type, then let it go
+ if value.Type().AssignableTo(to) {
+ return value, nil
+ }
+
+ // If a direct conversion is possible, do that
+ if value.Type().ConvertibleTo(to) {
+ return value.Convert(to), nil
+ }
+
+ // Create a new value to hold our result
+ result := reflect.New(to)
+
+ // Decode
+ if err := mapstructure.WeakDecode(value.Interface(), result.Interface()); err != nil {
+ return result, fmt.Errorf(
+ "couldn't convert value %#v to type %s",
+ value.Interface(), to.String())
+ }
+
+ // We need to indirect the value since reflect.New always creates a pointer
+ return reflect.Indirect(result), nil
+}
diff --git a/vendor/github.com/mitchellh/pointerstructure/set.go b/vendor/github.com/mitchellh/pointerstructure/set.go
new file mode 100644
index 000000000000..a396ac62f295
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/set.go
@@ -0,0 +1,122 @@
+package pointerstructure
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Set writes a value v to the pointer p in structure s.
+//
+// The structure s must have non-zero values set up to this pointer.
+// For example, if setting "/bob/0/name", then "/bob/0" must be set already.
+//
+// The returned value is potentially a new value if this pointer represents
+// the root document. Otherwise, the returned value will always be s.
+func (p *Pointer) Set(s, v interface{}) (interface{}, error) {
+ // if we represent the root doc, return that
+ if len(p.Parts) == 0 {
+ return v, nil
+ }
+
+ // Save the original since this is going to be our return value
+ originalS := s
+
+ // Get the parent value
+ var err error
+ s, err = p.Parent().Get(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // Map for lookup of getter to call for type
+ funcMap := map[reflect.Kind]setFunc{
+ reflect.Array: p.setSlice,
+ reflect.Map: p.setMap,
+ reflect.Slice: p.setSlice,
+ }
+
+ val := reflect.ValueOf(s)
+ for val.Kind() == reflect.Interface {
+ val = val.Elem()
+ }
+
+ for val.Kind() == reflect.Ptr {
+ val = reflect.Indirect(val)
+ }
+
+ f, ok := funcMap[val.Kind()]
+ if !ok {
+ return nil, fmt.Errorf("set %s: invalid value kind: %s", p, val.Kind())
+ }
+
+ result, err := f(originalS, val, reflect.ValueOf(v))
+ if err != nil {
+ return nil, fmt.Errorf("set %s: %s", p, err)
+ }
+
+ return result, nil
+}
+
+type setFunc func(interface{}, reflect.Value, reflect.Value) (interface{}, error)
+
+func (p *Pointer) setMap(root interface{}, m, value reflect.Value) (interface{}, error) {
+ part := p.Parts[len(p.Parts)-1]
+ key, err := coerce(reflect.ValueOf(part), m.Type().Key())
+ if err != nil {
+ return root, err
+ }
+
+ elem, err := coerce(value, m.Type().Elem())
+ if err != nil {
+ return root, err
+ }
+
+ // Set the key
+ m.SetMapIndex(key, elem)
+ return root, nil
+}
+
+func (p *Pointer) setSlice(root interface{}, s, value reflect.Value) (interface{}, error) {
+ // Coerce the value, we'll need that no matter what
+ value, err := coerce(value, s.Type().Elem())
+ if err != nil {
+ return root, err
+ }
+
+ // If the part is the special "-", that means to append it (RFC6901 4.)
+ part := p.Parts[len(p.Parts)-1]
+ if part == "-" {
+ return p.setSliceAppend(root, s, value)
+ }
+
+ // Coerce the key to an int
+ idxVal, err := coerce(reflect.ValueOf(part), reflect.TypeOf(42))
+ if err != nil {
+ return root, err
+ }
+ idx := int(idxVal.Int())
+
+ // Verify we're within bounds
+ if idx < 0 || idx >= s.Len() {
+ return root, fmt.Errorf(
+ "index %d is out of range (length = %d)", idx, s.Len())
+ }
+
+ // Set the key
+ s.Index(idx).Set(value)
+ return root, nil
+}
+
+func (p *Pointer) setSliceAppend(root interface{}, s, value reflect.Value) (interface{}, error) {
+ // Coerce the value, we'll need that no matter what. This should
+ // be a no-op since we expect it to be done already, but there is
+ // a fast-path check for that in coerce so do it anyways.
+ value, err := coerce(value, s.Type().Elem())
+ if err != nil {
+ return root, err
+ }
+
+ // We can assume "s" is the parent of pointer value. We need to actually
+ // write s back because Append can return a new slice.
+ return p.Parent().Set(root, reflect.Append(s, value).Interface())
+}
diff --git a/vendor/github.com/mitchellh/pointerstructure/sort.go b/vendor/github.com/mitchellh/pointerstructure/sort.go
new file mode 100644
index 000000000000..886d1183c69a
--- /dev/null
+++ b/vendor/github.com/mitchellh/pointerstructure/sort.go
@@ -0,0 +1,42 @@
+package pointerstructure
+
+import (
+ "sort"
+)
+
+// Sort does an in-place sort of the pointers so that they are in order
+// of least specific to most specific alphabetized. For example:
+// "/foo", "/foo/0", "/qux"
+//
+// This ordering is ideal for applying the changes in a way that ensures
+// that parents are set first.
+func Sort(p []*Pointer) { sort.Sort(PointerSlice(p)) }
+
+// PointerSlice is a slice of pointers that adheres to sort.Interface
+type PointerSlice []*Pointer
+
+func (p PointerSlice) Len() int { return len(p) }
+func (p PointerSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p PointerSlice) Less(i, j int) bool {
+ // Equal number of parts, do a string compare per part
+ for idx, ival := range p[i].Parts {
+ // If we're past the length of p[j] parts, then we're done
+ if idx >= len(p[j].Parts) {
+ break
+ }
+
+ // Compare the values if they're not equal
+ jval := p[j].Parts[idx]
+ if ival != jval {
+ return ival < jval
+ }
+ }
+
+ // Equal prefix, take the shorter
+ if len(p[i].Parts) != len(p[j].Parts) {
+ return len(p[i].Parts) < len(p[j].Parts)
+ }
+
+ // Equal, it doesn't matter
+ return false
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index f6ddcf764d2d..316b1bd97069 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1409,10 +1409,12 @@
"revisionTime": "2018-12-10T20:01:33Z"
},
{
- "checksumSHA1": "tt3FtyjXgdBI9Mb43UL4LtOZmAk=",
+ "checksumSHA1": "86jzaGc3dRpZ5BKQPFP7ecasQfg=",
"path": "github.com/hashicorp/vault-plugin-auth-jwt",
- "revision": "f428c77917331c1b87dae2dd37016bd1dd4c55da",
- "revisionTime": "2018-10-31T19:59:42Z"
+ "revision": "bf17a88bb5c43eb2cbdc08011cd76ecec028521c",
+ "revisionTime": "2019-02-07T06:35:46Z",
+ "version": "=oidc-cli",
+ "versionExact": "oidc-cli"
},
{
"checksumSHA1": "Ldg2jQeyPrpAupyQq4lRVN+jfFY=",
@@ -1788,6 +1790,12 @@
"revision": "3536a929edddb9a5b34bd6861dc4a9647cb459fe",
"revisionTime": "2018-10-05T04:51:35Z"
},
+ {
+ "checksumSHA1": "31atAEqGt+z8hZgyVZZokEeM6dM=",
+ "path": "github.com/mitchellh/pointerstructure",
+ "revision": "f2329fcfa9e280bdb5a3f2544aec815a508ad72f",
+ "revisionTime": "2017-02-05T20:42:03Z"
+ },
{
"checksumSHA1": "nxuST3bjBv5uDVPzrX9wdruOwv0=",
"path": "github.com/mitchellh/reflectwalk",
From 79a07dd2c6ae2bd57d97c7d7607e6c8875105598 Mon Sep 17 00:00:00 2001
From: Brian Kassouf
Date: Tue, 12 Feb 2019 09:31:03 -0800
Subject: [PATCH 02/31] Remove netRPC based plugins (#6173)
* Remove netRPC backend plugins
* Remove netRPC database plugins
* Fix tests and comments
---
builtin/logical/database/dbplugin/client.go | 12 +-
.../database/dbplugin/netrpc_transport.go | 197 ----------
builtin/logical/database/dbplugin/plugin.go | 22 --
.../logical/database/dbplugin/plugin_test.go | 167 ---------
builtin/logical/database/dbplugin/server.go | 18 +-
logical/plugin/backend.go | 28 --
logical/plugin/backend_client.go | 248 -------------
logical/plugin/backend_server.go | 147 --------
logical/plugin/backend_test.go | 173 ---------
logical/plugin/grpc_backend_client.go | 1 +
logical/plugin/grpc_backend_server.go | 3 +
logical/plugin/grpc_backend_test.go | 20 +-
logical/plugin/grpc_storage.go | 20 +
logical/plugin/plugin.go | 40 +-
logical/plugin/serve.go | 19 +-
logical/plugin/storage.go | 139 -------
logical/plugin/storage_test.go | 17 +-
logical/plugin/system.go | 351 ------------------
logical/plugin/system_test.go | 231 ------------
19 files changed, 58 insertions(+), 1795 deletions(-)
delete mode 100644 builtin/logical/database/dbplugin/netrpc_transport.go
delete mode 100644 logical/plugin/backend_client.go
delete mode 100644 logical/plugin/backend_server.go
delete mode 100644 logical/plugin/backend_test.go
delete mode 100644 logical/plugin/storage.go
delete mode 100644 logical/plugin/system.go
delete mode 100644 logical/plugin/system_test.go
diff --git a/builtin/logical/database/dbplugin/client.go b/builtin/logical/database/dbplugin/client.go
index cbd3560fb2ee..4146e2c05c57 100644
--- a/builtin/logical/database/dbplugin/client.go
+++ b/builtin/logical/database/dbplugin/client.go
@@ -35,11 +35,12 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne
// pluginSets is the map of plugins we can dispense.
pluginSets := map[int]plugin.PluginSet{
- // Version 3 supports both protocols
+ // Version 3 used to support both protocols. We want to keep it around
+ // since it's possible old plugins built against this version will still
+ // work with gRPC. There is currently no difference between version 3
+ // and version 4.
3: plugin.PluginSet{
- "database": &DatabasePlugin{
- GRPCDatabasePlugin: new(GRPCDatabasePlugin),
- },
+ "database": new(GRPCDatabasePlugin),
},
// Version 4 only supports gRPC
4: plugin.PluginSet{
@@ -76,9 +77,6 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne
switch raw.(type) {
case *gRPCClient:
db = raw.(*gRPCClient)
- case *databasePluginRPCClient:
- logger.Warn("plugin is using deprecated netRPC transport, recompile plugin to upgrade to gRPC", "plugin", pluginRunner.Name)
- db = raw.(*databasePluginRPCClient)
default:
return nil, errors.New("unsupported client type")
}
diff --git a/builtin/logical/database/dbplugin/netrpc_transport.go b/builtin/logical/database/dbplugin/netrpc_transport.go
deleted file mode 100644
index 25cbc979673c..000000000000
--- a/builtin/logical/database/dbplugin/netrpc_transport.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package dbplugin
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/rpc"
- "strings"
- "time"
-)
-
-// ---- RPC server domain ----
-
-// databasePluginRPCServer implements an RPC version of Database and is run
-// inside a plugin. It wraps an underlying implementation of Database.
-type databasePluginRPCServer struct {
- impl Database
-}
-
-func (ds *databasePluginRPCServer) Type(_ struct{}, resp *string) error {
- var err error
- *resp, err = ds.impl.Type()
- return err
-}
-
-func (ds *databasePluginRPCServer) CreateUser(args *CreateUserRequestRPC, resp *CreateUserResponse) error {
- var err error
- resp.Username, resp.Password, err = ds.impl.CreateUser(context.Background(), args.Statements, args.UsernameConfig, args.Expiration)
- return err
-}
-
-func (ds *databasePluginRPCServer) RenewUser(args *RenewUserRequestRPC, _ *struct{}) error {
- err := ds.impl.RenewUser(context.Background(), args.Statements, args.Username, args.Expiration)
- return err
-}
-
-func (ds *databasePluginRPCServer) RevokeUser(args *RevokeUserRequestRPC, _ *struct{}) error {
- err := ds.impl.RevokeUser(context.Background(), args.Statements, args.Username)
- return err
-}
-
-func (ds *databasePluginRPCServer) RotateRootCredentials(args *RotateRootCredentialsRequestRPC, resp *RotateRootCredentialsResponse) error {
- config, err := ds.impl.RotateRootCredentials(context.Background(), args.Statements)
- if err != nil {
- return err
- }
- resp.Config, err = json.Marshal(config)
- return err
-}
-
-func (ds *databasePluginRPCServer) Initialize(args *InitializeRequestRPC, _ *struct{}) error {
- return ds.Init(&InitRequestRPC{
- Config: args.Config,
- VerifyConnection: args.VerifyConnection,
- }, &InitResponse{})
-}
-
-func (ds *databasePluginRPCServer) Init(args *InitRequestRPC, resp *InitResponse) error {
- config, err := ds.impl.Init(context.Background(), args.Config, args.VerifyConnection)
- if err != nil {
- return err
- }
- resp.Config, err = json.Marshal(config)
- return err
-}
-
-func (ds *databasePluginRPCServer) Close(_ struct{}, _ *struct{}) error {
- ds.impl.Close()
- return nil
-}
-
-// ---- RPC client domain ----
-// databasePluginRPCClient implements Database and is used on the client to
-// make RPC calls to a plugin.
-type databasePluginRPCClient struct {
- client *rpc.Client
-}
-
-func (dr *databasePluginRPCClient) Type() (string, error) {
- var dbType string
- err := dr.client.Call("Plugin.Type", struct{}{}, &dbType)
-
- return fmt.Sprintf("plugin-%s", dbType), err
-}
-
-func (dr *databasePluginRPCClient) CreateUser(_ context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
- req := CreateUserRequestRPC{
- Statements: statements,
- UsernameConfig: usernameConfig,
- Expiration: expiration,
- }
-
- var resp CreateUserResponse
- err = dr.client.Call("Plugin.CreateUser", req, &resp)
-
- return resp.Username, resp.Password, err
-}
-
-func (dr *databasePluginRPCClient) RenewUser(_ context.Context, statements Statements, username string, expiration time.Time) error {
- req := RenewUserRequestRPC{
- Statements: statements,
- Username: username,
- Expiration: expiration,
- }
-
- return dr.client.Call("Plugin.RenewUser", req, &struct{}{})
-}
-
-func (dr *databasePluginRPCClient) RevokeUser(_ context.Context, statements Statements, username string) error {
- req := RevokeUserRequestRPC{
- Statements: statements,
- Username: username,
- }
-
- return dr.client.Call("Plugin.RevokeUser", req, &struct{}{})
-}
-
-func (dr *databasePluginRPCClient) RotateRootCredentials(_ context.Context, statements []string) (saveConf map[string]interface{}, err error) {
- req := RotateRootCredentialsRequestRPC{
- Statements: statements,
- }
-
- var resp RotateRootCredentialsResponse
- err = dr.client.Call("Plugin.RotateRootCredentials", req, &resp)
-
- err = json.Unmarshal(resp.Config, &saveConf)
- return saveConf, err
-}
-
-func (dr *databasePluginRPCClient) Initialize(_ context.Context, conf map[string]interface{}, verifyConnection bool) error {
- _, err := dr.Init(nil, conf, verifyConnection)
- return err
-}
-
-func (dr *databasePluginRPCClient) Init(_ context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) {
- req := InitRequestRPC{
- Config: conf,
- VerifyConnection: verifyConnection,
- }
-
- var resp InitResponse
- err = dr.client.Call("Plugin.Init", req, &resp)
- if err != nil {
- if strings.Contains(err.Error(), "can't find method Plugin.Init") {
- req := InitializeRequestRPC{
- Config: conf,
- VerifyConnection: verifyConnection,
- }
-
- err = dr.client.Call("Plugin.Initialize", req, &struct{}{})
- if err == nil {
- return conf, nil
- }
- }
- return nil, err
- }
-
- err = json.Unmarshal(resp.Config, &saveConf)
- return saveConf, err
-}
-
-func (dr *databasePluginRPCClient) Close() error {
- return dr.client.Call("Plugin.Close", struct{}{}, &struct{}{})
-}
-
-// ---- RPC Request Args Domain ----
-
-type InitializeRequestRPC struct {
- Config map[string]interface{}
- VerifyConnection bool
-}
-
-type InitRequestRPC struct {
- Config map[string]interface{}
- VerifyConnection bool
-}
-
-type CreateUserRequestRPC struct {
- Statements Statements
- UsernameConfig UsernameConfig
- Expiration time.Time
-}
-
-type RenewUserRequestRPC struct {
- Statements Statements
- Username string
- Expiration time.Time
-}
-
-type RevokeUserRequestRPC struct {
- Statements Statements
- Username string
-}
-
-type RotateRootCredentialsRequestRPC struct {
- Statements []string
-}
diff --git a/builtin/logical/database/dbplugin/plugin.go b/builtin/logical/database/dbplugin/plugin.go
index 9eabcf858363..47262bf2b272 100644
--- a/builtin/logical/database/dbplugin/plugin.go
+++ b/builtin/logical/database/dbplugin/plugin.go
@@ -3,7 +3,6 @@ package dbplugin
import (
"context"
"fmt"
- "net/rpc"
"time"
"google.golang.org/grpc"
@@ -72,8 +71,6 @@ func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRu
switch db.(*DatabasePluginClient).Database.(type) {
case *gRPCClient:
transport = "gRPC"
- case *databasePluginRPCClient:
- transport = "netRPC"
}
}
@@ -110,17 +107,9 @@ var handshakeConfig = plugin.HandshakeConfig{
MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
}
-var _ plugin.Plugin = &DatabasePlugin{}
-var _ plugin.GRPCPlugin = &DatabasePlugin{}
var _ plugin.Plugin = &GRPCDatabasePlugin{}
var _ plugin.GRPCPlugin = &GRPCDatabasePlugin{}
-// DatabasePlugin implements go-plugin's Plugin interface. It has methods for
-// retrieving a server and a client instance of the plugin.
-type DatabasePlugin struct {
- *GRPCDatabasePlugin
-}
-
// GRPCDatabasePlugin is the plugin.Plugin implementation that only supports GRPC
// transport
type GRPCDatabasePlugin struct {
@@ -130,17 +119,6 @@ type GRPCDatabasePlugin struct {
plugin.NetRPCUnsupportedPlugin
}
-func (d DatabasePlugin) Server(*plugin.MuxBroker) (interface{}, error) {
- impl := &DatabaseErrorSanitizerMiddleware{
- next: d.Impl,
- }
- return &databasePluginRPCServer{impl: impl}, nil
-}
-
-func (DatabasePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
- return &databasePluginRPCClient{client: c}, nil
-}
-
func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error {
impl := &DatabaseErrorSanitizerMiddleware{
next: d.Impl,
diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go
index c61b27321ae5..10f162f5b99a 100644
--- a/builtin/logical/database/dbplugin/plugin_test.go
+++ b/builtin/logical/database/dbplugin/plugin_test.go
@@ -8,7 +8,6 @@ import (
"time"
log "github.com/hashicorp/go-hclog"
- plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/namespace"
@@ -96,7 +95,6 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "TestPlugin_GRPC_Main", []string{}, "")
- vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin-netRPC", consts.PluginTypeDatabase, "TestPlugin_NetRPC_Main", []string{}, "")
return cluster, sys
}
@@ -121,31 +119,6 @@ func TestPlugin_GRPC_Main(t *testing.T) {
plugins.Serve(plugin, apiClientMeta.GetTLSConfig())
}
-// This is not an actual test case, it's a helper function that will be executed
-// by the go-plugin client via an exec call.
-func TestPlugin_NetRPC_Main(t *testing.T) {
- if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
- return
- }
-
- os.Unsetenv(pluginutil.PluginVaultVersionEnv)
- p := &mockPlugin{
- users: make(map[string][]string),
- }
-
- args := []string{"--tls-skip-verify=true"}
-
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(args)
-
- tlsProvider := pluginutil.VaultPluginTLSProvider(apiClientMeta.GetTLSConfig())
- serveConf := dbplugin.ServeConfig(p, tlsProvider)
- serveConf.GRPCServer = nil
-
- plugin.Serve(serveConf)
-}
-
func TestPlugin_Init(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
@@ -284,143 +257,3 @@ func TestPlugin_RevokeUser(t *testing.T) {
t.Fatalf("err: %s", err)
}
}
-
-// Test the code is still compatible with an old netRPC plugin
-func TestPlugin_NetRPC_Init(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- dbRaw, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin-netRPC", sys, log.NewNullLogger())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
-
- _, err = dbRaw.Init(context.Background(), connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = dbRaw.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestPlugin_NetRPC_CreateUser(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- db, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin-netRPC", sys, log.NewNullLogger())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer db.Close()
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
-
- _, err = db.Init(context.Background(), connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConf := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- us, pw, err := db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if us != "test" || pw != "test" {
- t.Fatal("expected username and password to be 'test'")
- }
-
- // try and save the same user again to verify it saved the first time, this
- // should return an error
- _, _, err = db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err == nil {
- t.Fatal("expected an error, user wasn't created correctly")
- }
-}
-
-func TestPlugin_NetRPC_RenewUser(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- db, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin-netRPC", sys, log.NewNullLogger())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer db.Close()
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
- _, err = db.Init(context.Background(), connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConf := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- us, _, err := db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = db.RenewUser(context.Background(), dbplugin.Statements{}, us, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestPlugin_NetRPC_RevokeUser(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- db, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin-netRPC", sys, log.NewNullLogger())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer db.Close()
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
- _, err = db.Init(context.Background(), connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConf := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- us, _, err := db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test default revoke statements
- err = db.RevokeUser(context.Background(), dbplugin.Statements{}, us)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Try adding the same username back so we can verify it was removed
- _, _, err = db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
diff --git a/builtin/logical/database/dbplugin/server.go b/builtin/logical/database/dbplugin/server.go
index 7a6040058466..401661f46c5b 100644
--- a/builtin/logical/database/dbplugin/server.go
+++ b/builtin/logical/database/dbplugin/server.go
@@ -4,7 +4,6 @@ import (
"crypto/tls"
plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
)
// Serve is called from within a plugin and wraps the provided
@@ -17,11 +16,13 @@ func Serve(db Database, tlsProvider func() (*tls.Config, error)) {
func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.ServeConfig {
// pluginSets is the map of plugins we can dispense.
pluginSets := map[int]plugin.PluginSet{
+ // Version 3 used to support both protocols. We want to keep it around
+ // since it's possible old plugins built against this version will still
+ // work with gRPC. There is currently no difference between version 3
+ // and version 4.
3: plugin.PluginSet{
- "database": &DatabasePlugin{
- GRPCDatabasePlugin: &GRPCDatabasePlugin{
- Impl: db,
- },
+ "database": &GRPCDatabasePlugin{
+ Impl: db,
},
},
4: plugin.PluginSet{
@@ -38,12 +39,5 @@ func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.S
GRPCServer: plugin.DefaultGRPCServer,
}
- // If we do not have gRPC support fallback to version 3
- // Remove this block in 0.13
- if !pluginutil.GRPCSupport() {
- conf.GRPCServer = nil
- delete(conf.VersionedPlugins, 4)
- }
-
return conf
}
diff --git a/logical/plugin/backend.go b/logical/plugin/backend.go
index bf4f84c07598..8859a593717b 100644
--- a/logical/plugin/backend.go
+++ b/logical/plugin/backend.go
@@ -2,7 +2,6 @@ package plugin
import (
"context"
- "net/rpc"
"sync/atomic"
"google.golang.org/grpc"
@@ -13,16 +12,9 @@ import (
"github.com/hashicorp/vault/logical/plugin/pb"
)
-var _ plugin.Plugin = (*BackendPlugin)(nil)
-var _ plugin.GRPCPlugin = (*BackendPlugin)(nil)
var _ plugin.Plugin = (*GRPCBackendPlugin)(nil)
var _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil)
-// BackendPlugin is the plugin.Plugin implementation
-type BackendPlugin struct {
- *GRPCBackendPlugin
-}
-
// GRPCBackendPlugin is the plugin.Plugin implementation that only supports GRPC
// transport
type GRPCBackendPlugin struct {
@@ -34,26 +26,6 @@ type GRPCBackendPlugin struct {
plugin.NetRPCUnsupportedPlugin
}
-// Server gets called when on plugin.Serve()
-func (b *BackendPlugin) Server(broker *plugin.MuxBroker) (interface{}, error) {
- return &backendPluginServer{
- factory: b.Factory,
- broker: broker,
- // We pass the logger down into the backend so go-plugin will forward
- // logs for us.
- logger: b.Logger,
- }, nil
-}
-
-// Client gets called on plugin.NewClient()
-func (b BackendPlugin) Client(broker *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
- return &backendPluginClient{
- client: c,
- broker: broker,
- metadataMode: b.MetadataMode,
- }, nil
-}
-
func (b GRPCBackendPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
pb.RegisterBackendServer(s, &backendGRPCPluginServer{
broker: broker,
diff --git a/logical/plugin/backend_client.go b/logical/plugin/backend_client.go
deleted file mode 100644
index 21de94446430..000000000000
--- a/logical/plugin/backend_client.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package plugin
-
-import (
- "context"
- "errors"
- "net/rpc"
-
- log "github.com/hashicorp/go-hclog"
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode")
-)
-
-// backendPluginClient implements logical.Backend and is the
-// go-plugin client.
-type backendPluginClient struct {
- broker *plugin.MuxBroker
- client *rpc.Client
- metadataMode bool
-
- system logical.SystemView
- logger log.Logger
-}
-
-// HandleRequestArgs is the args for HandleRequest method.
-type HandleRequestArgs struct {
- StorageID uint32
- Request *logical.Request
-}
-
-// HandleRequestReply is the reply for HandleRequest method.
-type HandleRequestReply struct {
- Response *logical.Response
- Error error
-}
-
-// SpecialPathsReply is the reply for SpecialPaths method.
-type SpecialPathsReply struct {
- Paths *logical.Paths
-}
-
-// SystemReply is the reply for System method.
-type SystemReply struct {
- SystemView logical.SystemView
- Error error
-}
-
-// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
-type HandleExistenceCheckArgs struct {
- StorageID uint32
- Request *logical.Request
-}
-
-// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
-type HandleExistenceCheckReply struct {
- CheckFound bool
- Exists bool
- Error error
-}
-
-// SetupArgs is the args for Setup method.
-type SetupArgs struct {
- StorageID uint32
- LoggerID uint32
- SysViewID uint32
- Config map[string]string
- BackendUUID string
-}
-
-// SetupReply is the reply for Setup method.
-type SetupReply struct {
- Error error
-}
-
-// TypeReply is the reply for the Type method.
-type TypeReply struct {
- Type logical.BackendType
-}
-
-func (b *backendPluginClient) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- if b.metadataMode {
- return nil, ErrClientInMetadataMode
- }
-
- // Do not send the storage, since go-plugin cannot serialize
- // interfaces. The server will pick up the storage from the shim.
- req.Storage = nil
- args := &HandleRequestArgs{
- Request: req,
- }
- var reply HandleRequestReply
-
- if req.Connection != nil {
- oldConnState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- req.Connection.ConnState = oldConnState
- }()
- }
-
- err := b.client.Call("Plugin.HandleRequest", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- if reply.Error.Error() == logical.ErrUnsupportedOperation.Error() {
- return nil, logical.ErrUnsupportedOperation
- }
-
- return reply.Response, reply.Error
- }
-
- return reply.Response, nil
-}
-
-func (b *backendPluginClient) SpecialPaths() *logical.Paths {
- var reply SpecialPathsReply
- err := b.client.Call("Plugin.SpecialPaths", new(interface{}), &reply)
- if err != nil {
- return nil
- }
-
- return reply.Paths
-}
-
-// System returns vault's system view. The backend client stores the view during
-// Setup, so there is no need to shim the system just to get it back.
-func (b *backendPluginClient) System() logical.SystemView {
- return b.system
-}
-
-// Logger returns vault's logger. The backend client stores the logger during
-// Setup, so there is no need to shim the logger just to get it back.
-func (b *backendPluginClient) Logger() log.Logger {
- return b.logger
-}
-
-func (b *backendPluginClient) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
- if b.metadataMode {
- return false, false, ErrClientInMetadataMode
- }
-
- // Do not send the storage, since go-plugin cannot serialize
- // interfaces. The server will pick up the storage from the shim.
- req.Storage = nil
- args := &HandleExistenceCheckArgs{
- Request: req,
- }
- var reply HandleExistenceCheckReply
-
- if req.Connection != nil {
- oldConnState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- req.Connection.ConnState = oldConnState
- }()
- }
-
- err := b.client.Call("Plugin.HandleExistenceCheck", args, &reply)
- if err != nil {
- return false, false, err
- }
- if reply.Error != nil {
- // THINKING: Should be be a switch on all error types?
- if reply.Error.Error() == logical.ErrUnsupportedPath.Error() {
- return false, false, logical.ErrUnsupportedPath
- }
- return false, false, reply.Error
- }
-
- return reply.CheckFound, reply.Exists, nil
-}
-
-func (b *backendPluginClient) Cleanup(ctx context.Context) {
- b.client.Call("Plugin.Cleanup", new(interface{}), &struct{}{})
-}
-
-func (b *backendPluginClient) Initialize(ctx context.Context) error {
- if b.metadataMode {
- return ErrClientInMetadataMode
- }
- err := b.client.Call("Plugin.Initialize", new(interface{}), &struct{}{})
- return err
-}
-
-func (b *backendPluginClient) InvalidateKey(ctx context.Context, key string) {
- if b.metadataMode {
- return
- }
- b.client.Call("Plugin.InvalidateKey", key, &struct{}{})
-}
-
-func (b *backendPluginClient) Setup(ctx context.Context, config *logical.BackendConfig) error {
- // Shim logical.Storage
- storageImpl := config.StorageView
- if b.metadataMode {
- storageImpl = &NOOPStorage{}
- }
- storageID := b.broker.NextId()
- go b.broker.AcceptAndServe(storageID, &StorageServer{
- impl: storageImpl,
- })
-
- // Shim logical.SystemView
- sysViewImpl := config.System
- if b.metadataMode {
- sysViewImpl = &logical.StaticSystemView{}
- }
- sysViewID := b.broker.NextId()
- go b.broker.AcceptAndServe(sysViewID, &SystemViewServer{
- impl: sysViewImpl,
- })
-
- args := &SetupArgs{
- StorageID: storageID,
- SysViewID: sysViewID,
- Config: config.Config,
- BackendUUID: config.BackendUUID,
- }
- var reply SetupReply
-
- err := b.client.Call("Plugin.Setup", args, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
-
- // Set system and logger for getter methods
- b.system = config.System
- b.logger = config.Logger
-
- return nil
-}
-
-func (b *backendPluginClient) Type() logical.BackendType {
- var reply TypeReply
- err := b.client.Call("Plugin.Type", new(interface{}), &reply)
- if err != nil {
- return logical.TypeUnknown
- }
-
- return logical.BackendType(reply.Type)
-}
diff --git a/logical/plugin/backend_server.go b/logical/plugin/backend_server.go
deleted file mode 100644
index 291a9e4ab2fa..000000000000
--- a/logical/plugin/backend_server.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package plugin
-
-import (
- "context"
- "errors"
- "net/rpc"
-
- hclog "github.com/hashicorp/go-hclog"
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode")
-)
-
-// backendPluginServer is the RPC server that backendPluginClient talks to,
-// it methods conforming to requirements by net/rpc
-type backendPluginServer struct {
- broker *plugin.MuxBroker
- backend logical.Backend
- factory logical.Factory
-
- logger hclog.Logger
- sysViewClient *rpc.Client
- storageClient *rpc.Client
-}
-
-func (b *backendPluginServer) HandleRequest(args *HandleRequestArgs, reply *HandleRequestReply) error {
- if pluginutil.InMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- storage := &StorageClient{client: b.storageClient}
- args.Request.Storage = storage
-
- resp, err := b.backend.HandleRequest(context.Background(), args.Request)
- *reply = HandleRequestReply{
- Response: resp,
- Error: wrapError(err),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) SpecialPaths(_ interface{}, reply *SpecialPathsReply) error {
- *reply = SpecialPathsReply{
- Paths: b.backend.SpecialPaths(),
- }
- return nil
-}
-
-func (b *backendPluginServer) HandleExistenceCheck(args *HandleExistenceCheckArgs, reply *HandleExistenceCheckReply) error {
- if pluginutil.InMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- storage := &StorageClient{client: b.storageClient}
- args.Request.Storage = storage
-
- checkFound, exists, err := b.backend.HandleExistenceCheck(context.TODO(), args.Request)
- *reply = HandleExistenceCheckReply{
- CheckFound: checkFound,
- Exists: exists,
- Error: wrapError(err),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) Cleanup(_ interface{}, _ *struct{}) error {
- b.backend.Cleanup(context.Background())
-
- // Close rpc clients
- b.sysViewClient.Close()
- b.storageClient.Close()
- return nil
-}
-
-func (b *backendPluginServer) InvalidateKey(args string, _ *struct{}) error {
- if pluginutil.InMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- b.backend.InvalidateKey(context.Background(), args)
- return nil
-}
-
-// Setup dials into the plugin's broker to get a shimmed storage, logger, and
-// system view of the backend. This method also instantiates the underlying
-// backend through its factory func for the server side of the plugin.
-func (b *backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error {
- // Dial for storage
- storageConn, err := b.broker.Dial(args.StorageID)
- if err != nil {
- *reply = SetupReply{
- Error: wrapError(err),
- }
- return nil
- }
- rawStorageClient := rpc.NewClient(storageConn)
- b.storageClient = rawStorageClient
-
- storage := &StorageClient{client: rawStorageClient}
-
- // Dial for sys view
- sysViewConn, err := b.broker.Dial(args.SysViewID)
- if err != nil {
- *reply = SetupReply{
- Error: wrapError(err),
- }
- return nil
- }
- rawSysViewClient := rpc.NewClient(sysViewConn)
- b.sysViewClient = rawSysViewClient
-
- sysView := &SystemViewClient{client: rawSysViewClient}
-
- config := &logical.BackendConfig{
- StorageView: storage,
- Logger: b.logger,
- System: sysView,
- Config: args.Config,
- BackendUUID: args.BackendUUID,
- }
-
- // Call the underlying backend factory after shims have been created
- // to set b.backend
- backend, err := b.factory(context.Background(), config)
- if err != nil {
- *reply = SetupReply{
- Error: wrapError(err),
- }
- }
- b.backend = backend
-
- return nil
-}
-
-func (b *backendPluginServer) Type(_ interface{}, reply *TypeReply) error {
- *reply = TypeReply{
- Type: b.backend.Type(),
- }
-
- return nil
-}
diff --git a/logical/plugin/backend_test.go b/logical/plugin/backend_test.go
deleted file mode 100644
index d36d7639f2f8..000000000000
--- a/logical/plugin/backend_test.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package plugin
-
-import (
- "context"
- "testing"
- "time"
-
- log "github.com/hashicorp/go-hclog"
- gplugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/logging"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/mock"
-)
-
-func TestBackendPlugin_impl(t *testing.T) {
- var _ gplugin.Plugin = new(BackendPlugin)
- var _ logical.Backend = new(backendPluginClient)
-}
-
-func TestBackendPlugin_HandleRequest(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- resp, err := b.HandleRequest(context.Background(), &logical.Request{
- Operation: logical.CreateOperation,
- Path: "kv/foo",
- Data: map[string]interface{}{
- "value": "bar",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["value"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendPlugin_SpecialPaths(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- paths := b.SpecialPaths()
- if paths == nil {
- t.Fatal("SpecialPaths() returned nil")
- }
-}
-
-func TestBackendPlugin_System(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- sys := b.System()
- if sys == nil {
- t.Fatal("System() returned nil")
- }
-
- actual := sys.DefaultLeaseTTL()
- expected := 300 * time.Second
-
- if actual != expected {
- t.Fatalf("bad: %v, expected %v", actual, expected)
- }
-}
-
-func TestBackendPlugin_Logger(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- logger := b.Logger()
- if logger == nil {
- t.Fatal("Logger() returned nil")
- }
-}
-
-func TestBackendPlugin_HandleExistenceCheck(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- checkFound, exists, err := b.HandleExistenceCheck(context.Background(), &logical.Request{
- Operation: logical.CreateOperation,
- Path: "kv/foo",
- Data: map[string]interface{}{"value": "bar"},
- })
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'kv/foo")
- }
- if exists {
- t.Fatal("existence check should have returned 'false' for 'kv/foo'")
- }
-}
-
-func TestBackendPlugin_Cleanup(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- b.Cleanup(context.Background())
-}
-
-func TestBackendPlugin_InvalidateKey(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- ctx := context.Background()
-
- resp, err := b.HandleRequest(ctx, &logical.Request{
- Operation: logical.ReadOperation,
- Path: "internal",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["value"] == "" {
- t.Fatalf("bad: %#v, expected non-empty value", resp)
- }
-
- b.InvalidateKey(ctx, "internal")
-
- resp, err = b.HandleRequest(ctx, &logical.Request{
- Operation: logical.ReadOperation,
- Path: "internal",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["value"] != "" {
- t.Fatalf("bad: expected empty response data, got %#v", resp)
- }
-}
-
-func TestBackendPlugin_Setup(t *testing.T) {
- _, cleanup := testBackend(t)
- defer cleanup()
-}
-
-func testBackend(t *testing.T) (logical.Backend, func()) {
- // Create a mock provider
- pluginMap := map[string]gplugin.Plugin{
- "backend": &BackendPlugin{
- GRPCBackendPlugin: &GRPCBackendPlugin{
- Factory: mock.Factory,
- },
- },
- }
- client, _ := gplugin.TestPluginRPCConn(t, pluginMap, nil)
- cleanup := func() {
- client.Close()
- }
-
- // Request the backend
- raw, err := client.Dispense(BackendPluginName)
- if err != nil {
- t.Fatal(err)
- }
- b := raw.(logical.Backend)
-
- err = b.Setup(context.Background(), &logical.BackendConfig{
- Logger: logging.NewVaultLogger(log.Debug),
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: 300 * time.Second,
- MaxLeaseTTLVal: 1800 * time.Second,
- },
- StorageView: &logical.InmemStorage{},
- })
- if err != nil {
- t.Fatal(err)
- }
-
- return b, cleanup
-}
diff --git a/logical/plugin/grpc_backend_client.go b/logical/plugin/grpc_backend_client.go
index 87b740ee0af3..379a7e420804 100644
--- a/logical/plugin/grpc_backend_client.go
+++ b/logical/plugin/grpc_backend_client.go
@@ -16,6 +16,7 @@ import (
)
var ErrPluginShutdown = errors.New("plugin is shut down")
+var ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode")
// Validate backendGRPCPluginClient satisfies the logical.Backend interface
var _ logical.Backend = &backendGRPCPluginClient{}
diff --git a/logical/plugin/grpc_backend_server.go b/logical/plugin/grpc_backend_server.go
index 7869a70b2a14..0d105b8c392b 100644
--- a/logical/plugin/grpc_backend_server.go
+++ b/logical/plugin/grpc_backend_server.go
@@ -2,6 +2,7 @@ package plugin
import (
"context"
+ "errors"
log "github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
@@ -11,6 +12,8 @@ import (
"google.golang.org/grpc"
)
+var ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode")
+
type backendGRPCPluginServer struct {
broker *plugin.GRPCBroker
backend logical.Backend
diff --git a/logical/plugin/grpc_backend_test.go b/logical/plugin/grpc_backend_test.go
index 63d125139326..4765bd51ce25 100644
--- a/logical/plugin/grpc_backend_test.go
+++ b/logical/plugin/grpc_backend_test.go
@@ -14,8 +14,8 @@ import (
)
func TestGRPCBackendPlugin_impl(t *testing.T) {
- var _ gplugin.Plugin = new(BackendPlugin)
- var _ logical.Backend = new(backendPluginClient)
+ var _ gplugin.Plugin = new(GRPCBackendPlugin)
+ var _ logical.Backend = new(backendGRPCPluginClient)
}
func TestGRPCBackendPlugin_HandleRequest(t *testing.T) {
@@ -140,15 +140,13 @@ func TestGRPCBackendPlugin_Setup(t *testing.T) {
func testGRPCBackend(t *testing.T) (logical.Backend, func()) {
// Create a mock provider
pluginMap := map[string]gplugin.Plugin{
- "backend": &BackendPlugin{
- GRPCBackendPlugin: &GRPCBackendPlugin{
- Factory: mock.Factory,
- Logger: log.New(&log.LoggerOptions{
- Level: log.Debug,
- Output: os.Stderr,
- JSONFormat: true,
- }),
- },
+ "backend": &GRPCBackendPlugin{
+ Factory: mock.Factory,
+ Logger: log.New(&log.LoggerOptions{
+ Level: log.Debug,
+ Output: os.Stderr,
+ JSONFormat: true,
+ }),
},
}
client, _ := gplugin.TestPluginGRPCConn(t, pluginMap)
diff --git a/logical/plugin/grpc_storage.go b/logical/plugin/grpc_storage.go
index ffe133900d2c..9957d4190b7f 100644
--- a/logical/plugin/grpc_storage.go
+++ b/logical/plugin/grpc_storage.go
@@ -108,3 +108,23 @@ func (s *GRPCStorageServer) Delete(ctx context.Context, args *pb.StorageDeleteAr
Err: pb.ErrToString(err),
}, nil
}
+
+// NOOPStorage is used to deny access to the storage interface while running a
+// backend plugin in metadata mode.
+type NOOPStorage struct{}
+
+func (s *NOOPStorage) List(_ context.Context, prefix string) ([]string, error) {
+ return []string{}, nil
+}
+
+func (s *NOOPStorage) Get(_ context.Context, key string) (*logical.StorageEntry, error) {
+ return nil, nil
+}
+
+func (s *NOOPStorage) Put(_ context.Context, entry *logical.StorageEntry) error {
+ return nil
+}
+
+func (s *NOOPStorage) Delete(_ context.Context, key string) error {
+ return nil
+}
diff --git a/logical/plugin/plugin.go b/logical/plugin/plugin.go
index 2c63d612f6a0..536fbf20eb57 100644
--- a/logical/plugin/plugin.go
+++ b/logical/plugin/plugin.go
@@ -2,13 +2,9 @@ package plugin
import (
"context"
- "crypto/ecdsa"
- "crypto/rsa"
- "encoding/gob"
"errors"
"fmt"
"sync"
- "time"
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
@@ -18,28 +14,6 @@ import (
"github.com/hashicorp/vault/logical"
)
-// init registers basic structs with gob which will be used to transport complex
-// types through the plugin server and client.
-func init() {
- // Common basic structs
- gob.Register([]interface{}{})
- gob.Register(map[string]interface{}{})
- gob.Register(map[string]string{})
- gob.Register(map[string]int{})
-
- // Register these types since we have to serialize and de-serialize
- // tls.ConnectionState over the wire as part of logical.Request.Connection.
- gob.Register(rsa.PublicKey{})
- gob.Register(ecdsa.PublicKey{})
- gob.Register(time.Duration(0))
-
- // Custom common error types for requests. If you add something here, you must
- // also add it to the switch statement in `wrapError`!
- gob.Register(&plugin.BasicError{})
- gob.Register(logical.CodedError(0, ""))
- gob.Register(&logical.StatusBadRequest{})
-}
-
// BackendPluginClient is a wrapper around backendPluginClient
// that also contains its plugin.Client instance. It's primarily
// used to cleanly kill the client on Cleanup()
@@ -98,11 +72,13 @@ func NewBackend(ctx context.Context, pluginName string, pluginType consts.Plugin
func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
// pluginMap is the map of plugins we can dispense.
pluginSet := map[int]plugin.PluginSet{
+ // Version 3 used to support both protocols. We want to keep it around
+ // since it's possible old plugins built against this version will still
+ // work with gRPC. There is currently no difference between version 3
+ // and version 4.
3: plugin.PluginSet{
- "backend": &BackendPlugin{
- GRPCBackendPlugin: &GRPCBackendPlugin{
- MetadataMode: isMetadataMode,
- },
+ "backend": &GRPCBackendPlugin{
+ MetadataMode: isMetadataMode,
},
},
4: plugin.PluginSet{
@@ -142,10 +118,6 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne
// We should have a logical backend type now. This feels like a normal interface
// implementation but is in fact over an RPC connection.
switch raw.(type) {
- case *backendPluginClient:
- logger.Warn("plugin is using deprecated netRPC transport, recompile plugin to upgrade to gRPC", "plugin", pluginRunner.Name)
- backend = raw.(*backendPluginClient)
- transport = "netRPC"
case *backendGRPCPluginClient:
backend = raw.(*backendGRPCPluginClient)
transport = "gRPC"
diff --git a/logical/plugin/serve.go b/logical/plugin/serve.go
index c61e59ecf685..d3771f08d407 100644
--- a/logical/plugin/serve.go
+++ b/logical/plugin/serve.go
@@ -39,12 +39,14 @@ func Serve(opts *ServeOpts) error {
// pluginMap is the map of plugins we can dispense.
pluginSets := map[int]plugin.PluginSet{
+ // Version 3 used to support both protocols. We want to keep it around
+ // since it's possible old plugins built against this version will still
+ // work with gRPC. There is currently no difference between version 3
+ // and version 4.
3: plugin.PluginSet{
- "backend": &BackendPlugin{
- GRPCBackendPlugin: &GRPCBackendPlugin{
- Factory: opts.BackendFactoryFunc,
- Logger: logger,
- },
+ "backend": &GRPCBackendPlugin{
+ Factory: opts.BackendFactoryFunc,
+ Logger: logger,
},
},
4: plugin.PluginSet{
@@ -74,13 +76,6 @@ func Serve(opts *ServeOpts) error {
},
}
- // If we do not have gRPC support fallback to version 3
- // Remove this block in 0.13
- if !pluginutil.GRPCSupport() {
- serveOpts.GRPCServer = nil
- delete(pluginSets, 4)
- }
-
plugin.Serve(serveOpts)
return nil
diff --git a/logical/plugin/storage.go b/logical/plugin/storage.go
deleted file mode 100644
index 75cda5500fa4..000000000000
--- a/logical/plugin/storage.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package plugin
-
-import (
- "context"
- "net/rpc"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// StorageClient is an implementation of logical.Storage that communicates
-// over RPC.
-type StorageClient struct {
- client *rpc.Client
-}
-
-func (s *StorageClient) List(_ context.Context, prefix string) ([]string, error) {
- var reply StorageListReply
- err := s.client.Call("Plugin.List", prefix, &reply)
- if err != nil {
- return reply.Keys, err
- }
- if reply.Error != nil {
- return reply.Keys, reply.Error
- }
- return reply.Keys, nil
-}
-
-func (s *StorageClient) Get(_ context.Context, key string) (*logical.StorageEntry, error) {
- var reply StorageGetReply
- err := s.client.Call("Plugin.Get", key, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
- return reply.StorageEntry, nil
-}
-
-func (s *StorageClient) Put(_ context.Context, entry *logical.StorageEntry) error {
- var reply StoragePutReply
- err := s.client.Call("Plugin.Put", entry, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
- return nil
-}
-
-func (s *StorageClient) Delete(_ context.Context, key string) error {
- var reply StorageDeleteReply
- err := s.client.Call("Plugin.Delete", key, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
- return nil
-}
-
-// StorageServer is a net/rpc compatible structure for serving
-type StorageServer struct {
- impl logical.Storage
-}
-
-func (s *StorageServer) List(prefix string, reply *StorageListReply) error {
- keys, err := s.impl.List(context.Background(), prefix)
- *reply = StorageListReply{
- Keys: keys,
- Error: wrapError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Get(key string, reply *StorageGetReply) error {
- storageEntry, err := s.impl.Get(context.Background(), key)
- *reply = StorageGetReply{
- StorageEntry: storageEntry,
- Error: wrapError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Put(entry *logical.StorageEntry, reply *StoragePutReply) error {
- err := s.impl.Put(context.Background(), entry)
- *reply = StoragePutReply{
- Error: wrapError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Delete(key string, reply *StorageDeleteReply) error {
- err := s.impl.Delete(context.Background(), key)
- *reply = StorageDeleteReply{
- Error: wrapError(err),
- }
- return nil
-}
-
-type StorageListReply struct {
- Keys []string
- Error error
-}
-
-type StorageGetReply struct {
- StorageEntry *logical.StorageEntry
- Error error
-}
-
-type StoragePutReply struct {
- Error error
-}
-
-type StorageDeleteReply struct {
- Error error
-}
-
-// NOOPStorage is used to deny access to the storage interface while running a
-// backend plugin in metadata mode.
-type NOOPStorage struct{}
-
-func (s *NOOPStorage) List(_ context.Context, prefix string) ([]string, error) {
- return []string{}, nil
-}
-
-func (s *NOOPStorage) Get(_ context.Context, key string) (*logical.StorageEntry, error) {
- return nil, nil
-}
-
-func (s *NOOPStorage) Put(_ context.Context, entry *logical.StorageEntry) error {
- return nil
-}
-
-func (s *NOOPStorage) Delete(_ context.Context, key string) error {
- return nil
-}
diff --git a/logical/plugin/storage_test.go b/logical/plugin/storage_test.go
index 87653463e5ff..9920460248ec 100644
--- a/logical/plugin/storage_test.go
+++ b/logical/plugin/storage_test.go
@@ -11,22 +11,7 @@ import (
)
func TestStorage_impl(t *testing.T) {
- var _ logical.Storage = new(StorageClient)
-}
-
-func TestStorage_RPC(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- storage := &logical.InmemStorage{}
-
- server.RegisterName("Plugin", &StorageServer{
- impl: storage,
- })
-
- testStorage := &StorageClient{client: client}
-
- logical.TestStorage(t, testStorage)
+ var _ logical.Storage = new(GRPCStorageClient)
}
func TestStorage_GRPC(t *testing.T) {
diff --git a/logical/plugin/system.go b/logical/plugin/system.go
deleted file mode 100644
index 148f39a96d8d..000000000000
--- a/logical/plugin/system.go
+++ /dev/null
@@ -1,351 +0,0 @@
-package plugin
-
-import (
- "context"
- "net/rpc"
- "time"
-
- "fmt"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
-)
-
-type SystemViewClient struct {
- client *rpc.Client
-}
-
-func (s *SystemViewClient) DefaultLeaseTTL() time.Duration {
- var reply DefaultLeaseTTLReply
- err := s.client.Call("Plugin.DefaultLeaseTTL", new(interface{}), &reply)
- if err != nil {
- return 0
- }
-
- return reply.DefaultLeaseTTL
-}
-
-func (s *SystemViewClient) MaxLeaseTTL() time.Duration {
- var reply MaxLeaseTTLReply
- err := s.client.Call("Plugin.MaxLeaseTTL", new(interface{}), &reply)
- if err != nil {
- return 0
- }
-
- return reply.MaxLeaseTTL
-}
-
-func (s *SystemViewClient) SudoPrivilege(ctx context.Context, path string, token string) bool {
- var reply SudoPrivilegeReply
- args := &SudoPrivilegeArgs{
- Path: path,
- Token: token,
- }
-
- err := s.client.Call("Plugin.SudoPrivilege", args, &reply)
- if err != nil {
- return false
- }
-
- return reply.Sudo
-}
-
-func (s *SystemViewClient) Tainted() bool {
- var reply TaintedReply
-
- err := s.client.Call("Plugin.Tainted", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.Tainted
-}
-
-func (s *SystemViewClient) CachingDisabled() bool {
- var reply CachingDisabledReply
-
- err := s.client.Call("Plugin.CachingDisabled", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.CachingDisabled
-}
-
-func (s *SystemViewClient) ReplicationState() consts.ReplicationState {
- var reply ReplicationStateReply
-
- err := s.client.Call("Plugin.ReplicationState", new(interface{}), &reply)
- if err != nil {
- return consts.ReplicationUnknown
- }
-
- return reply.ReplicationState
-}
-
-func (s *SystemViewClient) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- var reply ResponseWrapDataReply
- // Do not allow JWTs to be returned
- args := &ResponseWrapDataArgs{
- Data: data,
- TTL: ttl,
- JWT: false,
- }
-
- err := s.client.Call("Plugin.ResponseWrapData", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.ResponseWrapInfo, nil
-}
-
-func (s *SystemViewClient) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) {
- return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend")
-}
-
-func (s *SystemViewClient) HasFeature(feature license.Features) bool {
- // Not implemented
- return false
-}
-
-func (s *SystemViewClient) MlockEnabled() bool {
- var reply MlockEnabledReply
- err := s.client.Call("Plugin.MlockEnabled", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.MlockEnabled
-}
-
-func (s *SystemViewClient) LocalMount() bool {
- var reply LocalMountReply
- err := s.client.Call("Plugin.LocalMount", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.Local
-}
-
-func (s *SystemViewClient) EntityInfo(entityID string) (*logical.Entity, error) {
- var reply EntityInfoReply
- args := &EntityInfoArgs{
- EntityID: entityID,
- }
-
- err := s.client.Call("Plugin.EntityInfo", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.Entity, nil
-}
-
-func (s *SystemViewClient) PluginEnv(_ context.Context) (*logical.PluginEnvironment, error) {
- var reply PluginEnvReply
-
- err := s.client.Call("Plugin.PluginEnv", new(interface{}), &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.PluginEnvironment, nil
-}
-
-type SystemViewServer struct {
- impl logical.SystemView
-}
-
-func (s *SystemViewServer) DefaultLeaseTTL(_ interface{}, reply *DefaultLeaseTTLReply) error {
- ttl := s.impl.DefaultLeaseTTL()
- *reply = DefaultLeaseTTLReply{
- DefaultLeaseTTL: ttl,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) MaxLeaseTTL(_ interface{}, reply *MaxLeaseTTLReply) error {
- ttl := s.impl.MaxLeaseTTL()
- *reply = MaxLeaseTTLReply{
- MaxLeaseTTL: ttl,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) SudoPrivilege(args *SudoPrivilegeArgs, reply *SudoPrivilegeReply) error {
- sudo := s.impl.SudoPrivilege(context.Background(), args.Path, args.Token)
- *reply = SudoPrivilegeReply{
- Sudo: sudo,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) Tainted(_ interface{}, reply *TaintedReply) error {
- tainted := s.impl.Tainted()
- *reply = TaintedReply{
- Tainted: tainted,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) CachingDisabled(_ interface{}, reply *CachingDisabledReply) error {
- cachingDisabled := s.impl.CachingDisabled()
- *reply = CachingDisabledReply{
- CachingDisabled: cachingDisabled,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) ReplicationState(_ interface{}, reply *ReplicationStateReply) error {
- replicationState := s.impl.ReplicationState()
- *reply = ReplicationStateReply{
- ReplicationState: replicationState,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) ResponseWrapData(args *ResponseWrapDataArgs, reply *ResponseWrapDataReply) error {
- // Do not allow JWTs to be returned
- info, err := s.impl.ResponseWrapData(context.Background(), args.Data, args.TTL, false)
- if err != nil {
- *reply = ResponseWrapDataReply{
- Error: wrapError(err),
- }
- return nil
- }
- *reply = ResponseWrapDataReply{
- ResponseWrapInfo: info,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) MlockEnabled(_ interface{}, reply *MlockEnabledReply) error {
- enabled := s.impl.MlockEnabled()
- *reply = MlockEnabledReply{
- MlockEnabled: enabled,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) LocalMount(_ interface{}, reply *LocalMountReply) error {
- local := s.impl.LocalMount()
- *reply = LocalMountReply{
- Local: local,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) EntityInfo(args *EntityInfoArgs, reply *EntityInfoReply) error {
- entity, err := s.impl.EntityInfo(args.EntityID)
- if err != nil {
- *reply = EntityInfoReply{
- Error: wrapError(err),
- }
- return nil
- }
- *reply = EntityInfoReply{
- Entity: entity,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) PluginEnv(_ interface{}, reply *PluginEnvReply) error {
- pluginEnv, err := s.impl.PluginEnv(context.Background())
- if err != nil {
- *reply = PluginEnvReply{
- Error: wrapError(err),
- }
- return nil
- }
- *reply = PluginEnvReply{
- PluginEnvironment: pluginEnv,
- }
-
- return nil
-}
-
-type DefaultLeaseTTLReply struct {
- DefaultLeaseTTL time.Duration
-}
-
-type MaxLeaseTTLReply struct {
- MaxLeaseTTL time.Duration
-}
-
-type SudoPrivilegeArgs struct {
- Path string
- Token string
-}
-
-type SudoPrivilegeReply struct {
- Sudo bool
-}
-
-type TaintedReply struct {
- Tainted bool
-}
-
-type CachingDisabledReply struct {
- CachingDisabled bool
-}
-
-type ReplicationStateReply struct {
- ReplicationState consts.ReplicationState
-}
-
-type ResponseWrapDataArgs struct {
- Data map[string]interface{}
- TTL time.Duration
- JWT bool
-}
-
-type ResponseWrapDataReply struct {
- ResponseWrapInfo *wrapping.ResponseWrapInfo
- Error error
-}
-
-type MlockEnabledReply struct {
- MlockEnabled bool
-}
-
-type LocalMountReply struct {
- Local bool
-}
-
-type EntityInfoArgs struct {
- EntityID string
-}
-
-type EntityInfoReply struct {
- Entity *logical.Entity
- Error error
-}
-
-type PluginEnvReply struct {
- PluginEnvironment *logical.PluginEnvironment
- Error error
-}
diff --git a/logical/plugin/system_test.go b/logical/plugin/system_test.go
deleted file mode 100644
index dd712631af00..000000000000
--- a/logical/plugin/system_test.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package plugin
-
-import (
- "context"
- "testing"
-
- "reflect"
-
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/logical"
-)
-
-func Test_impl(t *testing.T) {
- var _ logical.SystemView = new(SystemViewClient)
-}
-
-func TestSystem_defaultLeaseTTL(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.DefaultLeaseTTL()
- actual := testSystemView.DefaultLeaseTTL()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_maxLeaseTTL(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.MaxLeaseTTL()
- actual := testSystemView.MaxLeaseTTL()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_sudoPrivilege(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.SudoPrivilegeVal = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
- ctx := context.Background()
-
- expected := sys.SudoPrivilege(ctx, "foo", "bar")
- actual := testSystemView.SudoPrivilege(ctx, "foo", "bar")
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_tainted(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.TaintedVal = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.Tainted()
- actual := testSystemView.Tainted()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_cachingDisabled(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.CachingDisabledVal = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.CachingDisabled()
- actual := testSystemView.CachingDisabled()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_replicationState(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.ReplicationStateVal = consts.ReplicationPerformancePrimary
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.ReplicationState()
- actual := testSystemView.ReplicationState()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_responseWrapData(t *testing.T) {
- t.SkipNow()
-}
-
-func TestSystem_lookupPlugin(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- if _, err := testSystemView.LookupPlugin(context.Background(), "foo", consts.PluginTypeDatabase); err == nil {
- t.Fatal("LookPlugin(): expected error on due to unsupported call from plugin")
- }
-}
-
-func TestSystem_mlockEnabled(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.EnableMlock = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.MlockEnabled()
- actual := testSystemView.MlockEnabled()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_entityInfo(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.EntityVal = &logical.Entity{
- ID: "test",
- Name: "name",
- }
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- actual, err := testSystemView.EntityInfo("")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(sys.EntityVal, actual) {
- t.Fatalf("expected: %v, got: %v", sys.EntityVal, actual)
- }
-}
-
-func TestSystem_pluginEnv(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.PluginEnvironment = &logical.PluginEnvironment{
- VaultVersion: "0.10.42",
- }
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected, err := sys.PluginEnv(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
- actual, err := testSystemView.PluginEnv(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
From 4a7544cbfd835b33382fd42de8152392e119f154 Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Tue, 12 Feb 2019 15:36:13 -0800
Subject: [PATCH 03/31] Output default as part of OpenAPI (#6222)
---
logical/framework/openapi.go | 3 +++
logical/framework/openapi_test.go | 1 +
logical/framework/testdata/operations.json | 1 +
3 files changed, 5 insertions(+)
diff --git a/logical/framework/openapi.go b/logical/framework/openapi.go
index 2b2c76b3e4b5..78fdef3ac8ac 100644
--- a/logical/framework/openapi.go
+++ b/logical/framework/openapi.go
@@ -155,6 +155,7 @@ type OASSchema struct {
Format string `json:"format,omitempty"`
Pattern string `json:"pattern,omitempty"`
Enum []interface{} `json:"enum,omitempty"`
+ Default interface{} `json:"default,omitempty"`
Example interface{} `json:"example,omitempty"`
Deprecated bool `json:"deprecated,omitempty"`
Required bool `json:"required,omitempty"`
@@ -263,6 +264,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, backendType logical.Back
Type: t.baseType,
Pattern: t.pattern,
Enum: field.AllowedValues,
+ Default: field.Default,
DisplayName: field.DisplayName,
DisplayValue: field.DisplayValue,
DisplaySensitive: field.DisplaySensitive,
@@ -321,6 +323,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, backendType logical.Back
Format: openapiField.format,
Pattern: openapiField.pattern,
Enum: field.AllowedValues,
+ Default: field.Default,
Required: field.Required,
Deprecated: field.Deprecated,
DisplayName: field.DisplayName,
diff --git a/logical/framework/openapi_test.go b/logical/framework/openapi_test.go
index 23079a536f27..a669a5adef92 100644
--- a/logical/framework/openapi_test.go
+++ b/logical/framework/openapi_test.go
@@ -326,6 +326,7 @@ func TestOpenAPI_Paths(t *testing.T) {
},
"name": {
Type: TypeNameString,
+ Default: "Larry",
Description: "the name",
},
"age": {
diff --git a/logical/framework/testdata/operations.json b/logical/framework/testdata/operations.json
index 4d462986d8fa..ab93a431cf4c 100644
--- a/logical/framework/testdata/operations.json
+++ b/logical/framework/testdata/operations.json
@@ -85,6 +85,7 @@
"name": {
"type": "string",
"description": "the name",
+ "default": "Larry",
"pattern": "\\w([\\w-.]*\\w)?"
}
}
From a37265fdcc4150524cb052905429d6856c85c6b1 Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Tue, 12 Feb 2019 17:08:04 -0800
Subject: [PATCH 04/31] Update vendored JWT plugin
---
.../vault-plugin-auth-jwt/Gopkg.lock | 10 +-
.../vault-plugin-auth-jwt/Gopkg.toml | 5 +
.../vault-plugin-auth-jwt/backend.go | 8 +-
.../hashicorp/vault-plugin-auth-jwt/cli.go | 376 ++----------------
.../vault-plugin-auth-jwt/cli_responses.go | 313 +++++++++++++++
.../vault-plugin-auth-jwt/test_ui.html | 2 +-
vendor/vendor.json | 10 +-
7 files changed, 376 insertions(+), 348 deletions(-)
create mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli_responses.go
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
index 9ae1a539743a..e686681ccb2d 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
@@ -101,15 +101,11 @@
version = "v1.0.0"
[[projects]]
- branch = "master"
- digest = "1:77a6108b8eb3cd0feac4eeb3e032f36c8fdfe9497671952fd9eb682b9c503158"
+ digest = "1:e18a77f3453d129d9a5b4cac4e912b21c2dd2af52a24a8fe6a8c241476ed7b6b"
name = "github.com/hashicorp/go-plugin"
- packages = [
- ".",
- "internal/proto",
- ]
+ packages = ["."]
pruneopts = "UT"
- revision = "362c99b11937c6a84686ee5726a8170e921ab406"
+ revision = "26219a000dd975abd5140a2ddbe415b366498be7"
[[projects]]
digest = "1:d260503602063d71718eb21f85c02133ad5eac894c2a6f0e0546b7dc017dc97e"
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml
index 1752d2434178..9b1f3229db99 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml
@@ -61,6 +61,11 @@
name = "gopkg.in/square/go-jose.v2"
version = "2.1.8"
+# Remove this once https://github.com/hashicorp/go-plugin/pull/97 is merged
+[[override]]
+ name = "github.com/hashicorp/go-plugin"
+ revision = "26219a000dd975abd5140a2ddbe415b366498be7"
+
[prune]
go-tests = true
unused-packages = true
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go
index c1d328bdf29e..a9b4ac1ad940 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/backend.go
@@ -52,7 +52,9 @@ func backend() *jwtAuthBackend {
"login",
"oidc/auth_url",
"oidc/callback",
- "ui", // TODO: remove when Vault UI is ready
+
+ // Uncomment to mount simple UI handler for local development
+ // "ui",
},
SealWrapStorage: []string{
"config",
@@ -64,7 +66,9 @@ func backend() *jwtAuthBackend {
pathRoleList(b),
pathRole(b),
pathConfig(b),
- pathUI(b), // TODO: remove when Vault UI is ready
+
+ // Uncomment to mount simple UI handler for local development
+ // pathUI(b),
},
pathOIDC(b),
),
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
index a8b221d261a5..6f50c59d986a 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
@@ -3,6 +3,7 @@ package jwtauth
import (
"errors"
"fmt"
+ "net"
"net/http"
"os"
"os/exec"
@@ -26,9 +27,9 @@ type loginResp struct {
func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
// handle ctrl-c while waiting for the callback
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, os.Interrupt)
- defer signal.Stop(ch)
+ sigintCh := make(chan os.Signal, 1)
+ signal.Notify(sigintCh, os.Interrupt)
+ defer signal.Stop(sigintCh)
doneCh := make(chan loginResp)
@@ -47,23 +48,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
return nil, errors.New("a 'role' must be specified")
}
- secret, err := fetchAuthURL(c, role, mount, port)
+ authURL, err := fetchAuthURL(c, role, mount, port)
if err != nil {
return nil, err
}
- authURL := secret.Data["auth_url"].(string)
- if authURL == "" {
- return nil, errors.New(fmt.Sprintf("Unable to authorize role %q. Check Vault logs for more information.", role))
- }
-
- fmt.Fprintf(os.Stderr, "Complete the login via your OIDC provider. Launching browser to:\n\n %s\n\n\n", authURL)
- if err := openURL(authURL); err != nil {
- fmt.Fprintf(os.Stderr, "Error attempting to automatically open browser: '%s'.\nPlease visit the authorization URL manually.", err)
- }
-
// Set up callback handler
- http.HandleFunc(fmt.Sprintf("/v1/auth/%s/oidc/callback", mount), func(w http.ResponseWriter, req *http.Request) {
+ http.HandleFunc(fmt.Sprintf("/ui/vault/auth/%s/oidc/callback", mount), func(w http.ResponseWriter, req *http.Request) {
var response string
query := req.URL.Query()
@@ -86,10 +77,22 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
doneCh <- loginResp{secret, err}
})
+ listener, err := net.Listen("tcp", ":"+port)
+ if err != nil {
+ return nil, err
+ }
+
+ // Open the default browser to the callback URL.
+ fmt.Fprintf(os.Stderr, "Complete the login via your OIDC provider. Launching browser to:\n\n %s\n\n\n", authURL)
+ if err := openURL(authURL); err != nil {
+ fmt.Fprintf(os.Stderr, "Error attempting to automatically open browser: '%s'.\nPlease visit the authorization URL manually.", err)
+ }
+
// Start local server
go func() {
- if err := http.ListenAndServe(":"+port, nil); err != nil && err != http.ErrServerClosed {
- fmt.Fprintf(os.Stderr, "Error listening for callback: %v\n\n", err.Error())
+ err := http.Serve(listener, nil)
+ if err != nil && err != http.ErrServerClosed {
+ doneCh <- loginResp{nil, err}
}
}()
@@ -97,18 +100,28 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
select {
case s := <-doneCh:
return s.secret, s.err
- case <-ch:
+ case <-sigintCh:
return nil, errors.New("interrupted")
}
}
-func fetchAuthURL(c *api.Client, role, mount, port string) (*api.Secret, error) {
+func fetchAuthURL(c *api.Client, role, mount, port string) (string, error) {
data := map[string]interface{}{
"role": role,
- "redirect_uri": fmt.Sprintf("http://localhost:%s/v1/auth/%s/oidc/callback", port, mount),
+ "redirect_uri": fmt.Sprintf("http://localhost:%s/ui/vault/auth/%s/oidc/callback", port, mount),
+ }
+
+ secret, err := c.Logical().Write(fmt.Sprintf("auth/%s/oidc/auth_url", mount), data)
+ if err != nil {
+ return "", err
}
- return c.Logical().Write(fmt.Sprintf("auth/%s/oidc/auth_url", mount), data)
+ authURL := secret.Data["auth_url"].(string)
+ if authURL == "" {
+ return "", errors.New(fmt.Sprintf("Unable to authorize role %q. Check Vault logs for more information.", role))
+ }
+
+ return authURL, nil
}
// openURL opens the specified URL in the default browser of the user.
@@ -131,6 +144,14 @@ func openURL(url string) error {
}
// parseError converts error from the API into summary and detailed portions.
+// This is used to present a nicer UI by splitting up *known* prefix sentences
+// from the rest of the text. e.g.
+//
+// "No response from provider. Gateway timeout from upstream proxy."
+//
+// becomes:
+//
+// "No response from provider.", "Gateway timeout from upstream proxy."
func parseError(err error) (string, string) {
headers := []string{errNoResponse, errLoginFailed, errTokenVerification}
summary := "Login error"
@@ -158,175 +179,6 @@ func parseError(err error) (string, string) {
}
return summary, detail
-
-}
-
-func errorHTML(summary, detail string) string {
- const html = `
-
-
-
-
-
-
-
-HashiCorp Vault
-
-
-
-
-
-
-
-`
- return fmt.Sprintf(html, summary, detail)
}
// Help method for OIDC cli
@@ -343,7 +195,7 @@ Usage: vault login -method=oidc [CONFIG K=V...]
Complete the login via your OIDC provider. Launching browser to:
https://accounts.google.com/o/oauth2/v2/...
-
+
The default browser will be opened for the user to complete the login. Alternatively,
the user may visit the provided URL directly.
@@ -358,145 +210,3 @@ Configuration:
return strings.TrimSpace(help)
}
-
-const successHTML = `
-
-
-
-
-
- Vault Authentication Succeeded
-
-
-
-
-
-
-`
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli_responses.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli_responses.go
new file mode 100644
index 000000000000..ba4082829c7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli_responses.go
@@ -0,0 +1,313 @@
+package jwtauth
+
+import "fmt"
+
+const successHTML = `
+
+
+
+
+
+ Vault Authentication Succeeded
+
+
+
+
+
+
+`
+
+func errorHTML(summary, detail string) string {
+ const html = `
+
+
+
+
+
+
+
+HashiCorp Vault
+
+
+
+
+
+
+
+`
+ return fmt.Sprintf(html, summary, detail)
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html
index dd6502ed73f1..5908c62dc76e 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/test_ui.html
@@ -1,4 +1,4 @@
-
+
Role:
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 316b1bd97069..72a0c2237706 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1409,12 +1409,12 @@
"revisionTime": "2018-12-10T20:01:33Z"
},
{
- "checksumSHA1": "86jzaGc3dRpZ5BKQPFP7ecasQfg=",
+ "checksumSHA1": "jCtLHj3YAONxCcV6v6kifTrRJwM=",
"path": "github.com/hashicorp/vault-plugin-auth-jwt",
- "revision": "bf17a88bb5c43eb2cbdc08011cd76ecec028521c",
- "revisionTime": "2019-02-07T06:35:46Z",
- "version": "=oidc-cli",
- "versionExact": "oidc-cli"
+ "revision": "6eaab2ed65f29101381ac871ffb06110b871a742",
+ "revisionTime": "2019-02-13T00:58:38Z",
+ "version": "=oidc-dev",
+ "versionExact": "oidc-dev"
},
{
"checksumSHA1": "Ldg2jQeyPrpAupyQq4lRVN+jfFY=",
From 50c7797984789efb92d3c2a1a4dae8336942b456 Mon Sep 17 00:00:00 2001
From: Matthew Irish
Date: Thu, 14 Feb 2019 09:39:19 -0600
Subject: [PATCH 05/31] UI - jwt auth (#6188)
* fix default rendering of svg and allow plugins access to mount tune form
* add auth-jwt component
* add callback route, and allow it to be navigated to on load
* add jwt as a supported auth method
* use auth-jwt component and implement initial oidc flow
* allow wrapping un-authed requests
* pass redirect_url and properly redirect with the wrapped token
* popup for login
* center popup window and move to localStorage events for cross window communication because of IE11
* access window via a getter on the auth-form component
* show OIDC provider name on the button
* fetch default role on render of the auth-jwt component
* simplify auth-form template
* style callback page
* refetch auth_url when path changes for auth-jwt component
* fix glimmer error on alias metadata, and add back popup-metadata component
* fix link in metadata page
* add logo-edition component and remove use of partial for logo svg
* render oidc callback template on the loading page if we're going there
* add docs icon and change timeout on the auth form
* move OIDC auth specific things to auth-jwt component
* start to add branded buttons for OIDC providers
* add google button
* finish branded buttons
* update glyph for error messages
* update tests for auth screen not showing tabs, add adapter tests and new auth jwt tests
* start auth-jwt tests
* simplify auth-jwt
* remove negative top margin on AlertInline
* only preventDefault if there's an event
* fill out tests
* sort out some naming
* feedback on templates and styles
* clear error when starting OIDC auth and call for new auth_url
* also allow 'oidc' as the auth method type
* handle namespaces with OIDC auth
* review feedback
* use new getters in popup-metadata
---
ui/app/adapters/application.js | 6 +-
ui/app/adapters/auth-method.js | 4 +
ui/app/adapters/cluster.js | 5 +-
ui/app/adapters/role-jwt.js | 32 +++
ui/app/components/alert-inline.js | 1 +
ui/app/components/auth-form.js | 26 +-
ui/app/components/auth-jwt.js | 165 ++++++++++++
ui/app/components/i-con.js | 42 +--
ui/app/components/identity/popup-metadata.js | 29 ++
ui/app/components/logo-edition.js | 7 +
ui/app/components/logo-splash.js | 8 +-
ui/app/controllers/vault.js | 14 +
.../vault/cluster/oidc-callback.js | 7 +
ui/app/helpers/message-types.js | 2 +-
ui/app/helpers/supported-auth-backends.js | 16 ++
ui/app/mixins/cluster-route.js | 10 +-
ui/app/models/role-jwt.js | 32 +++
ui/app/router.js | 1 +
ui/app/routes/loading.js | 17 ++
ui/app/routes/vault/cluster.js | 4 +-
ui/app/routes/vault/cluster/oidc-callback.js | 21 ++
.../vault/cluster/settings/auth/configure.js | 15 +-
ui/app/styles/components/auth-buttons.scss | 22 ++
ui/app/styles/core.scss | 1 +
ui/app/styles/core/helpers.scss | 3 +
ui/app/styles/core/message.scss | 2 +-
ui/app/templates/application.hbs | 33 ---
ui/app/templates/components/alert-inline.hbs | 8 +-
.../components/auth-button-auth0.hbs | 8 +
.../components/auth-button-gitlab.hbs | 6 +
.../components/auth-button-google.hbs | 44 ++++
.../components/auth-form-options.hbs | 26 ++
ui/app/templates/components/auth-form.hbs | 139 +++++-----
ui/app/templates/components/auth-jwt.hbs | 51 ++++
.../identity/item-alias/alias-metadata.hbs | 17 +-
.../components/identity/item-metadata.hbs | 17 +-
.../components/identity/popup-metadata.hbs | 21 ++
.../logo-edition.hbs} | 0
ui/app/templates/components/logo-splash.hbs | 2 +-
ui/app/templates/components/splash-page.hbs | 2 +-
ui/app/templates/loading.hbs | 2 +-
.../partials/replication/disable.hbs | 8 +-
.../templates/partials/replication/enable.hbs | 10 +-
.../partials/replication/promote.hbs | 5 +-
.../svg/icons/cancel-square-fill.hbs | 4 +
ui/app/templates/svg/icons/docs.hbs | 3 +
ui/app/templates/vault.hbs | 35 +++
.../vault/cluster/access/methods.hbs | 10 +-
.../templates/vault/cluster/oidc-callback.hbs | 14 +
ui/app/templates/vault/error.hbs | 2 +-
ui/app/utils/parse-url.js | 16 ++
ui/tests/acceptance/auth-test.js | 23 +-
.../integration/components/auth-form-test.js | 16 +-
.../integration/components/auth-jwt-test.js | 247 ++++++++++++++++++
ui/tests/pages/components/auth-form.js | 1 +
ui/tests/pages/components/auth-jwt.js | 11 +
ui/tests/unit/adapters/cluster-test.js | 14 +
ui/tests/unit/models/role-jwt-test.js | 43 +++
58 files changed, 1112 insertions(+), 218 deletions(-)
create mode 100644 ui/app/adapters/role-jwt.js
create mode 100644 ui/app/components/auth-jwt.js
create mode 100644 ui/app/components/identity/popup-metadata.js
create mode 100644 ui/app/components/logo-edition.js
create mode 100644 ui/app/controllers/vault/cluster/oidc-callback.js
create mode 100644 ui/app/models/role-jwt.js
create mode 100644 ui/app/routes/loading.js
create mode 100644 ui/app/routes/vault/cluster/oidc-callback.js
create mode 100644 ui/app/styles/components/auth-buttons.scss
create mode 100644 ui/app/templates/components/auth-button-auth0.hbs
create mode 100644 ui/app/templates/components/auth-button-gitlab.hbs
create mode 100644 ui/app/templates/components/auth-button-google.hbs
create mode 100644 ui/app/templates/components/auth-form-options.hbs
create mode 100644 ui/app/templates/components/auth-jwt.hbs
create mode 100644 ui/app/templates/components/identity/popup-metadata.hbs
rename ui/app/templates/{svg/vault-edition-logo.hbs => components/logo-edition.hbs} (100%)
create mode 100644 ui/app/templates/svg/icons/cancel-square-fill.hbs
create mode 100644 ui/app/templates/svg/icons/docs.hbs
create mode 100644 ui/app/templates/vault.hbs
create mode 100644 ui/app/templates/vault/cluster/oidc-callback.hbs
create mode 100644 ui/app/utils/parse-url.js
create mode 100644 ui/tests/integration/components/auth-jwt-test.js
create mode 100644 ui/tests/pages/components/auth-jwt.js
create mode 100644 ui/tests/unit/models/role-jwt-test.js
diff --git a/ui/app/adapters/application.js b/ui/app/adapters/application.js
index 2919b07cd4f6..ab40b45a7ffd 100644
--- a/ui/app/adapters/application.js
+++ b/ui/app/adapters/application.js
@@ -35,9 +35,9 @@ export default DS.RESTAdapter.extend({
let headers = {};
if (token && !options.unauthenticated) {
headers['X-Vault-Token'] = token;
- if (options.wrapTTL) {
- headers['X-Vault-Wrap-TTL'] = options.wrapTTL;
- }
+ }
+ if (options.wrapTTL) {
+ headers['X-Vault-Wrap-TTL'] = options.wrapTTL;
}
let namespace =
typeof options.namespace === 'undefined' ? this.get('namespaceService.path') : options.namespace;
diff --git a/ui/app/adapters/auth-method.js b/ui/app/adapters/auth-method.js
index 759d3a96bfe5..3691cc35831f 100644
--- a/ui/app/adapters/auth-method.js
+++ b/ui/app/adapters/auth-method.js
@@ -56,4 +56,8 @@ export default ApplicationAdapter.extend({
urlForDeleteRecord(id, modelName, snapshot) {
return this.url(snapshot.id);
},
+
+ exchangeOIDC(path, state, code) {
+ return this.ajax(`/v1/auth/${path}/oidc/callback`, 'GET', { data: { state, code } });
+ },
});
diff --git a/ui/app/adapters/cluster.js b/ui/app/adapters/cluster.js
index 8f0fe15bd415..9b01f3fb146e 100644
--- a/ui/app/adapters/cluster.js
+++ b/ui/app/adapters/cluster.js
@@ -109,7 +109,7 @@ export default ApplicationAdapter.extend({
},
authenticate({ backend, data }) {
- const { token, password, username, path } = data;
+ const { role, jwt, token, password, username, path } = data;
const url = this.urlForAuth(backend, username, path);
const verb = backend === 'token' ? 'GET' : 'POST';
let options = {
@@ -119,6 +119,8 @@ export default ApplicationAdapter.extend({
options.headers = {
'X-Vault-Token': token,
};
+ } else if (backend === 'jwt') {
+ options.data = { role, jwt };
} else {
options.data = token ? { token, password } : { password };
}
@@ -139,6 +141,7 @@ export default ApplicationAdapter.extend({
const authBackend = type.toLowerCase();
const authURLs = {
github: 'login',
+ jwt: 'login',
userpass: `login/${encodeURIComponent(username)}`,
ldap: `login/${encodeURIComponent(username)}`,
okta: `login/${encodeURIComponent(username)}`,
diff --git a/ui/app/adapters/role-jwt.js b/ui/app/adapters/role-jwt.js
new file mode 100644
index 000000000000..029b6c5925b0
--- /dev/null
+++ b/ui/app/adapters/role-jwt.js
@@ -0,0 +1,32 @@
+import ApplicationAdapter from './application';
+import { inject as service } from '@ember/service';
+import { get } from '@ember/object';
+
+export default ApplicationAdapter.extend({
+ router: service(),
+
+ findRecord(store, type, id, snapshot) {
+ let [path, role] = JSON.parse(id);
+
+ let namespace = get(snapshot, 'adapterOptions.namespace');
+ let url = `/v1/auth/${path}/oidc/auth_url`;
+ let redirect_uri = `${window.location.origin}${this.router.urlFor('vault.cluster.oidc-callback', {
+ auth_path: path,
+ })}`;
+
+ if (namespace) {
+ redirect_uri = `${window.location.origin}${this.router.urlFor(
+ 'vault.cluster.oidc-callback',
+ { auth_path: path },
+ { queryParams: { namespace } }
+ )}`;
+ }
+
+ return this.ajax(url, 'POST', {
+ data: {
+ role,
+ redirect_uri,
+ },
+ });
+ },
+});
diff --git a/ui/app/components/alert-inline.js b/ui/app/components/alert-inline.js
index ebfa4a5daa02..f594e6d55f84 100644
--- a/ui/app/components/alert-inline.js
+++ b/ui/app/components/alert-inline.js
@@ -5,6 +5,7 @@ import { messageTypes } from 'vault/helpers/message-types';
export default Component.extend({
type: null,
+ message: null,
classNames: ['message-inline'],
diff --git a/ui/app/components/auth-form.js b/ui/app/components/auth-form.js
index 7a8a7bee4c2b..2bb11b369487 100644
--- a/ui/app/components/auth-form.js
+++ b/ui/app/components/auth-form.js
@@ -1,4 +1,4 @@
-import { run } from '@ember/runloop';
+import { next } from '@ember/runloop';
import { inject as service } from '@ember/service';
import { match, alias, or } from '@ember/object/computed';
import { assign } from '@ember/polyfills';
@@ -23,7 +23,7 @@ export default Component.extend(DEFAULTS, {
store: service(),
csp: service('csp-event'),
- // set during init and potentially passed in via a query param
+ // passed in via a query param
selectedAuth: null,
methods: null,
cluster: null,
@@ -131,7 +131,9 @@ export default Component.extend(DEFAULTS, {
try {
let response = yield adapter.toolAction('unwrap', null, { clientToken: token });
this.set('token', response.auth.client_token);
- this.send('doSubmit');
+ next(() => {
+ this.send('doSubmit');
+ });
} catch (e) {
this.set('error', `Token unwrap failed: ${e.errors[0]}`);
}
@@ -146,7 +148,7 @@ export default Component.extend(DEFAULTS, {
},
});
this.set('methods', methods.map(m => m.serialize({ includeId: true })));
- run.next(() => {
+ next(() => {
store.unloadAll('auth-method');
});
} catch (e) {
@@ -154,7 +156,7 @@ export default Component.extend(DEFAULTS, {
}
}),
- showLoading: or('authenticate.isRunning', 'fetchMethods.isRunning', 'unwrapToken.isRunning'),
+ showLoading: or('isLoading', 'authenticate.isRunning', 'fetchMethods.isRunning', 'unwrapToken.isRunning'),
handleError(e) {
this.set('loading', false);
@@ -194,6 +196,15 @@ export default Component.extend(DEFAULTS, {
actions: {
doSubmit() {
+ let passedData, e;
+ if (arguments.length > 1) {
+ [passedData, e] = arguments;
+ } else {
+ [e] = arguments;
+ }
+ if (e) {
+ e.preventDefault();
+ }
let data = {};
this.setProperties({
error: null,
@@ -205,10 +216,13 @@ export default Component.extend(DEFAULTS, {
let attributes = get(backendMeta || {}, 'formAttributes') || {};
data = assign(data, this.getProperties(...attributes));
+ if (passedData) {
+ data = assign(data, passedData);
+ }
if (this.get('customPath') || get(backend, 'id')) {
data.path = this.get('customPath') || get(backend, 'id');
}
- this.authenticate.perform(backend.type, data);
+ return this.authenticate.unlinked().perform(backend.type, data);
},
},
});
diff --git a/ui/app/components/auth-jwt.js b/ui/app/components/auth-jwt.js
new file mode 100644
index 000000000000..c1596b2d5265
--- /dev/null
+++ b/ui/app/components/auth-jwt.js
@@ -0,0 +1,165 @@
+import Ember from 'ember';
+import { inject as service } from '@ember/service';
+import Component from './outer-html';
+import { next, later } from '@ember/runloop';
+import { task, timeout, waitForEvent } from 'ember-concurrency';
+import { computed } from '@ember/object';
+
+const WAIT_TIME = Ember.testing ? 0 : 500;
+const ERROR_WINDOW_CLOSED =
+ 'The provider window was closed before authentication was complete. Please click Sign In to try again.';
+const ERROR_MISSING_PARAMS =
+ 'The callback from the provider did not supply all of the required parameters. Please click Sign In to try again. If the problem persists, you may want to contact your administrator.';
+
+export { ERROR_WINDOW_CLOSED, ERROR_MISSING_PARAMS };
+
+export default Component.extend({
+ store: service(),
+ selectedAuthPath: null,
+ roleName: null,
+ role: null,
+ onRoleName() {},
+ onLoading() {},
+ onError() {},
+ onToken() {},
+ onNamespace() {},
+
+ didReceiveAttrs() {
+ next(() => {
+ let { oldSelectedAuthPath, selectedAuthPath } = this;
+ if (oldSelectedAuthPath !== selectedAuthPath) {
+ this.set('role', null);
+ this.onRoleName(null);
+ this.fetchRole.perform(null, { debounce: false });
+ }
+ this.set('oldSelectedAuthPath', selectedAuthPath);
+ });
+ },
+
+ // OIDC roles in the JWT/OIDC backend are those with an authUrl,
+ // those that are JWT type will 400 when trying to fetch the role
+ isOIDC: computed('role', 'role.authUrl', function() {
+ return this.role && this.role.authUrl;
+ }),
+
+ getWindow() {
+ return this.window || window;
+ },
+
+ fetchRole: task(function*(roleName, options = { debounce: true }) {
+ if (options.debounce) {
+ this.onRoleName(roleName);
+ // debounce
+ yield timeout(WAIT_TIME);
+ }
+ let path = this.selectedAuthPath || 'jwt';
+ let id = JSON.stringify([path, roleName]);
+ let role = null;
+ try {
+ role = yield this.store.findRecord('role-jwt', id, { adapterOptions: { namespace: this.namespace } });
+ } catch (e) {
+ if (!e.httpStatus || e.httpStatus !== 400) {
+ throw e;
+ }
+ }
+ this.set('role', role);
+ }).restartable(),
+
+ handleOIDCError(err) {
+ this.onLoading(false);
+ this.prepareForOIDC.cancelAll();
+ this.onError(err);
+ },
+
+ prepareForOIDC: task(function*(oidcWindow) {
+ // show the loading animation in the parent
+ this.onLoading(true);
+ // start watching the popup window and the current one
+ this.watchPopup.perform(oidcWindow);
+ this.watchCurrent.perform(oidcWindow);
+ // and then wait for storage event to be fired from the popup
+ // window setting a value in localStorage when the callback route is loaded
+ let storageEvent = yield waitForEvent(this.getWindow(), 'storage');
+ this.exchangeOIDC.perform(storageEvent, oidcWindow);
+ }),
+
+ watchPopup: task(function*(oidcWindow) {
+ while (true) {
+ yield timeout(WAIT_TIME);
+ if (!oidcWindow || oidcWindow.closed) {
+ return this.handleOIDCError(ERROR_WINDOW_CLOSED);
+ }
+ }
+ }),
+
+ watchCurrent: task(function*(oidcWindow) {
+ yield waitForEvent(this.getWindow(), 'beforeunload');
+ oidcWindow.close();
+ }),
+
+ closeWindow(oidcWindow) {
+ this.watchPopup.cancelAll();
+ this.watchCurrent.cancelAll();
+ oidcWindow.close();
+ },
+
+ exchangeOIDC: task(function*(event, oidcWindow) {
+ if (event.key !== 'oidcState') {
+ return;
+ }
+ this.onLoading(true);
+ // get the info from the event fired by the other window and
+ // then remove it from localStorage
+ let { namespace, path, state, code } = JSON.parse(event.newValue);
+ this.getWindow().localStorage.removeItem('oidcState');
+
+ // defer closing of the window, but continue executing the task
+ later(() => {
+ this.closeWindow(oidcWindow);
+ }, WAIT_TIME);
+ if (!path || !state || !code) {
+ return this.handleOIDCError(ERROR_MISSING_PARAMS);
+ }
+ let adapter = this.store.adapterFor('auth-method');
+ this.onNamespace(namespace);
+ let resp;
+ // do the OIDC exchange, set the token on the parent component
+ // and submit auth form
+ try {
+ resp = yield adapter.exchangeOIDC(path, state, code);
+ } catch (e) {
+ return this.handleOIDCError(e);
+ }
+ let token = resp.auth.client_token;
+ this.onSelectedAuth('token');
+ this.onToken(token);
+ yield this.onSubmit();
+ }),
+
+ actions: {
+ async startOIDCAuth(data, e) {
+ this.onError(null);
+ if (e && e.preventDefault) {
+ e.preventDefault();
+ }
+ if (!this.isOIDC) {
+ return;
+ }
+
+ await this.fetchRole.perform(this.roleName, { debounce: false });
+ let win = this.getWindow();
+
+ const POPUP_WIDTH = 500;
+ const POPUP_HEIGHT = 600;
+ let left = win.screen.width / 2 - POPUP_WIDTH / 2;
+ let top = win.screen.height / 2 - POPUP_HEIGHT / 2;
+ let oidcWindow = win.open(
+ this.role.authUrl,
+ 'vaultOIDCWindow',
+      `width=${POPUP_WIDTH},height=${POPUP_HEIGHT},resizable,scrollbars=yes,top=${top},left=${left}`
+ );
+
+ this.prepareForOIDC.perform(oidcWindow);
+ },
+ },
+});
diff --git a/ui/app/components/i-con.js b/ui/app/components/i-con.js
index 639094d80e35..d08a03815da1 100644
--- a/ui/app/components/i-con.js
+++ b/ui/app/components/i-con.js
@@ -4,33 +4,35 @@ import { computed } from '@ember/object';
import hbs from 'htmlbars-inline-precompile';
const GLYPHS_WITH_SVG_TAG = [
- 'learn',
- 'video',
- 'tour',
- 'stopwatch',
+ 'cancel-square-outline',
+ 'cancel-square-fill',
+ 'check-circle-fill',
+ 'check-plain',
+ 'checkmark-circled-outline',
+ 'close-circled-outline',
+ 'console',
+ 'control-lock',
+ 'docs',
'download',
- 'folder',
+ 'edition-enterprise',
+ 'edition-oss',
+ 'false',
'file',
+ 'folder',
'hidden',
+ 'information-reversed',
+ 'learn',
+ 'neutral-circled-outline',
'perf-replication',
+ 'person',
'role',
- 'visible',
- 'information-reversed',
+ 'status-indicator',
+ 'stopwatch',
+ 'tour',
'true',
- 'false',
'upload',
- 'control-lock',
- 'edition-enterprise',
- 'edition-oss',
- 'check-plain',
- 'check-circle-fill',
- 'cancel-square-outline',
- 'status-indicator',
- 'person',
- 'console',
- 'checkmark-circled-outline',
- 'close-circled-outline',
- 'neutral-circled-outline',
+ 'video',
+ 'visible',
];
export default Component.extend({
diff --git a/ui/app/components/identity/popup-metadata.js b/ui/app/components/identity/popup-metadata.js
new file mode 100644
index 000000000000..1198368d7d99
--- /dev/null
+++ b/ui/app/components/identity/popup-metadata.js
@@ -0,0 +1,29 @@
+import Base from './_popup-base';
+import { computed } from '@ember/object';
+import { alias } from '@ember/object/computed';
+
+export default Base.extend({
+ model: alias('params.firstObject'),
+ key: computed('params', function() {
+ return this.params.objectAt(1);
+ }),
+
+ messageArgs(model, key) {
+ return [model, key];
+ },
+
+ successMessage(model, key) {
+ return `Successfully removed '${key}' from metadata`;
+ },
+ errorMessage(e, model, key) {
+ let error = e.errors ? e.errors.join(' ') : e.message;
+ return `There was a problem removing '${key}' from the metadata - ${error}`;
+ },
+
+ transaction(model, key) {
+ let metadata = model.metadata;
+ delete metadata[key];
+ model.set('metadata', { ...metadata });
+ return model.save();
+ },
+});
diff --git a/ui/app/components/logo-edition.js b/ui/app/components/logo-edition.js
new file mode 100644
index 000000000000..0e2733facd46
--- /dev/null
+++ b/ui/app/components/logo-edition.js
@@ -0,0 +1,7 @@
+import { inject as service } from '@ember/service';
+import Component from '@ember/component';
+
+export default Component.extend({
+ tagName: '',
+ version: service(),
+});
diff --git a/ui/app/components/logo-splash.js b/ui/app/components/logo-splash.js
index 0e2733facd46..0fef514611ca 100644
--- a/ui/app/components/logo-splash.js
+++ b/ui/app/components/logo-splash.js
@@ -1,7 +1 @@
-import { inject as service } from '@ember/service';
-import Component from '@ember/component';
-
-export default Component.extend({
- tagName: '',
- version: service(),
-});
+export { default } from './outer-html';
diff --git a/ui/app/controllers/vault.js b/ui/app/controllers/vault.js
index 156be6736a9c..bd00f9815d61 100644
--- a/ui/app/controllers/vault.js
+++ b/ui/app/controllers/vault.js
@@ -1,4 +1,7 @@
+import { inject as service } from '@ember/service';
import Controller from '@ember/controller';
+import { computed } from '@ember/object';
+import config from '../config/environment';
export default Controller.extend({
queryParams: [
@@ -7,4 +10,15 @@ export default Controller.extend({
},
],
wrappedToken: '',
+ env: config.environment,
+ auth: service(),
+ store: service(),
+ activeCluster: computed('auth.activeCluster', function() {
+ let id = this.get('auth.activeCluster');
+ return id ? this.get('store').peekRecord('cluster', id) : null;
+ }),
+ activeClusterName: computed('activeCluster', function() {
+ const activeCluster = this.get('activeCluster');
+ return activeCluster ? activeCluster.get('name') : null;
+ }),
});
diff --git a/ui/app/controllers/vault/cluster/oidc-callback.js b/ui/app/controllers/vault/cluster/oidc-callback.js
new file mode 100644
index 000000000000..aef72bb20cee
--- /dev/null
+++ b/ui/app/controllers/vault/cluster/oidc-callback.js
@@ -0,0 +1,7 @@
+import Controller from '@ember/controller';
+
+export default Controller.extend({
+ queryParams: ['state', 'code'],
+ code: null,
+ state: null,
+});
diff --git a/ui/app/helpers/message-types.js b/ui/app/helpers/message-types.js
index e86f836db5b9..ef75332bd2b1 100644
--- a/ui/app/helpers/message-types.js
+++ b/ui/app/helpers/message-types.js
@@ -16,7 +16,7 @@ const MESSAGE_TYPES = {
danger: {
class: 'is-danger',
glyphClass: 'has-text-danger',
- glyph: 'close-circled',
+ glyph: 'cancel-square-fill',
text: 'Error',
},
warning: {
diff --git a/ui/app/helpers/supported-auth-backends.js b/ui/app/helpers/supported-auth-backends.js
index 36ff7c453379..15326e9e04a9 100644
--- a/ui/app/helpers/supported-auth-backends.js
+++ b/ui/app/helpers/supported-auth-backends.js
@@ -33,6 +33,22 @@ const SUPPORTED_AUTH_BACKENDS = [
displayNamePath: 'metadata.username',
formAttributes: ['username', 'password'],
},
+ {
+ type: 'jwt',
+ typeDisplay: 'JWT/OIDC',
+ description: 'Authenticate using JWT or OIDC provider.',
+ tokenPath: 'client_token',
+ displayNamePath: 'display_name',
+ formAttributes: ['role', 'jwt'],
+ },
+ {
+ type: 'oidc',
+ typeDisplay: 'OIDC',
+ description: 'Authenticate using JWT or OIDC provider.',
+ tokenPath: 'client_token',
+ displayNamePath: 'display_name',
+ formAttributes: ['role', 'jwt'],
+ },
{
type: 'github',
typeDisplay: 'GitHub',
diff --git a/ui/app/mixins/cluster-route.js b/ui/app/mixins/cluster-route.js
index 72d6de9882c1..9b814c99ed14 100644
--- a/ui/app/mixins/cluster-route.js
+++ b/ui/app/mixins/cluster-route.js
@@ -6,6 +6,7 @@ const INIT = 'vault.cluster.init';
const UNSEAL = 'vault.cluster.unseal';
const AUTH = 'vault.cluster.auth';
const CLUSTER = 'vault.cluster';
+const OIDC_CALLBACK = 'vault.cluster.oidc-callback';
const DR_REPLICATION_SECONDARY = 'vault.cluster.replication-dr-promote';
export { INIT, UNSEAL, AUTH, CLUSTER, DR_REPLICATION_SECONDARY };
@@ -13,8 +14,8 @@ export { INIT, UNSEAL, AUTH, CLUSTER, DR_REPLICATION_SECONDARY };
export default Mixin.create({
auth: service(),
- transitionToTargetRoute() {
- const targetRoute = this.targetRouteName();
+ transitionToTargetRoute(transition) {
+ const targetRoute = this.targetRouteName(transition);
if (targetRoute && targetRoute !== this.routeName) {
return this.transitionTo(targetRoute);
}
@@ -38,7 +39,7 @@ export default Mixin.create({
return !!get(this.controllerFor(INIT), 'keyData');
},
- targetRouteName() {
+ targetRouteName(transition) {
const cluster = this.clusterModel();
const isAuthed = this.authToken();
if (get(cluster, 'needsInit')) {
@@ -54,6 +55,9 @@ export default Mixin.create({
return DR_REPLICATION_SECONDARY;
}
if (!isAuthed) {
+ if ((transition && transition.targetName === OIDC_CALLBACK) || this.routeName === OIDC_CALLBACK) {
+ return OIDC_CALLBACK;
+ }
return AUTH;
}
if (
diff --git a/ui/app/models/role-jwt.js b/ui/app/models/role-jwt.js
new file mode 100644
index 000000000000..0083b6e4d9a8
--- /dev/null
+++ b/ui/app/models/role-jwt.js
@@ -0,0 +1,32 @@
+import DS from 'ember-data';
+import { computed } from '@ember/object';
+import parseURL from 'vault/utils/parse-url';
+const { attr } = DS;
+
+const DOMAIN_STRINGS = {
+ github: 'GitHub',
+ gitlab: 'GitLab',
+ google: 'Google',
+ ping: 'Ping',
+ okta: 'Okta',
+ auth0: 'Auth0',
+};
+
+const PROVIDER_WITH_LOGO = ['GitLab', 'Google', 'Auth0'];
+
+export { DOMAIN_STRINGS, PROVIDER_WITH_LOGO };
+
+export default DS.Model.extend({
+ authUrl: attr('string'),
+
+ providerName: computed('authUrl', function() {
+ let { hostname } = parseURL(this.authUrl);
+ let firstMatch = Object.keys(DOMAIN_STRINGS).find(name => hostname.includes(name));
+ return DOMAIN_STRINGS[firstMatch] || null;
+ }),
+
+ providerButtonComponent: computed('providerName', function() {
+ let { providerName } = this;
+ return PROVIDER_WITH_LOGO.includes(providerName) ? `auth-button-${providerName.toLowerCase()}` : null;
+ }),
+});
diff --git a/ui/app/router.js b/ui/app/router.js
index 5952710f40c0..b1b87d87904e 100644
--- a/ui/app/router.js
+++ b/ui/app/router.js
@@ -9,6 +9,7 @@ const Router = EmberRouter.extend({
Router.map(function() {
this.route('vault', { path: '/' }, function() {
this.route('cluster', { path: '/:cluster_name' }, function() {
+ this.route('oidc-callback', { path: '/auth/*auth_path/oidc/callback' });
this.route('auth');
this.route('init');
this.route('logout');
diff --git a/ui/app/routes/loading.js b/ui/app/routes/loading.js
new file mode 100644
index 000000000000..a327169ddb78
--- /dev/null
+++ b/ui/app/routes/loading.js
@@ -0,0 +1,17 @@
+import Route from '@ember/routing/route';
+
+export default Route.extend({
+ renderTemplate() {
+ let { targetName } = this.router.currentState.routerJs.activeTransition;
+ let isCallback =
+ targetName === 'vault.cluster.oidc-callback' || targetName === 'vault.cluster.oidc-callback-namespace';
+ if (isCallback) {
+ this.render('vault/cluster/oidc-callback', {
+ into: 'application',
+ outlet: 'main',
+ });
+ } else {
+ this._super(...arguments);
+ }
+ },
+});
diff --git a/ui/app/routes/vault/cluster.js b/ui/app/routes/vault/cluster.js
index 43700e0fcf0a..46a41eb3b889 100644
--- a/ui/app/routes/vault/cluster.js
+++ b/ui/app/routes/vault/cluster.js
@@ -90,7 +90,7 @@ export default Route.extend(ModelBoundaryRoute, ClusterRoute, {
.cancelOn('deactivate')
.keepLatest(),
- afterModel(model) {
+ afterModel(model, transition) {
this._super(...arguments);
this.get('currentCluster').setCluster(model);
@@ -99,7 +99,7 @@ export default Route.extend(ModelBoundaryRoute, ClusterRoute, {
if (this.get('namespaceService.path') && !this.get('version.hasNamespaces')) {
return this.transitionTo(this.routeName, { queryParams: { namespace: '' } });
}
- return this.transitionToTargetRoute();
+ return this.transitionToTargetRoute(transition);
},
setupController() {
diff --git a/ui/app/routes/vault/cluster/oidc-callback.js b/ui/app/routes/vault/cluster/oidc-callback.js
new file mode 100644
index 000000000000..d03e902a74b7
--- /dev/null
+++ b/ui/app/routes/vault/cluster/oidc-callback.js
@@ -0,0 +1,21 @@
+import Route from '@ember/routing/route';
+
+export default Route.extend({
+ templateName: 'vault/cluster/oidc-callback',
+ model() {
+ // left blank so we render the template immediately
+ },
+ afterModel() {
+ let { auth_path: path, code, state } = this.paramsFor(this.routeName);
+ let { namespaceQueryParam: namespace } = this.paramsFor('vault.cluster');
+ path = window.decodeURIComponent(path);
+ let queryParams = { namespace, path, code, state };
+ window.localStorage.setItem('oidcState', JSON.stringify(queryParams));
+ },
+ renderTemplate() {
+ this.render(this.templateName, {
+ into: 'application',
+ outlet: 'main',
+ });
+ },
+});
diff --git a/ui/app/routes/vault/cluster/settings/auth/configure.js b/ui/app/routes/vault/cluster/settings/auth/configure.js
index 26cc8b91e502..a3f1c9ab2d7b 100644
--- a/ui/app/routes/vault/cluster/settings/auth/configure.js
+++ b/ui/app/routes/vault/cluster/settings/auth/configure.js
@@ -1,23 +1,10 @@
-import { set } from '@ember/object';
import Route from '@ember/routing/route';
-import DS from 'ember-data';
-
-import { methods } from 'vault/helpers/mountable-auth-methods';
-
-const METHODS = methods();
export default Route.extend({
model() {
const { method } = this.paramsFor(this.routeName);
return this.store.findAll('auth-method').then(() => {
- const model = this.store.peekRecord('auth-method', method);
- const modelType = model && model.get('methodType');
- if (!model || (modelType !== 'token' && !METHODS.findBy('type', modelType))) {
- const error = new DS.AdapterError();
- set(error, 'httpStatus', 404);
- throw error;
- }
- return model;
+ return this.store.peekRecord('auth-method', method);
});
},
});
diff --git a/ui/app/styles/components/auth-buttons.scss b/ui/app/styles/components/auth-buttons.scss
new file mode 100644
index 000000000000..452fb2fe1235
--- /dev/null
+++ b/ui/app/styles/components/auth-buttons.scss
@@ -0,0 +1,22 @@
+.auth-button-tile {
+ height: 31px;
+ width: 31px;
+ background: $white;
+ border-radius: 1px;
+ box-shadow: 0 0 0 1px rgba(255, 255, 255, 0.4);
+}
+.auth-button-type-google {
+ position: relative;
+ top: -10px;
+ left: -1.05rem;
+}
+
+.auth-button-type-auth0,
+.auth-button-type-gitlab {
+ position: relative;
+ top: -6px;
+ left: -0.75rem;
+}
+[class*='auth-button-type'] .text {
+ padding-left: $spacing-m;
+}
diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss
index de93f14eee41..c6237839085f 100644
--- a/ui/app/styles/core.scss
+++ b/ui/app/styles/core.scss
@@ -39,6 +39,7 @@
@import './core/layout';
@import './core/lists';
+@import './components/auth-buttons';
@import './components/auth-form';
@import './components/b64-toggle';
@import './components/box-label';
diff --git a/ui/app/styles/core/helpers.scss b/ui/app/styles/core/helpers.scss
index 3f94d9115af2..7ba150425685 100644
--- a/ui/app/styles/core/helpers.scss
+++ b/ui/app/styles/core/helpers.scss
@@ -143,3 +143,6 @@
font-size: $size-8;
text-transform: lowercase;
}
+.has-bottom-margin {
+ margin-bottom: $spacing-m;
+}
diff --git a/ui/app/styles/core/message.scss b/ui/app/styles/core/message.scss
index 8bd09f1fe0e8..7e8932b91180 100644
--- a/ui/app/styles/core/message.scss
+++ b/ui/app/styles/core/message.scss
@@ -99,7 +99,7 @@
.message-inline {
display: flex;
- margin: -$spacing-xs 0 $spacing-l;
+ margin: 0 0 $spacing-l;
.icon {
flex: 0;
diff --git a/ui/app/templates/application.hbs b/ui/app/templates/application.hbs
index ca382f4e2d95..b560c7189e0e 100644
--- a/ui/app/templates/application.hbs
+++ b/ui/app/templates/application.hbs
@@ -1,36 +1,3 @@
{{outlet}}
-
- {{#if (eq env "development") }}
-
-
- {{i-con glyph="wand" class="type-icon"}}Local Development
-
-
- {{/if}}
-
diff --git a/ui/app/templates/components/alert-inline.hbs b/ui/app/templates/components/alert-inline.hbs
index ea2976495b90..4b841feca4c1 100644
--- a/ui/app/templates/components/alert-inline.hbs
+++ b/ui/app/templates/components/alert-inline.hbs
@@ -1,8 +1,8 @@
-
- {{message}}
+
+ {{@message}}
diff --git a/ui/app/templates/components/auth-button-auth0.hbs b/ui/app/templates/components/auth-button-auth0.hbs
new file mode 100644
index 000000000000..6d5591c4b93d
--- /dev/null
+++ b/ui/app/templates/components/auth-button-auth0.hbs
@@ -0,0 +1,8 @@
+
diff --git a/ui/app/templates/components/auth-button-gitlab.hbs b/ui/app/templates/components/auth-button-gitlab.hbs
new file mode 100644
index 000000000000..6bcbcfbb21bc
--- /dev/null
+++ b/ui/app/templates/components/auth-button-gitlab.hbs
@@ -0,0 +1,6 @@
+
diff --git a/ui/app/templates/components/auth-button-google.hbs b/ui/app/templates/components/auth-button-google.hbs
new file mode 100644
index 000000000000..50a11af454e6
--- /dev/null
+++ b/ui/app/templates/components/auth-button-google.hbs
@@ -0,0 +1,44 @@
+
diff --git a/ui/app/templates/components/auth-form-options.hbs b/ui/app/templates/components/auth-form-options.hbs
new file mode 100644
index 000000000000..7f7f3c121746
--- /dev/null
+++ b/ui/app/templates/components/auth-form-options.hbs
@@ -0,0 +1,26 @@
+{{#if (not this.selectedAuthIsPath)}}
+
+
+ {{#if this.isOpen}}
+
+
+ Mount path
+
+
+
+
+
+
+ {{/if}}
+
+{{/if}}
diff --git a/ui/app/templates/components/auth-form.hbs b/ui/app/templates/components/auth-form.hbs
index 8b8f0a31b112..5c5a8c6b633c 100644
--- a/ui/app/templates/components/auth-form.hbs
+++ b/ui/app/templates/components/auth-form.hbs
@@ -4,23 +4,16 @@
{{partial 'svg/vault-loading'}}
{{/if}}
+ {{#if hasMethodsWithPath}}
{{#each methodsToShow as |method|}}
{{#with (or method.path method.type) as |methodKey|}}
- {{#if hasMethodsWithPath}}
{{#link-to 'vault.cluster.auth' cluster.name (query-params with=methodKey) data-test-auth-method-link=method.type}}
{{or method.id (capitalize method.type)}}
{{/link-to}}
- {{else}}
-
- {{#link-to 'vault.cluster.auth' cluster.name (query-params with=methodKey) data-test-auth-method-link=method.type}}
- {{or method.id method.typeDisplay}}
- {{/link-to}}
-
- {{/if}}
{{/with}}
{{/each}}
{{#if hasMethodsWithPath}}
@@ -32,70 +25,72 @@
{{/if}}
-
-
In the secondary case this means a wipe of the
underlying storage when connected to a primary, and in the primary case,
diff --git a/ui/app/templates/partials/replication/enable.hbs b/ui/app/templates/partials/replication/enable.hbs
index 6ff8ad8063f1..65ee3925cd19 100644
--- a/ui/app/templates/partials/replication/enable.hbs
+++ b/ui/app/templates/partials/replication/enable.hbs
@@ -141,11 +141,11 @@
{{#if (eq mode 'secondary')}}
-
+
{{/if}}
{{#if (eq mode 'primary')}}
diff --git a/ui/app/templates/partials/replication/promote.hbs b/ui/app/templates/partials/replication/promote.hbs
index 44cec91ed876..e1112de34c79 100644
--- a/ui/app/templates/partials/replication/promote.hbs
+++ b/ui/app/templates/partials/replication/promote.hbs
@@ -46,7 +46,6 @@
-
Promote the cluster to primary.
+
Promote the cluster to primary.
+
Primary cluster address (optional)
@@ -105,7 +105,6 @@
+
+
+
diff --git a/ui/app/templates/svg/icons/docs.hbs b/ui/app/templates/svg/icons/docs.hbs
new file mode 100644
index 000000000000..08623c0e50f0
--- /dev/null
+++ b/ui/app/templates/svg/icons/docs.hbs
@@ -0,0 +1,3 @@
+
+
+
diff --git a/ui/app/templates/vault.hbs b/ui/app/templates/vault.hbs
new file mode 100644
index 000000000000..7307dcc9663f
--- /dev/null
+++ b/ui/app/templates/vault.hbs
@@ -0,0 +1,35 @@
+
+{{outlet}}
+
+ {{#if (eq env "development") }}
+
+
+ {{i-con glyph="wand" class="type-icon"}}Local Development
+
+
+ {{/if}}
+
diff --git a/ui/app/templates/vault/cluster/access/methods.hbs b/ui/app/templates/vault/cluster/access/methods.hbs
index 371886267977..ee6d86e16cd0 100644
--- a/ui/app/templates/vault/cluster/access/methods.hbs
+++ b/ui/app/templates/vault/cluster/access/methods.hbs
@@ -22,7 +22,15 @@
-
+
diff --git a/ui/app/templates/vault/cluster/oidc-callback.hbs b/ui/app/templates/vault/cluster/oidc-callback.hbs
new file mode 100644
index 000000000000..e0317112a891
--- /dev/null
+++ b/ui/app/templates/vault/cluster/oidc-callback.hbs
@@ -0,0 +1,14 @@
+
diff --git a/ui/app/templates/vault/error.hbs b/ui/app/templates/vault/error.hbs
index 587690d259d9..cd6b33bc37c6 100644
--- a/ui/app/templates/vault/error.hbs
+++ b/ui/app/templates/vault/error.hbs
@@ -1,7 +1,7 @@
- {{partial "svg/vault-edition-logo"}}
+
diff --git a/ui/app/utils/parse-url.js b/ui/app/utils/parse-url.js
new file mode 100644
index 000000000000..036fbec47bbf
--- /dev/null
+++ b/ui/app/utils/parse-url.js
@@ -0,0 +1,16 @@
+// adapted from https://gist.github.com/jed/964849
+let fn = (function(anchor) {
+ return function(url) {
+ anchor.href = url;
+ let parts = {};
+ for (let prop in anchor) {
+ if ('' + anchor[prop] === anchor[prop]) {
+ parts[prop] = anchor[prop];
+ }
+ }
+
+ return parts;
+ };
+})(document.createElement('a'));
+
+export default fn;
diff --git a/ui/tests/acceptance/auth-test.js b/ui/tests/acceptance/auth-test.js
index 78fb249dd3cc..d7dc6380d33a 100644
--- a/ui/tests/acceptance/auth-test.js
+++ b/ui/tests/acceptance/auth-test.js
@@ -1,9 +1,10 @@
import { module, test } from 'qunit';
import { setupApplicationTest } from 'ember-qunit';
import sinon from 'sinon';
-import { click, currentURL, visit, settled } from '@ember/test-helpers';
+import { currentURL, visit, settled } from '@ember/test-helpers';
import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends';
import authForm from '../pages/components/auth-form';
+import jwtForm from '../pages/components/auth-jwt';
import { create } from 'ember-cli-page-object';
import apiStub from 'vault/tests/helpers/noop-all-api-requests';
import authPage from 'vault/tests/pages/auth';
@@ -12,6 +13,7 @@ import logout from 'vault/tests/pages/logout';
import consoleClass from 'vault/tests/pages/components/console/ui-panel';
const consoleComponent = create(consoleClass);
const component = create(authForm);
+const jwtComponent = create(jwtForm);
module('Acceptance | auth', function(hooks) {
setupApplicationTest(hooks);
@@ -33,10 +35,11 @@ module('Acceptance | auth', function(hooks) {
test('auth query params', async function(assert) {
let backends = supportedAuthBackends();
+ assert.expect(backends.length + 1);
await visit('/vault/auth');
assert.equal(currentURL(), '/vault/auth?with=token');
for (let backend of backends.reverse()) {
- await click(`[data-test-auth-method-link="${backend.type}"]`);
+ await component.selectMethod(backend.type);
assert.equal(
currentURL(),
`/vault/auth?with=${backend.type}`,
@@ -48,11 +51,8 @@ module('Acceptance | auth', function(hooks) {
test('it clears token when changing selected auth method', async function(assert) {
await visit('/vault/auth');
assert.equal(currentURL(), '/vault/auth?with=token');
- await component
- .token('token')
- .tabs.filterBy('name', 'GitHub')[0]
- .link();
- await component.tabs.filterBy('name', 'Token')[0].link();
+ await component.token('token').selectMethod('github');
+ await component.selectMethod('token');
assert.equal(component.tokenValue, '', 'it clears the token value when toggling methods');
});
@@ -60,10 +60,14 @@ module('Acceptance | auth', function(hooks) {
let backends = supportedAuthBackends();
await visit('/vault/auth');
for (let backend of backends.reverse()) {
- await click(`[data-test-auth-method-link="${backend.type}"]`);
+ await component.selectMethod(backend.type);
if (backend.type === 'github') {
await component.token('token');
}
+ if (backend.type === 'jwt') {
+ await jwtComponent.jwt('1');
+ await jwtComponent.role('test');
+ }
await component.login();
let lastRequest = this.server.passthroughRequests[this.server.passthroughRequests.length - 1];
let body = JSON.parse(lastRequest.requestBody);
@@ -74,6 +78,9 @@ module('Acceptance | auth', function(hooks) {
);
} else if (backend.type === 'github') {
assert.ok(Object.keys(body).includes('token'), 'GitHub includes token');
+ } else if (backend.type === 'jwt') {
+ assert.ok(Object.keys(body).includes('jwt'), `${backend.type} includes jwt`);
+ assert.ok(Object.keys(body).includes('role'), `${backend.type} includes role`);
} else {
assert.ok(Object.keys(body).includes('password'), `${backend.type} includes password`);
}
diff --git a/ui/tests/integration/components/auth-form-test.js b/ui/tests/integration/components/auth-form-test.js
index 5515026bffd0..70f84c788db5 100644
--- a/ui/tests/integration/components/auth-form-test.js
+++ b/ui/tests/integration/components/auth-form-test.js
@@ -6,7 +6,6 @@ import Service from '@ember/service';
import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { render, settled } from '@ember/test-helpers';
-import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends';
import hbs from 'htmlbars-inline-precompile';
import sinon from 'sinon';
import Pretender from 'pretender';
@@ -14,7 +13,6 @@ import { create } from 'ember-cli-page-object';
import authForm from '../../pages/components/auth-form';
const component = create(authForm);
-const BACKENDS = supportedAuthBackends();
const authService = Service.extend({
authenticate() {
@@ -112,7 +110,7 @@ module('Integration | Component | auth form', function(hooks) {
});
});
- test('it renders all the supported tabs when no methods are passed', async function(assert) {
+ test('it renders no tabs when no methods are passed', async function(assert) {
let methods = {
'approle/': {
type: 'approle',
@@ -123,10 +121,10 @@ module('Integration | Component | auth form', function(hooks) {
return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })];
});
});
- await render(hbs`{{auth-form cluster=cluster}}`);
+ await render(hbs`
`);
await settled();
- assert.equal(component.tabs.length, BACKENDS.length, 'renders a tab for every backend');
+ assert.equal(component.tabs.length, 0, 'renders no tabs');
server.shutdown();
});
@@ -183,7 +181,7 @@ module('Integration | Component | auth form', function(hooks) {
server.shutdown();
});
- test('it renders all the supported methods when no supported methods are present in passed methods', async function(assert) {
+ test('it renders no tabs when no supported methods are present in passed methods', async function(assert) {
let methods = {
'approle/': {
type: 'approle',
@@ -195,10 +193,10 @@ module('Integration | Component | auth form', function(hooks) {
});
});
this.set('cluster', EmberObject.create({}));
- await render(hbs`{{auth-form cluster=cluster}}`);
+ await render(hbs`
`);
await settled();
server.shutdown();
- assert.equal(component.tabs.length, BACKENDS.length, 'renders a tab for every backend');
+ assert.equal(component.tabs.length, 0, 'renders no tabs');
});
test('it makes a request to unwrap if passed a wrappedToken and logs in', async function(assert) {
@@ -222,7 +220,7 @@ module('Integration | Component | auth form', function(hooks) {
let wrappedToken = '54321';
this.set('wrappedToken', wrappedToken);
this.set('cluster', EmberObject.create({}));
- await render(hbs`{{auth-form cluster=cluster wrappedToken=wrappedToken}}`);
+ await render(hbs`
`);
later(() => run.cancelTimers(), 50);
await settled();
assert.equal(server.handledRequests[0].url, '/v1/sys/wrapping/unwrap', 'makes call to unwrap the token');
diff --git a/ui/tests/integration/components/auth-jwt-test.js b/ui/tests/integration/components/auth-jwt-test.js
new file mode 100644
index 000000000000..3e006e456b19
--- /dev/null
+++ b/ui/tests/integration/components/auth-jwt-test.js
@@ -0,0 +1,247 @@
+import { next, later, run } from '@ember/runloop';
+import EmberObject, { computed } from '@ember/object';
+import Evented from '@ember/object/evented';
+import { resolve } from 'rsvp';
+import Service from '@ember/service';
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { render, settled } from '@ember/test-helpers';
+import hbs from 'htmlbars-inline-precompile';
+import sinon from 'sinon';
+import Pretender from 'pretender';
+import { create } from 'ember-cli-page-object';
+import form from '../../pages/components/auth-jwt';
+import { ERROR_WINDOW_CLOSED, ERROR_MISSING_PARAMS } from 'vault/components/auth-jwt';
+
+const component = create(form);
+const fakeWindow = EmberObject.extend(Evented, {
+ init() {
+ this._super(...arguments);
+ this.__proto__.on('close', () => {
+ this.set('closed', true);
+ });
+ },
+ screen: computed(function() {
+ return {
+ height: 600,
+ width: 500,
+ };
+ }),
+ localStorage: computed(function() {
+ return {
+ removeItem: sinon.stub(),
+ };
+ }),
+ closed: false,
+});
+
+fakeWindow.reopen({
+ open() {
+ return fakeWindow.create();
+ },
+
+ close() {
+ fakeWindow.prototype.trigger('close');
+ },
+});
+
+const OIDC_AUTH_RESPONSE = {
+ auth: {
+ client_token: 'token',
+ },
+};
+
+const routerStub = Service.extend({
+ urlFor() {
+ return 'http://example.com';
+ },
+});
+
+const renderIt = async (context, path) => {
+ let handler = (data, e) => {
+ if (e && e.preventDefault) e.preventDefault();
+ };
+ let fake = fakeWindow.create();
+ sinon.spy(fake, 'open');
+ context.set('window', fake);
+ context.set('handler', sinon.spy(handler));
+ context.set('roleName', '');
+ context.set('selectedAuthPath', path);
+
+ await render(hbs`
+
+ `);
+};
+module('Integration | Component | auth jwt', function(hooks) {
+ setupRenderingTest(hooks);
+
+ hooks.beforeEach(function() {
+ this.owner.register('service:router', routerStub);
+ this.server = new Pretender(function() {
+ this.get('/v1/auth/:path/oidc/callback', request => {
+ return [200, { 'Content-Type': 'application/json' }, JSON.stringify(OIDC_AUTH_RESPONSE)];
+ });
+ this.post('/v1/auth/:path/oidc/auth_url', request => {
+ let body = JSON.parse(request.requestBody);
+ if (body.role === 'test') {
+ return [
+ 200,
+ { 'Content-Type': 'application/json' },
+ JSON.stringify({
+ data: {
+ auth_url: 'http://example.com',
+ },
+ }),
+ ];
+ }
+ if (body.role === 'okta') {
+ return [
+ 200,
+ { 'Content-Type': 'application/json' },
+ JSON.stringify({
+ data: {
+ auth_url: 'http://okta.com',
+ },
+ }),
+ ];
+ }
+ return [400, { 'Content-Type': 'application/json' }, JSON.stringify({ errors: ['nope'] })];
+ });
+ });
+ });
+
+ hooks.afterEach(function() {
+ this.server.shutdown();
+ });
+
+ test('it renders the yield', async function(assert) {
+ await render(hbs`
Hello! `);
+ assert.equal(component.yieldContent, 'Hello!', 'yields properly');
+ });
+
+ test('jwt: it renders', async function(assert) {
+ await renderIt(this);
+ assert.ok(component.jwtPresent, 'renders jwt field');
+ assert.ok(component.rolePresent, 'renders role field');
+ assert.equal(this.server.handledRequests.length, 0, 'no requests made when there is no path set');
+ this.set('selectedAuthPath', 'foo');
+ await settled();
+ assert.equal(
+ this.server.handledRequests[0].url,
+ '/v1/auth/foo/oidc/auth_url',
+ 'requests when path is set'
+ );
+ });
+
+ test('jwt: it calls passed action on login', async function(assert) {
+ await renderIt(this);
+ await component.login();
+ assert.ok(this.handler.calledOnce);
+ });
+
+ test('oidc: test role: it renders', async function(assert) {
+ await renderIt(this);
+ this.set('selectedAuthPath', 'foo');
+ await component.role('test');
+ assert.notOk(component.jwtPresent, 'does not show jwt input for OIDC type login');
+ assert.equal(component.loginButtonText, 'Sign in with OIDC Provider');
+
+ await component.role('okta');
+ // 1 for initial render, 1 for each time role changed = 3
+ assert.equal(this.server.handledRequests.length, 3, 'fetches the auth_url when the path changes');
+ assert.equal(component.loginButtonText, 'Sign in with Okta', 'recognizes auth methods with certain urls');
+ });
+
+ test('oidc: it calls window.open popup window on login', async function(assert) {
+ await renderIt(this);
+ this.set('selectedAuthPath', 'foo');
+ await component.role('test');
+ component.login();
+
+ next(() => {
+ run.cancelTimers();
+ let call = this.window.open.getCall(0);
+ assert.deepEqual(
+ call.args,
+ [
+ 'http://example.com',
+ 'vaultOIDCWindow',
+ 'width=500,height=600,resizable,scrollbars=yes,top=0,left=0',
+ ],
+ 'called with expected args'
+ );
+ });
+ });
+
+ test('oidc: it calls error handler when popup is closed', async function(assert) {
+ await renderIt(this);
+ this.set('selectedAuthPath', 'foo');
+ await component.role('test');
+ component.login();
+
+ next(async () => {
+ this.window.close();
+ await settled();
+ assert.equal(this.error, ERROR_WINDOW_CLOSED, 'calls onError with error string');
+ });
+ });
+
+ test('oidc: storage event fires with wrong key', async function(assert) {
+ await renderIt(this);
+ this.set('selectedAuthPath', 'foo');
+ await component.role('test');
+ component.login();
+ next(async () => {
+ run.cancelTimers();
+ this.window.trigger('storage', { key: 'wrongThing' });
+ assert.equal(this.window.localStorage.removeItem.callCount, 0, 'never calls removeItem');
+ });
+ });
+
+ test('oidc: storage event fires with correct key, wrong params', async function(assert) {
+ await renderIt(this);
+ this.set('selectedAuthPath', 'foo');
+ await component.role('test');
+ component.login();
+ // need next tick here to let ec tasks set up
+ next(async () => {
+ this.window.trigger('storage', { key: 'oidcState', newValue: JSON.stringify({}) });
+ await settled();
+ assert.equal(this.window.localStorage.removeItem.callCount, 1, 'calls removeItem');
+ assert.equal(this.error, ERROR_MISSING_PARAMS, 'calls onError with params missing error');
+ });
+ });
+
+ test('oidc: storage event fires with correct key, correct params', async function(assert) {
+ await renderIt(this);
+ this.set('selectedAuthPath', 'foo');
+ await component.role('test');
+ component.login();
+ // need next tick here to let ec tasks set up
+ next(async () => {
+ this.window.trigger('storage', {
+ key: 'oidcState',
+ newValue: JSON.stringify({
+ path: 'foo',
+ state: 'state',
+ code: 'code',
+ }),
+ });
+ await settled();
+ assert.equal(this.selectedAuth, 'token', 'calls onSelectedAuth with token');
+ assert.equal(this.token, 'token', 'calls onToken with token');
+ assert.ok(this.handler.calledOnce, 'calls the onSubmit handler');
+ });
+ });
+});
diff --git a/ui/tests/pages/components/auth-form.js b/ui/tests/pages/components/auth-form.js
index 7f930f505a9b..01e38615c4d7 100644
--- a/ui/tests/pages/components/auth-form.js
+++ b/ui/tests/pages/components/auth-form.js
@@ -5,6 +5,7 @@ export default {
name: text(),
link: clickable('[data-test-auth-method-link]'),
}),
+ selectMethod: fillable('[data-test-method-select]'),
username: fillable('[data-test-username]'),
token: fillable('[data-test-token]'),
tokenValue: value('[data-test-token]'),
diff --git a/ui/tests/pages/components/auth-jwt.js b/ui/tests/pages/components/auth-jwt.js
new file mode 100644
index 000000000000..166cd94c5596
--- /dev/null
+++ b/ui/tests/pages/components/auth-jwt.js
@@ -0,0 +1,11 @@
+import { text, isPresent, clickable, fillable } from 'ember-cli-page-object';
+
+export default {
+ jwt: fillable('[data-test-jwt]'),
+ jwtPresent: isPresent('[data-test-jwt]'),
+ role: fillable('[data-test-role]'),
+ rolePresent: isPresent('[data-test-role]'),
+ login: clickable('[data-test-auth-submit]'),
+ loginButtonText: text('[data-test-auth-submit]'),
+ yieldContent: text('[data-test-yield-content]'),
+};
diff --git a/ui/tests/unit/adapters/cluster-test.js b/ui/tests/unit/adapters/cluster-test.js
index eaf3ae28449d..12840baa8d37 100644
--- a/ui/tests/unit/adapters/cluster-test.js
+++ b/ui/tests/unit/adapters/cluster-test.js
@@ -63,6 +63,20 @@ module('Unit | Adapter | cluster', function(hooks) {
'auth:github options OK'
);
+ data = { jwt: 'token', role: 'test' };
+ adapter.authenticate({ backend: 'jwt', data });
+ assert.equal('/v1/auth/jwt/login', url, 'auth:jwt url OK');
+ assert.equal('POST', method, 'auth:jwt method OK');
+ assert.deepEqual(
+ { data: { jwt: 'token', role: 'test' }, unauthenticated: true },
+ options,
+ 'auth:jwt options OK'
+ );
+
+ data = { jwt: 'token', role: 'test', path: 'oidc' };
+ adapter.authenticate({ backend: 'jwt', data });
+ assert.equal('/v1/auth/oidc/login', url, 'auth:jwt custom mount path, url OK');
+
data = { token: 'token', password: 'password', username: 'username', path: 'path' };
adapter.authenticate({ backend: 'token', data });
diff --git a/ui/tests/unit/models/role-jwt-test.js b/ui/tests/unit/models/role-jwt-test.js
new file mode 100644
index 000000000000..115f0725655f
--- /dev/null
+++ b/ui/tests/unit/models/role-jwt-test.js
@@ -0,0 +1,43 @@
+import { module, test } from 'qunit';
+import { setupTest } from 'ember-qunit';
+import { DOMAIN_STRINGS, PROVIDER_WITH_LOGO } from 'vault/models/role-jwt';
+
+module('Unit | Model | role-jwt', function(hooks) {
+ setupTest(hooks);
+
+ test('it exists', function(assert) {
+ let model = this.owner.lookup('service:store').createRecord('role-jwt');
+ assert.ok(!!model);
+ assert.equal(model.providerName, null, 'no providerName');
+ assert.equal(model.providerButtonComponent, null, 'no providerButtonComponent');
+ });
+
+ test('it computes providerName when known provider url match fails', function(assert) {
+ let model = this.owner.lookup('service:store').createRecord('role-jwt', {
+ authUrl: 'http://example.com',
+ });
+
+ assert.equal(model.providerName, null, 'no providerName');
+ assert.equal(model.providerButtonComponent, null, 'no providerButtonComponent');
+ });
+
+ test('it provides a providerName for listed known providers', function(assert) {
+ Object.keys(DOMAIN_STRINGS).forEach(domainPart => {
+ let model = this.owner.lookup('service:store').createRecord('role-jwt', {
+ authUrl: `http://provider-${domainPart}.com`,
+ });
+
+ let expectedName = DOMAIN_STRINGS[domainPart];
+ assert.equal(model.providerName, expectedName, `computes providerName: ${expectedName}`);
+ if (PROVIDER_WITH_LOGO.includes(expectedName)) {
+ assert.equal(
+ model.providerButtonComponent,
+ `auth-button-${domainPart}`,
+ `computes providerButtonComponent: ${domainPart}`
+ );
+ } else {
+ assert.equal(model.providerButtonComponent, null, `computes providerButtonComponent: ${domainPart}`);
+ }
+ });
+ });
+});
From d8e9adc9d36c190aa8eea104495a216fb4dc4306 Mon Sep 17 00:00:00 2001
From: madalynrose
Date: Thu, 14 Feb 2019 12:42:44 -0500
Subject: [PATCH 06/31] Update OpenAPI responses to include information the UI
can use (#6204)
---
CHANGELOG.md | 9 +++-
builtin/credential/github/path_config.go | 3 ++
builtin/credential/okta/path_config.go | 7 +++
builtin/credential/radius/path_config.go | 4 ++
builtin/logical/aws/path_roles.go | 7 +++
builtin/logical/pki/fields.go | 18 ++++++-
builtin/logical/pki/path_roles.go | 25 +++++++++
builtin/logical/ssh/path_roles.go | 17 ++++--
helper/ldaputil/config.go | 31 ++++++++---
terraform/aws/variables.tf | 2 +-
.../vault-plugin-auth-azure/Gopkg.lock | 16 +++++-
.../vault-plugin-auth-azure/path_config.go | 5 ++
.../plugin/path_config.go | 3 ++
.../vault-plugin-auth-jwt/path_config.go | 9 ++++
.../vault-plugin-auth-kubernetes/Gopkg.lock | 16 +++++-
.../path_config.go | 5 ++
.../clients/sts.go | 4 +-
.../path_creds.go | 21 +++++---
.../vault-plugin-secrets-gcpkms/README.md | 6 +++
vendor/vendor.json | 52 +++++++++----------
version/version_base.go | 2 +-
website/config.rb | 2 +-
.../source/docs/secrets/transit/index.html.md | 15 +++---
23 files changed, 218 insertions(+), 61 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index de92860cc1de..4dcb052e45e7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-## Next
+## 1.0.3 (February 12th, 2019)
CHANGES:
@@ -9,6 +9,11 @@ CHANGES:
entity either by name or by id [GH-6105]
* The Vault UI's navigation and onboarding wizard now only displays items that
are permitted in a users' policy [GH-5980, GH-6094]
+ * An issue was fixed that caused recovery keys to not work on secondary
+ clusters when using a different unseal mechanism/key than the primary. This
+ would be hit if the cluster was rekeyed or initialized after 1.0. We recommend
+ rekeying the recovery keys on the primary cluster if you meet the above
+ requirements.
FEATURES:
@@ -47,6 +52,8 @@ BUG FIXES:
a performance standby very quickly, before an associated entity has been
replicated. If the entity is not found in this scenario, the request will
forward to the active node.
+ * replication: Fix issue where recovery keys would not work on secondary
+ clusters if using a different unseal mechanism than the primary.
* replication: Fix a "failed to register lease" error when using performance
standbys
* storage/postgresql: The `Get` method will now return an Entry object with
diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go
index f42b156db712..b590746863c7 100644
--- a/builtin/credential/github/path_config.go
+++ b/builtin/credential/github/path_config.go
@@ -25,14 +25,17 @@ func pathConfig(b *backend) *framework.Path {
Description: `The API endpoint to use. Useful if you
are running GitHub Enterprise or an
API-compatible authentication server.`,
+ DisplayName: "Base URL",
},
"ttl": &framework.FieldSchema{
Type: framework.TypeString,
Description: `Duration after which authentication will be expired`,
+ DisplayName: "TTL",
},
"max_ttl": &framework.FieldSchema{
Type: framework.TypeString,
Description: `Maximum duration after which authentication will be expired`,
+ DisplayName: "Max TTL",
},
},
diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go
index 284a8dcc551d..02785812e5b0 100644
--- a/builtin/credential/okta/path_config.go
+++ b/builtin/credential/okta/path_config.go
@@ -25,26 +25,32 @@ func pathConfig(b *backend) *framework.Path {
"organization": &framework.FieldSchema{
Type: framework.TypeString,
Description: "(DEPRECATED) Okta organization to authenticate against. Use org_name instead.",
+ Deprecated: true,
},
"org_name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the organization to be used in the Okta API.",
+ DisplayName: "Organization Name",
},
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "(DEPRECATED) Okta admin API token. Use api_token instead.",
+ Deprecated: true,
},
"api_token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Okta API key.",
+ DisplayName: "API Token",
},
"base_url": &framework.FieldSchema{
Type: framework.TypeString,
Description: `The base domain to use for the Okta API. When not specified in the configuration, "okta.com" is used.`,
+ DisplayName: "Base URL",
},
"production": &framework.FieldSchema{
Type: framework.TypeBool,
Description: `(DEPRECATED) Use base_url.`,
+ Deprecated: true,
},
"ttl": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
@@ -57,6 +63,7 @@ func pathConfig(b *backend) *framework.Path {
"bypass_okta_mfa": &framework.FieldSchema{
Type: framework.TypeBool,
Description: `When set true, requests by Okta for a MFA check will be bypassed. This also disallows certain status checks on the account, such as whether the password is expired.`,
+ DisplayName: "Bypass Okta MFA",
},
},
diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go
index 85d60a3d68d2..6dde36c1d7f5 100644
--- a/builtin/credential/radius/path_config.go
+++ b/builtin/credential/radius/path_config.go
@@ -15,6 +15,7 @@ func pathConfig(b *backend) *framework.Path {
"host": &framework.FieldSchema{
Type: framework.TypeString,
Description: "RADIUS server host",
+ DisplayName: "Host",
},
"port": &framework.FieldSchema{
@@ -30,6 +31,7 @@ func pathConfig(b *backend) *framework.Path {
Type: framework.TypeString,
Default: "",
Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: emtpy)",
+ DisplayName: "Policies for unregistered users",
},
"dial_timeout": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
@@ -45,11 +47,13 @@ func pathConfig(b *backend) *framework.Path {
Type: framework.TypeInt,
Default: 10,
Description: "RADIUS NAS port field (default: 10)",
+ DisplayName: "NAS Port",
},
"nas_identifier": &framework.FieldSchema{
Type: framework.TypeString,
Default: "",
Description: "RADIUS NAS Identifier field (optional)",
+ DisplayName: "NAS Identifier",
},
},
diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go
index 8e40e36dbc69..0745172253ba 100644
--- a/builtin/logical/aws/path_roles.go
+++ b/builtin/logical/aws/path_roles.go
@@ -36,6 +36,7 @@ func pathRoles(b *backend) *framework.Path {
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the policy",
+ DisplayName: "Policy Name",
},
"credential_type": &framework.FieldSchema{
@@ -46,11 +47,13 @@ func pathRoles(b *backend) *framework.Path {
"role_arns": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: "ARNs of AWS roles allowed to be assumed. Only valid when credential_type is " + assumedRoleCred,
+ DisplayName: "Role ARNs",
},
"policy_arns": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: "ARNs of AWS policies to attach to IAM users. Only valid when credential_type is " + iamUserCred,
+ DisplayName: "Policy ARNs",
},
"policy_document": &framework.FieldSchema{
@@ -65,22 +68,26 @@ GetFederationToken API call, acting as a filter on permissions available.`,
"default_sts_ttl": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred),
+ DisplayName: "Default TTL",
},
"max_sts_ttl": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred),
+ DisplayName: "Max TTL",
},
"arn": &framework.FieldSchema{
Type: framework.TypeString,
Description: `Deprecated; use role_arns or policy_arns instead. ARN Reference to a managed policy
or IAM role to assume`,
+ Deprecated: true,
},
"policy": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Deprecated; use policy_document instead. IAM policy document",
+ Deprecated: true,
},
},
diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go
index dee7779be2c9..1a0be88a6184 100644
--- a/builtin/logical/pki/fields.go
+++ b/builtin/logical/pki/fields.go
@@ -11,6 +11,7 @@ func addIssueAndSignCommonFields(fields map[string]*framework.FieldSchema) map[s
Description: `If true, the Common Name will not be
included in DNS or Email Subject Alternate Names.
Defaults to false (CN is included).`,
+ DisplayName: "Exclude Common Name from Subject Alternative Names (SANs)",
}
fields["format"] = &framework.FieldSchema{
@@ -20,6 +21,7 @@ Defaults to false (CN is included).`,
or "pem_bundle". If "pem_bundle" any private
key and issuing cert will be appended to the
certificate pem. Defaults to "pem".`,
+ AllowedValues: []interface{}{"pem", "der", "pem_bundle"},
}
fields["private_key_format"] = &framework.FieldSchema{
@@ -31,24 +33,28 @@ parameter as either base64-encoded DER or PEM-encoded DER.
However, this can be set to "pkcs8" to have the returned
private key contain base64-encoded pkcs8 or PEM-encoded
pkcs8 instead. Defaults to "der".`,
+ AllowedValues: []interface{}{"", "der", "pem", "pkcs8"},
}
fields["ip_sans"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `The requested IP SANs, if any, in a
comma-delimited list`,
+ DisplayName: "IP Subject Alternative Names (SANs)",
}
fields["uri_sans"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `The requested URI SANs, if any, in a
comma-delimited list.`,
+ DisplayName: "URI Subject Alternative Names (SANs)",
}
fields["other_sans"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `Requested other SANs, in an array with the format
;UTF8: for each entry.`,
+ DisplayName: "Other SANs",
}
return fields
@@ -79,6 +85,7 @@ in the role, this may be an email address.`,
in a comma-delimited list. If email protection
is enabled for the role, this may contain
email addresses.`,
+ DisplayName: "DNS/Email Subject Alternative Names (SANs)",
}
fields["serial_number"] = &framework.FieldSchema{
@@ -95,6 +102,7 @@ sets the expiration date. If not specified
the role default, backend default, or system
default TTL is used, in that order. Cannot
be larger than the role max TTL.`,
+ DisplayName: "TTL",
}
return fields
@@ -110,6 +118,7 @@ func addCACommonFields(fields map[string]*framework.FieldSchema) map[string]*fra
Description: `The requested Subject Alternative Names, if any,
in a comma-delimited list. May contain both
DNS names and email addresses.`,
+ DisplayName: "DNS/Email Subject Alternative Names (SANs)",
}
fields["common_name"] = &framework.FieldSchema{
@@ -131,12 +140,14 @@ be larger than the mount max TTL. Note:
this only has an effect when generating
a CA cert or signing a CA cert, not when
generating a CSR for an intermediate CA.`,
+ DisplayName: "TTL",
}
fields["ou"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, OU (OrganizationalUnit) will be set to
this value.`,
+ DisplayName: "OU (Organizational Unit)",
}
fields["organization"] = &framework.FieldSchema{
@@ -155,24 +166,28 @@ this value.`,
Type: framework.TypeCommaStringSlice,
Description: `If set, Locality will be set to
this value.`,
+ DisplayName: "Locality/City",
}
fields["province"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, Province will be set to
this value.`,
+ DisplayName: "Province/State",
}
fields["street_address"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, Street Address will be set to
this value.`,
+ DisplayName: "Street Address",
}
fields["postal_code"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, Postal Code will be set to
this value.`,
+ DisplayName: "Postal Code",
}
fields["serial_number"] = &framework.FieldSchema{
@@ -209,8 +224,8 @@ the key_type.`,
Default: "rsa",
Description: `The type of key to use; defaults to RSA. "rsa"
and "ec" are the only valid values.`,
+ AllowedValues: []interface{}{"rsa", "ec"},
}
-
return fields
}
@@ -226,6 +241,7 @@ func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*fram
fields["permitted_dns_domains"] = &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `Domains for which this certificate is allowed to sign or issue child certificates. If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`,
+ DisplayName: "Permitted DNS Domains",
}
return fields
diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go
index e796df0122df..a39fe97cb943 100644
--- a/builtin/logical/pki/path_roles.go
+++ b/builtin/logical/pki/path_roles.go
@@ -31,6 +31,11 @@ func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
+ "backend": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Backend Type",
+ },
+
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role",
@@ -42,11 +47,13 @@ func pathRoles(b *backend) *framework.Path {
requested. The lease duration controls the expiration
of certificates issued by this backend. Defaults to
the value of max_ttl.`,
+ DisplayName: "TTL",
},
"max_ttl": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Description: "The maximum allowed lease duration",
+ DisplayName: "Max TTL",
},
"allow_localhost": &framework.FieldSchema{
@@ -107,17 +114,20 @@ CN and SANs. Defaults to true.`,
Default: true,
Description: `If set, IP Subject Alternative Names are allowed.
Any valid IP is accepted.`,
+ DisplayName: "Allow IP Subject Alternative Names",
},
"allowed_uri_sans": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, an array of allowed URIs to put in the URI Subject Alternative Names.
Any valid URI is accepted, these values support globbing.`,
+ DisplayName: "Allowed URI Subject Alternative Names",
},
"allowed_other_sans": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format ;:. Currently only "utf8" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`,
+ DisplayName: "Allowed Other Subject Alternative Names",
},
"allowed_serial_numbers": &framework.FieldSchema{
@@ -156,6 +166,7 @@ protection use. Defaults to false.`,
Default: "rsa",
Description: `The type of key to use; defaults to RSA. "rsa"
and "ec" are the only valid values.`,
+ AllowedValues: []interface{}{"rsa", "ec"},
},
"key_bits": &framework.FieldSchema{
@@ -175,6 +186,7 @@ https://golang.org/pkg/crypto/x509/#KeyUsage
-- simply drop the "KeyUsage" part of the name.
To remove all key usages from being set, set
this value to an empty list.`,
+ DisplayValue: "DigitalSignature,KeyAgreement,KeyEncipherment",
},
"ext_key_usage": &framework.FieldSchema{
@@ -185,11 +197,13 @@ https://golang.org/pkg/crypto/x509/#ExtKeyUsage
-- simply drop the "ExtKeyUsage" part of the name.
To remove all key usages from being set, set
this value to an empty list.`,
+ DisplayName: "Extended Key Usage",
},
"ext_key_usage_oids": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `A comma-separated string or list of extended key usage oids.`,
+ DisplayName: "Extended Key Usage OIDs",
},
"use_csr_common_name": &framework.FieldSchema{
@@ -199,6 +213,7 @@ this value to an empty list.`,
the common name in the CSR will be used. This
does *not* include any requested Subject Alternative
Names. Defaults to true.`,
+ DisplayName: "Use CSR Common Name",
},
"use_csr_sans": &framework.FieldSchema{
@@ -207,12 +222,14 @@ Names. Defaults to true.`,
Description: `If set, when used with a signing profile,
the SANs in the CSR will be used. This does *not*
include the Common Name (cn). Defaults to true.`,
+ DisplayName: "Use CSR Subject Alternative Names",
},
"ou": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, OU (OrganizationalUnit) will be set to
this value in certificates issued by this role.`,
+ DisplayName: "Organizational Unit",
},
"organization": &framework.FieldSchema{
@@ -231,12 +248,14 @@ this value in certificates issued by this role.`,
Type: framework.TypeCommaStringSlice,
Description: `If set, Locality will be set to
this value in certificates issued by this role.`,
+ DisplayName: "Locality/City",
},
"province": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `If set, Province will be set to
this value in certificates issued by this role.`,
+ DisplayName: "Province/State",
},
"street_address": &framework.FieldSchema{
@@ -263,6 +282,7 @@ to the CRL. When large number of certificates are generated with long
lifetimes, it is recommended that lease generation be disabled, as large amount of
leases adversely affect the startup time of Vault.`,
},
+
"no_store": &framework.FieldSchema{
Type: framework.TypeBool,
Description: `
@@ -273,18 +293,23 @@ or revoked, so this option is recommended only for certificates that are
non-sensitive, or extremely short-lived. This option implies a value of "false"
for "generate_lease".`,
},
+
"require_cn": &framework.FieldSchema{
Type: framework.TypeBool,
Default: true,
Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`,
+ DisplayName: "Require Common Name",
},
+
"policy_identifiers": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `A comma-separated string or list of policy oids.`,
},
+
"basic_constraints_valid_for_non_ca": &framework.FieldSchema{
Type: framework.TypeBool,
Description: `Mark Basic Constraints valid when issuing non-CA certificates.`,
+ DisplayName: "Basic Constraints Valid for Non-CA",
},
"not_before_duration": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go
index 932d6edf65d2..3d535f2b8991 100644
--- a/builtin/logical/ssh/path_roles.go
+++ b/builtin/logical/ssh/path_roles.go
@@ -93,6 +93,7 @@ func pathRoles(b *backend) *framework.Path {
credential is being generated for other users, Vault uses this admin
username to login to remote host and install the generated credential
for the other user.`,
+ DisplayName: "Admin Username",
},
"default_user": &framework.FieldSchema{
Type: framework.TypeString,
@@ -101,6 +102,7 @@ func pathRoles(b *backend) *framework.Path {
Default username for which a credential will be generated.
When the endpoint 'creds/' is used without a username, this
value will be used as default username.`,
+ DisplayName: "Default Username",
},
"cidr_list": &framework.FieldSchema{
Type: framework.TypeString,
@@ -108,6 +110,7 @@ func pathRoles(b *backend) *framework.Path {
[Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
Comma separated list of CIDR blocks for which the role is applicable for.
CIDR blocks can belong to more than one role.`,
+ DisplayName: "CIDR List",
},
"exclude_cidr_list": &framework.FieldSchema{
Type: framework.TypeString,
@@ -116,6 +119,7 @@ func pathRoles(b *backend) *framework.Path {
Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not
accepted by the role. This is particularly useful when big CIDR blocks are being used
by the role and certain parts of it needs to be kept out.`,
+ DisplayName: "Exclude CIDR List",
},
"port": &framework.FieldSchema{
Type: framework.TypeInt,
@@ -125,6 +129,7 @@ func pathRoles(b *backend) *framework.Path {
play any role in creation of OTP. For 'otp' type, this is just a way
to inform client about the port number to use. Port number will be
returned to client by Vault server along with OTP.`,
+ DisplayValue: 22,
},
"key_type": &framework.FieldSchema{
Type: framework.TypeString,
@@ -132,6 +137,8 @@ func pathRoles(b *backend) *framework.Path {
[Required for all types]
Type of key used to login to hosts. It can be either 'otp', 'dynamic' or 'ca'.
'otp' type requires agent to be installed in remote hosts.`,
+ AllowedValues: []interface{}{"otp", "dynamic", "ca"},
+ DisplayValue: "ca",
},
"key_bits": &framework.FieldSchema{
Type: framework.TypeInt,
@@ -188,6 +195,7 @@ func pathRoles(b *backend) *framework.Path {
requested. The lease duration controls the expiration
of certificates issued by this backend. Defaults to
the value of max_ttl.`,
+ DisplayName: "TTL",
},
"max_ttl": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
@@ -195,6 +203,7 @@ func pathRoles(b *backend) *framework.Path {
[Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
The maximum allowed lease duration
`,
+ DisplayName: "Max TTL",
},
"allowed_critical_options": &framework.FieldSchema{
Type: framework.TypeString,
@@ -202,7 +211,7 @@ func pathRoles(b *backend) *framework.Path {
[Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
A comma-separated list of critical options that certificates can have when signed.
To allow any critical options, set this to an empty string.
- `,
+ `,
},
"allowed_extensions": &framework.FieldSchema{
Type: framework.TypeString,
@@ -238,7 +247,7 @@ func pathRoles(b *backend) *framework.Path {
[Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
If set, certificates are allowed to be signed for use as a 'user'.
`,
- Default: false,
+ Default: false,
},
"allow_host_certificates": &framework.FieldSchema{
Type: framework.TypeBool,
@@ -246,7 +255,7 @@ func pathRoles(b *backend) *framework.Path {
[Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
If set, certificates are allowed to be signed for use as a 'host'.
`,
- Default: false,
+ Default: false,
},
"allow_bare_domains": &framework.FieldSchema{
Type: framework.TypeBool,
@@ -272,6 +281,7 @@ func pathRoles(b *backend) *framework.Path {
When false, the key ID will always be the token display name.
The key ID is logged by the SSH server and can be useful for auditing.
`,
+ DisplayName: "Allow User Key IDs",
},
"key_id_format": &framework.FieldSchema{
Type: framework.TypeString,
@@ -282,6 +292,7 @@ func pathRoles(b *backend) *framework.Path {
the token used to make the request. '{{role_name}}' - The name of the role signing the request.
'{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed.
`,
+ DisplayName: "Key ID Format",
},
"allowed_user_key_lengths": &framework.FieldSchema{
Type: framework.TypeMap,
diff --git a/helper/ldaputil/config.go b/helper/ldaputil/config.go
index 9da1ffd71a11..7169a8c82b4a 100644
--- a/helper/ldaputil/config.go
+++ b/helper/ldaputil/config.go
@@ -22,26 +22,31 @@ func ConfigFields() map[string]*framework.FieldSchema {
Type: framework.TypeString,
Default: "ldap://127.0.0.1",
Description: "LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.",
+ DisplayName: "URL",
},
"userdn": {
Type: framework.TypeString,
Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)",
+ DisplayName: "User DN",
},
"binddn": {
Type: framework.TypeString,
Description: "LDAP DN for searching for the user DN (optional)",
+ DisplayName: "Name of Object to bind (binddn)",
},
"bindpass": {
- Type: framework.TypeString,
- Description: "LDAP password for searching for the user DN (optional)",
+ Type: framework.TypeString,
+ Description: "LDAP password for searching for the user DN (optional)",
+ DisplaySensitive: true,
},
"groupdn": {
Type: framework.TypeString,
Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)",
+ DisplayName: "Group DN",
},
"groupfilter": {
@@ -60,17 +65,20 @@ Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}
in order to enumerate user group membership.
Examples: "cn" or "memberOf", etc.
Default: cn`,
+ DisplayName: "Group Attribute",
},
"upndomain": {
Type: framework.TypeString,
Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)",
+ DisplayName: "User Principal (UPN) Domain",
},
"userattr": {
Type: framework.TypeString,
Default: "cn",
Description: "Attribute used for users (default: cn)",
+ DisplayName: "User Attribute",
},
"certificate": {
@@ -81,28 +89,35 @@ Default: cn`,
"discoverdn": {
Type: framework.TypeBool,
Description: "Use anonymous bind to discover the bind DN of a user (optional)",
+ DisplayName: "Discover DN",
},
"insecure_tls": {
Type: framework.TypeBool,
Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)",
+ DisplayName: "Insecure TLS",
},
"starttls": {
Type: framework.TypeBool,
Description: "Issue a StartTLS command after establishing unencrypted connection (optional)",
+ DisplayName: "Issue StartTLS command after establishing an unencrypted connection",
},
"tls_min_version": {
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ DisplayName: "Minimum TLS Version",
+ AllowedValues: []interface{}{"tls10", "tls11", "tls12"},
},
"tls_max_version": {
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ DisplayName: "Maximum TLS Version",
+ AllowedValues: []interface{}{"tls10", "tls11", "tls12"},
},
"deny_null_bind": {
diff --git a/terraform/aws/variables.tf b/terraform/aws/variables.tf
index 64ffc78b6215..ece9cc78df3d 100644
--- a/terraform/aws/variables.tf
+++ b/terraform/aws/variables.tf
@@ -3,7 +3,7 @@
//-------------------------------------------------------------------
variable "download-url" {
- default = "https://releases.hashicorp.com/vault/1.0.2/vault_1.0.2_linux_amd64.zip"
+ default = "https://releases.hashicorp.com/vault/1.0.3/vault_1.0.3_linux_amd64.zip"
description = "URL to download Vault"
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-azure/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-azure/Gopkg.lock
index a8eb992af37c..b73a81c63b35 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-azure/Gopkg.lock
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-azure/Gopkg.lock
@@ -216,7 +216,7 @@
[[projects]]
branch = "master"
- digest = "1:450803219e484669ba680c777ecac629dac92abde2bc83009beaa630f5368e71"
+ digest = "1:606c7307ae83d1adc0901aa8909b700489d7f1294533344453436a8dbff0091b"
name = "github.com/hashicorp/vault"
packages = [
"api",
@@ -226,6 +226,7 @@
"helper/errutil",
"helper/hclutil",
"helper/jsonutil",
+ "helper/license",
"helper/locksutil",
"helper/logging",
"helper/mlock",
@@ -245,7 +246,7 @@
"version",
]
pruneopts = "UT"
- revision = "8655d167084028d627f687ddc25d0c71307eb5be"
+ revision = "c0739a0f2367d5fdd20cef502b628e01bdb90470"
[[projects]]
branch = "master"
@@ -287,6 +288,17 @@
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
version = "v1.0.0"
+[[projects]]
+ digest = "1:c7a5e79396b6eb570159df7a1d487ce5775bf43b7907976fbef6de544ea160ad"
+ name = "github.com/pierrec/lz4"
+ packages = [
+ ".",
+ "internal/xxh32",
+ ]
+ pruneopts = "UT"
+ revision = "473cd7ce01a1113208073166464b98819526150e"
+ version = "v2.0.8"
+
[[projects]]
branch = "master"
digest = "1:bd9efe4e0b0f768302a1e2f0c22458149278de533e521206e5ddc71848c269a0"
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-azure/path_config.go b/vendor/github.com/hashicorp/vault-plugin-auth-azure/path_config.go
index 8b18cc90650a..4e5fad3890df 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-azure/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-azure/path_config.go
@@ -16,26 +16,31 @@ func pathConfig(b *azureAuthBackend) *framework.Path {
Description: `The tenant id for the Azure Active Directory. This is sometimes
referred to as Directory ID in AD. This value can also be provided with the
AZURE_TENANT_ID environment variable.`,
+ DisplayName: "Tenant ID",
},
"resource": &framework.FieldSchema{
Type: framework.TypeString,
Description: `The resource URL for the vault application in Azure Active Directory.
This value can also be provided with the AZURE_AD_RESOURCE environment variable.`,
+ DisplayName: "Resource",
},
"environment": &framework.FieldSchema{
Type: framework.TypeString,
Description: `The Azure environment name. If not provided, AzurePublicCloud is used.
This value can also be provided with the AZURE_ENVIRONMENT environment variable.`,
+ DisplayName: "Environment",
},
"client_id": &framework.FieldSchema{
Type: framework.TypeString,
Description: `The OAuth2 client id to connection to Azure.
This value can also be provided with the AZURE_CLIENT_ID environment variable.`,
+ DisplayName: "Client ID",
},
"client_secret": &framework.FieldSchema{
Type: framework.TypeString,
Description: `The OAuth2 client secret to connection to Azure.
This value can also be provided with the AZURE_CLIENT_SECRET environment variable.`,
+ DisplayName: "Client Secret",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_config.go b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_config.go
index 3476a64134e3..da4d32f310ba 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_config.go
@@ -6,6 +6,7 @@ import (
"fmt"
"encoding/json"
+
"github.com/hashicorp/go-gcp-common/gcputil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -20,11 +21,13 @@ func pathConfig(b *GcpAuthBackend) *framework.Path {
Description: `
Google credentials JSON that Vault will use to verify users against GCP APIs.
If not specified, will use application default credentials`,
+ DisplayName: "Credentials",
},
"google_certs_endpoint": {
Type: framework.TypeString,
Description: `
Deprecated. This field does nothing and be removed in a future release`,
+ Deprecated: true,
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
index 3fc200df4cc2..8c44519ab088 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
@@ -174,6 +174,15 @@ func (b *jwtAuthBackend) pathConfigWrite(ctx context.Context, req *logical.Reque
}
}
+ case len(config.JWTSupportedAlgs) != 0:
+ for _, a := range config.JWTSupportedAlgs {
+ switch a {
+ case oidc.RS256, oidc.RS384, oidc.RS512, oidc.ES256, oidc.ES384, oidc.ES512, oidc.PS256, oidc.PS384, oidc.PS512:
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("Invalid supported algorithm: %s", a)), nil
+ }
+ }
+
default:
return nil, errors.New("unknown condition")
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock
index 1d1007232e85..f6ee0566a5d4 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock
@@ -203,7 +203,7 @@
[[projects]]
branch = "master"
- digest = "1:d00de8725219a569ffbb5dd1042e4ced1f3b5ccee2b07218371f71026cc7609a"
+ digest = "1:7be65468c591c5e836ec7ff70b6e7665452a6e700d5f0d5bb9edec8aa57b58e2"
name = "github.com/hashicorp/vault"
packages = [
"api",
@@ -214,6 +214,7 @@
"helper/errutil",
"helper/hclutil",
"helper/jsonutil",
+ "helper/license",
"helper/locksutil",
"helper/logging",
"helper/mlock",
@@ -233,7 +234,7 @@
"version",
]
pruneopts = "UT"
- revision = "add60e6dc7ff7b94487f3b5b680d00d7c05fe621"
+ revision = "c0739a0f2367d5fdd20cef502b628e01bdb90470"
[[projects]]
branch = "master"
@@ -275,6 +276,17 @@
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
version = "v1.0.0"
+[[projects]]
+ digest = "1:c7a5e79396b6eb570159df7a1d487ce5775bf43b7907976fbef6de544ea160ad"
+ name = "github.com/pierrec/lz4"
+ packages = [
+ ".",
+ "internal/xxh32",
+ ]
+ pruneopts = "UT"
+ revision = "473cd7ce01a1113208073166464b98819526150e"
+ version = "v2.0.8"
+
[[projects]]
digest = "1:0e792eea6c96ec55ff302ef33886acbaa5006e900fefe82689e88d96439dcd84"
name = "github.com/ryanuber/go-glob"
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/path_config.go b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/path_config.go
index f10d4b4c7fb3..01f6de36c806 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/path_config.go
@@ -22,16 +22,20 @@ func pathConfig(b *kubeAuthBackend) *framework.Path {
"kubernetes_host": {
Type: framework.TypeString,
Description: "Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server.",
+ DisplayName: "Kubernetes Host",
},
+
"kubernetes_ca_cert": {
Type: framework.TypeString,
Description: "PEM encoded CA cert for use by the TLS client used to talk with the API.",
+ DisplayName: "Kubernetes CA Certificate",
},
"token_reviewer_jwt": {
Type: framework.TypeString,
Description: `A service account JWT used to access the
TokenReview API to validate other JWTs during login. If not set
the JWT used for login will be used to access the API.`,
+ DisplayName: "Token Reviewer JWT",
},
"pem_keys": {
Type: framework.TypeCommaStringSlice,
@@ -39,6 +43,7 @@ the JWT used for login will be used to access the API.`,
used to verify the signatures of kubernetes service account
JWTs. If a certificate is given, its public key will be
extracted. Not every installation of Kuberentes exposes these keys.`,
+ DisplayName: "Service Account Verification Keys",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/clients/sts.go b/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/clients/sts.go
index 5771417ac93b..210e2566e7ec 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/clients/sts.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/clients/sts.go
@@ -23,9 +23,9 @@ type STSClient struct {
client *sts.Client
}
-func (c *STSClient) AssumeRole(userName, roleARN string) (*sts.AssumeRoleResponse, error) {
+func (c *STSClient) AssumeRole(roleSessionName, roleARN string) (*sts.AssumeRoleResponse, error) {
assumeRoleReq := sts.CreateAssumeRoleRequest()
assumeRoleReq.RoleArn = roleARN
- assumeRoleReq.RoleSessionName = userName
+ assumeRoleReq.RoleSessionName = roleSessionName
return c.client.AssumeRole(assumeRoleReq)
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/path_creds.go b/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/path_creds.go
index e901a8c8cca1..a2d7330948a7 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/path_creds.go
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/path_creds.go
@@ -60,7 +60,7 @@ func (b *backend) operationCredsRead(ctx context.Context, req *logical.Request,
if err != nil {
return nil, err
}
- assumeRoleResp, err := client.AssumeRole(generateUsername(req.DisplayName, roleName), role.RoleARN)
+ assumeRoleResp, err := client.AssumeRole(generateRoleSessionName(req.DisplayName, roleName), role.RoleARN)
if err != nil {
return nil, err
}
@@ -243,15 +243,24 @@ func (b *backend) operationCredsRead(ctx context.Context, req *logical.Request,
// The max length of a username per AliCloud is 64.
func generateUsername(displayName, roleName string) string {
- username := fmt.Sprintf("%s-%s-", displayName, roleName)
+ return generateName(displayName, roleName, 64)
+}
+
+// The max length of a role session name per AliCloud is 32.
+func generateRoleSessionName(displayName, roleName string) string {
+ return generateName(displayName, roleName, 32)
+}
+
+func generateName(displayName, roleName string, maxLength int) string {
+ name := fmt.Sprintf("%s-%s-", displayName, roleName)
- // The time and random number take up to 15 more in length, so if the username
+ // The time and random number take up to 15 more in length, so if the name
// is too long we need to trim it.
- if len(username) > 49 {
- username = username[:49]
+ if len(name) > maxLength-15 {
+ name = name[:maxLength-15]
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
- return fmt.Sprintf("%s%d-%d", username, time.Now().Unix(), r.Intn(10000))
+ return fmt.Sprintf("%s%d-%d", name, time.Now().Unix(), r.Intn(10000))
}
const pathCredsHelpSyn = `
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-gcpkms/README.md b/vendor/github.com/hashicorp/vault-plugin-secrets-gcpkms/README.md
index f32c2cddb0bb..1924eb99156d 100644
--- a/vendor/github.com/hashicorp/vault-plugin-secrets-gcpkms/README.md
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-gcpkms/README.md
@@ -84,6 +84,12 @@ instructions are only useful if you want to develop against the plugin.**
$ vault secrets enable -path=gcpkms -plugin=vault-plugin-secrets-gcpkms plugin
```
+### Documentation
+
+The documentation for the plugin lives in the [main Vault
+repository](/hashicorp/vault) in the `website/` folder. Please make any
+documentation updates as separate Pull Requests against that repo.
+
### Tests
This plugin has both unit tests and acceptance tests. To run the acceptance
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 72a0c2237706..85025862f438 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1391,10 +1391,10 @@
"revisionTime": "2018-11-09T18:06:36Z"
},
{
- "checksumSHA1": "Jj3mz58lSv0dsuXd6bVxGV4759w=",
+ "checksumSHA1": "UgLfwpXoRLpMOF0rzaj+cRcTtdo=",
"path": "github.com/hashicorp/vault-plugin-auth-azure",
- "revision": "4c0b46069a2293d5a6ca7506c8d3e0c4a92f3dbc",
- "revisionTime": "2018-12-07T23:25:28Z"
+ "revision": "0af1d040b5b329f41904cadcd96be55179468880",
+ "revisionTime": "2019-02-01T22:26:32Z"
},
{
"checksumSHA1": "4Z/niOo76EcP8KpLdSL5GdDcy78=",
@@ -1403,10 +1403,10 @@
"revisionTime": "2018-08-16T20:11:31Z"
},
{
- "checksumSHA1": "llLHR3FVdqtuFgjIoL9GNN8zKKI=",
+ "checksumSHA1": "Nd9aBfL80t7N8B9VVsNBgihA5f4=",
"path": "github.com/hashicorp/vault-plugin-auth-gcp/plugin",
- "revision": "4d63bbfe6fcf0363a2ea2c273846e88b95d85089",
- "revisionTime": "2018-12-10T20:01:33Z"
+ "revision": "7d4c2101e7d0b61ec9fb0dc3c75d79920c6369c5",
+ "revisionTime": "2019-02-01T21:54:14Z"
},
{
"checksumSHA1": "jCtLHj3YAONxCcV6v6kifTrRJwM=",
@@ -1417,40 +1417,40 @@
"versionExact": "oidc-dev"
},
{
- "checksumSHA1": "Ldg2jQeyPrpAupyQq4lRVN+jfFY=",
+ "checksumSHA1": "NfVgV3CmKXGRsXk1sYVgMMRZ5Zc=",
"path": "github.com/hashicorp/vault-plugin-auth-kubernetes",
- "revision": "091d9e5d5fabce920533eff31ad778778992a671",
- "revisionTime": "2018-11-30T16:25:33Z"
+ "revision": "db96aa4ab438cbc1cf544cec758d0d16ca4e9681",
+ "revisionTime": "2019-02-01T22:22:09Z"
},
{
"checksumSHA1": "PmhyvCKVlEMEP6JO31ozW+CBIiE=",
"path": "github.com/hashicorp/vault-plugin-secrets-ad/plugin",
- "revision": "540c0b6f1f113a1c6bdaa130a35ee8530c072b5a",
- "revisionTime": "2018-11-09T18:28:34Z"
+ "revision": "4796d99801253c6f10d7d96b968a3204a9a1ead8",
+ "revisionTime": "2019-01-31T22:24:16Z"
},
{
"checksumSHA1": "GOxdFElG31lXWgKFG9aqpDcG47M=",
"path": "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client",
- "revision": "540c0b6f1f113a1c6bdaa130a35ee8530c072b5a",
- "revisionTime": "2018-11-09T18:28:34Z"
+ "revision": "4796d99801253c6f10d7d96b968a3204a9a1ead8",
+ "revisionTime": "2019-01-31T22:24:16Z"
},
{
"checksumSHA1": "RaH2xTkjaToCk+RoPhap7I66ibo=",
"path": "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util",
- "revision": "540c0b6f1f113a1c6bdaa130a35ee8530c072b5a",
- "revisionTime": "2018-11-09T18:28:34Z"
+ "revision": "4796d99801253c6f10d7d96b968a3204a9a1ead8",
+ "revisionTime": "2019-01-31T22:24:16Z"
},
{
- "checksumSHA1": "VLXyxS5dEoiWTSFmpMJIz+Pwtmw=",
+ "checksumSHA1": "l0xVOHA0/SIjNfrmBRbrFvMVOaw=",
"path": "github.com/hashicorp/vault-plugin-secrets-alicloud",
- "revision": "2aee79cc5cbf1bbca654dbc594f809cafc19cd8d",
- "revisionTime": "2018-11-09T18:14:53Z"
+ "revision": "b0abe36195cb171e673a9f6425df977eff1ef825",
+ "revisionTime": "2019-01-31T21:18:12Z"
},
{
- "checksumSHA1": "dqduixICi6NeyLNRCDdw62t1LFU=",
+ "checksumSHA1": "e96mN6plz/ApctpjvU2kiCumOl0=",
"path": "github.com/hashicorp/vault-plugin-secrets-alicloud/clients",
- "revision": "2aee79cc5cbf1bbca654dbc594f809cafc19cd8d",
- "revisionTime": "2018-11-09T18:14:53Z"
+ "revision": "b0abe36195cb171e673a9f6425df977eff1ef825",
+ "revisionTime": "2019-01-31T21:18:12Z"
},
{
"checksumSHA1": "rgeBhrdLyF2orH3QA/H66ZSSbuo=",
@@ -1477,16 +1477,16 @@
"revisionTime": "2018-09-21T17:32:00Z"
},
{
- "checksumSHA1": "TbPoZQkYZ7Bukdw6U+/GejbaZAs=",
+ "checksumSHA1": "StwRTX92gyH7iHkyZk4df+dLISM=",
"path": "github.com/hashicorp/vault-plugin-secrets-gcpkms",
- "revision": "6cd991800a6d7af69b1950ec4cbf402d021a099d",
- "revisionTime": "2018-12-12T18:25:53Z"
+ "revision": "d6b25b0b4a39132ec3c02f19631b6a9bdadef042",
+ "revisionTime": "2019-01-16T16:49:38Z"
},
{
"checksumSHA1": "yhUUqN5rbEXnfI8WfGUofXToD+o=",
"path": "github.com/hashicorp/vault-plugin-secrets-kv",
- "revision": "9dbe04db0e34c9c3c75bedcdb16d8ff78f0c54bd",
- "revisionTime": "2018-12-19T17:59:33Z"
+ "revision": "edbfe287c5d9277cecf2c91c79ffcc34f19d2049",
+ "revisionTime": "2019-01-15T20:37:47Z"
},
{
"checksumSHA1": "ldkAQ1CpiAaQ9sti0qIch+UyRsI=",
diff --git a/version/version_base.go b/version/version_base.go
index 9fe8ab27fc54..1b566a634337 100644
--- a/version/version_base.go
+++ b/version/version_base.go
@@ -2,7 +2,7 @@ package version
func init() {
// The main version number that is being run at the moment.
- Version = "1.0.2"
+ Version = "1.0.3"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
diff --git a/website/config.rb b/website/config.rb
index 697dbc206fde..bdc8a4a9a7c3 100644
--- a/website/config.rb
+++ b/website/config.rb
@@ -6,7 +6,7 @@
activate :hashicorp do |h|
h.name = "vault"
- h.version = "1.0.2"
+ h.version = "1.0.3"
h.github_slug = "hashicorp/vault"
h.website_root = "website"
h.releases_enabled = true
diff --git a/website/source/docs/secrets/transit/index.html.md b/website/source/docs/secrets/transit/index.html.md
index 7bf6998fe7f5..881bac6253f2 100644
--- a/website/source/docs/secrets/transit/index.html.md
+++ b/website/source/docs/secrets/transit/index.html.md
@@ -32,13 +32,14 @@ disabled to accommodate auditing requirements.
## Working Set Management
-This secrets engine does not currently delete keys. Keys that are out of the
-working set (earlier than a key's specified `min_decryption_version` are
-instead archived. This is a performance consideration to keep key loading fast,
-as well as a security consideration: by disallowing decryption of old versions
-of keys, found ciphertext corresponding to obsolete (but sensitive) data can
-not be decrypted by most users, but in an emergency the
-`min_decryption_version` can be moved back to allow for legitimate decryption.
+The Transit engine supports versioning of keys. Key versions that are earlier
+than a key's specified `min_decryption_version` get archived, and the rest of
+the key versions belong to the working set. This is a performance consideration
+to keep key loading fast, as well as a security consideration: by disallowing
+decryption of old versions of keys, found ciphertext corresponding to obsolete
+(but sensitive) data can not be decrypted by most users, but in an emergency
+the `min_decryption_version` can be moved back to allow for legitimate
+decryption.
Currently this archive is stored in a single storage entry. With some storage
backends, notably those using Raft or Paxos for HA capabilities, frequent
From 368d431b93a766e01715968a01e92a6d38e9ed86 Mon Sep 17 00:00:00 2001
From: madalynrose
Date: Thu, 14 Feb 2019 13:52:34 -0500
Subject: [PATCH 07/31] Dynamic OpenAPI UI (#6209)
---
ui/app/adapters/auth-config/azure.js | 1 +
ui/app/adapters/auth-config/gcp.js | 1 +
ui/app/adapters/auth-config/github.js | 1 +
ui/app/adapters/auth-config/kubernetes.js | 1 +
ui/app/adapters/auth-config/ldap.js | 1 +
ui/app/adapters/auth-config/okta.js | 1 +
ui/app/adapters/auth-config/radius.js | 1 +
ui/app/adapters/pki-certificate-sign.js | 4 +
ui/app/adapters/pki-certificate.js | 1 -
ui/app/adapters/secret-engine.js | 14 +-
ui/app/adapters/secret.js | 4 +
ui/app/components/auth-config-form/config.js | 11 +-
ui/app/components/auth-config-form/options.js | 16 +-
ui/app/components/mount-backend-form.js | 118 ++---------
.../vault/cluster/settings/auth/enable.js | 12 +-
ui/app/machines/auth-machine.js | 8 +-
ui/app/models/auth-config.js | 5 +-
ui/app/models/auth-config/azure.js | 9 +-
ui/app/models/auth-config/gcp.js | 9 +-
ui/app/models/auth-config/github.js | 8 +-
ui/app/models/auth-config/kubernetes.js | 12 +-
ui/app/models/auth-config/ldap.js | 56 +----
ui/app/models/auth-config/okta.js | 13 +-
ui/app/models/auth-config/radius.js | 30 +--
ui/app/models/pki-certificate-sign.js | 9 +-
ui/app/models/role-aws.js | 9 +-
ui/app/models/role-pki.js | 110 +---------
ui/app/models/role-ssh.js | 22 +-
ui/app/models/ssh-sign.js | 5 +-
.../routes/vault/cluster/secrets/backend.js | 1 +
.../cluster/secrets/backend/credentials.js | 13 ++
.../vault/cluster/secrets/backend/list.js | 23 ++-
.../cluster/secrets/backend/secret-edit.js | 31 ++-
.../vault/cluster/secrets/backend/sign.js | 4 +
.../settings/auth/configure/section.js | 31 ++-
ui/app/services/path-help.js | 59 ++++++
.../components/auth-config-form/config.hbs | 9 +-
.../components/auth-config-form/options.hbs | 9 +-
ui/app/templates/components/form-field.hbs | 22 +-
.../components/mount-backend-form.hbs | 150 ++++++++------
ui/app/templates/components/string-list.hbs | 32 ++-
.../components/wizard/auth-config.hbs | 10 +
.../components/wizard/auth-enable.hbs | 6 +-
.../partials/form-field-from-model.hbs | 97 +++++----
.../settings/auth/configure/section.hbs | 4 +-
.../vault/cluster/settings/auth/enable.hbs | 5 +-
ui/app/utils/openapi-to-attrs.js | 89 ++++++++
.../settings/auth/configure/section-test.js | 2 +-
.../acceptance/settings/auth/enable-test.js | 6 +-
.../auth-config-form/options-test.js | 16 ++
.../integration/components/auth-form-test.js | 3 -
.../components/mount-backend-form-test.js | 55 -----
.../pages/secrets/backend/pki/edit-role.js | 2 +-
ui/tests/unit/machines/auth-machine-test.js | 10 +-
ui/tests/unit/utils/openapi-to-attrs-test.js | 191 ++++++++++++++++++
55 files changed, 811 insertions(+), 561 deletions(-)
create mode 100644 ui/app/services/path-help.js
create mode 100644 ui/app/templates/components/wizard/auth-config.hbs
create mode 100644 ui/app/utils/openapi-to-attrs.js
create mode 100644 ui/tests/unit/utils/openapi-to-attrs-test.js
diff --git a/ui/app/adapters/auth-config/azure.js b/ui/app/adapters/auth-config/azure.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/azure.js
+++ b/ui/app/adapters/auth-config/azure.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/auth-config/gcp.js b/ui/app/adapters/auth-config/gcp.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/gcp.js
+++ b/ui/app/adapters/auth-config/gcp.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/auth-config/github.js b/ui/app/adapters/auth-config/github.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/github.js
+++ b/ui/app/adapters/auth-config/github.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/auth-config/kubernetes.js b/ui/app/adapters/auth-config/kubernetes.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/kubernetes.js
+++ b/ui/app/adapters/auth-config/kubernetes.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/auth-config/ldap.js b/ui/app/adapters/auth-config/ldap.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/ldap.js
+++ b/ui/app/adapters/auth-config/ldap.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/auth-config/okta.js b/ui/app/adapters/auth-config/okta.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/okta.js
+++ b/ui/app/adapters/auth-config/okta.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/auth-config/radius.js b/ui/app/adapters/auth-config/radius.js
index 21f5624ac4d0..e43cb2ea860f 100644
--- a/ui/app/adapters/auth-config/radius.js
+++ b/ui/app/adapters/auth-config/radius.js
@@ -1,2 +1,3 @@
import AuthConfig from './_base';
+
export default AuthConfig.extend();
diff --git a/ui/app/adapters/pki-certificate-sign.js b/ui/app/adapters/pki-certificate-sign.js
index eb5ca26157a4..1acdbfb8192b 100644
--- a/ui/app/adapters/pki-certificate-sign.js
+++ b/ui/app/adapters/pki-certificate-sign.js
@@ -7,4 +7,8 @@ export default Adapter.extend({
}
return `/v1/${role.backend}/sign/${role.name}`;
},
+
+ pathForType() {
+ return 'sign';
+ },
});
diff --git a/ui/app/adapters/pki-certificate.js b/ui/app/adapters/pki-certificate.js
index 942d2610b106..8954a0ff60d3 100644
--- a/ui/app/adapters/pki-certificate.js
+++ b/ui/app/adapters/pki-certificate.js
@@ -13,7 +13,6 @@ export default Adapter.extend({
}
return url;
},
-
optionsForQuery(id) {
let data = {};
if (!id) {
diff --git a/ui/app/adapters/secret-engine.js b/ui/app/adapters/secret-engine.js
index 56481655368c..42b998f26905 100644
--- a/ui/app/adapters/secret-engine.js
+++ b/ui/app/adapters/secret-engine.js
@@ -7,16 +7,20 @@ export default ApplicationAdapter.extend({
return path ? url + '/' + path : url;
},
+ internalURL(path) {
+ let url = `/${this.urlPrefix()}/internal/ui/mounts`;
+ if (path) {
+ url = `${url}/${path}`;
+ }
+ return url;
+ },
+
pathForType() {
return 'mounts';
},
query(store, type, query) {
- let url = `/${this.urlPrefix()}/internal/ui/mounts`;
- if (query.path) {
- url = `${url}/${query.path}`;
- }
- return this.ajax(url, 'GET');
+ return this.ajax(this.internalURL(query.path), 'GET');
},
createRecord(store, type, snapshot) {
diff --git a/ui/app/adapters/secret.js b/ui/app/adapters/secret.js
index d282d08e89d0..59c39a0387ea 100644
--- a/ui/app/adapters/secret.js
+++ b/ui/app/adapters/secret.js
@@ -34,6 +34,10 @@ export default ApplicationAdapter.extend({
return url;
},
+ pathForType() {
+ return 'mounts';
+ },
+
optionsForQuery(id, action, wrapTTL) {
let data = {};
if (action === 'query') {
diff --git a/ui/app/components/auth-config-form/config.js b/ui/app/components/auth-config-form/config.js
index 31fd4ac0b258..d601ebbf1115 100644
--- a/ui/app/components/auth-config-form/config.js
+++ b/ui/app/components/auth-config-form/config.js
@@ -8,10 +8,11 @@ const AuthConfigBase = Component.extend({
model: null,
flashMessages: service(),
-
+ router: service(),
+ wizard: service(),
saveModel: task(function*() {
try {
- yield this.get('model').save();
+ yield this.model.save();
} catch (err) {
// AdapterErrors are handled by the error-message component
// in the form
@@ -20,7 +21,11 @@ const AuthConfigBase = Component.extend({
}
return;
}
- this.get('flashMessages').success('The configuration was saved successfully.');
+ if (this.wizard.currentMachine === 'authentication' && this.wizard.featureState === 'config') {
+ this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE');
+ }
+ this.router.transitionTo('vault.cluster.access.methods').followRedirects();
+ this.flashMessages.success('The configuration was saved successfully.');
}),
});
diff --git a/ui/app/components/auth-config-form/options.js b/ui/app/components/auth-config-form/options.js
index 82f850e1e5b1..528c803c8591 100644
--- a/ui/app/components/auth-config-form/options.js
+++ b/ui/app/components/auth-config-form/options.js
@@ -1,14 +1,16 @@
import AuthConfigComponent from './config';
+import { inject as service } from '@ember/service';
import { task } from 'ember-concurrency';
import DS from 'ember-data';
export default AuthConfigComponent.extend({
+ router: service(),
+ wizard: service(),
saveModel: task(function*() {
- const model = this.get('model');
- let data = model.get('config').serialize();
- data.description = model.get('description');
+ let data = this.model.config.serialize();
+ data.description = this.model.description;
try {
- yield model.tune(data);
+ yield this.model.tune(data);
} catch (err) {
// AdapterErrors are handled by the error-message component
// in the form
@@ -17,6 +19,10 @@ export default AuthConfigComponent.extend({
}
return;
}
- this.get('flashMessages').success('The configuration options were saved successfully.');
+ if (this.wizard.currentMachine === 'authentication' && this.wizard.featureState === 'config') {
+ this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE');
+ }
+ this.router.transitionTo('vault.cluster.access.methods').followRedirects();
+ this.flashMessages.success('The configuration was saved successfully.');
}),
});
diff --git a/ui/app/components/mount-backend-form.js b/ui/app/components/mount-backend-form.js
index fbfb6c6a09dd..80cb02502873 100644
--- a/ui/app/components/mount-backend-form.js
+++ b/ui/app/components/mount-backend-form.js
@@ -21,7 +21,6 @@ export default Component.extend({
*
*/
onMountSuccess() {},
- onConfigError() {},
/*
* @param String
* @public
@@ -41,18 +40,18 @@ export default Component.extend({
*/
mountModel: null,
- showConfig: false,
+ showEnable: false,
init() {
this._super(...arguments);
- const type = this.get('mountType');
+ const type = this.mountType;
const modelType = type === 'secret' ? 'secret-engine' : 'auth-method';
- const model = this.get('store').createRecord(modelType);
+ const model = this.store.createRecord(modelType);
this.set('mountModel', model);
},
mountTypes: computed('mountType', function() {
- return this.get('mountType') === 'secret' ? ENGINES : METHODS;
+ return this.mountType === 'secret' ? ENGINES : METHODS;
}),
willDestroy() {
@@ -60,44 +59,10 @@ export default Component.extend({
this.get('mountModel').rollbackAttributes();
},
- getConfigModelType(methodType) {
- let mountType = this.get('mountType');
- // will be something like secret-aws
- // or auth-azure
- let key = `${mountType}-${methodType}`;
- let noConfig = ['auth-approle', 'auth-alicloud'];
- if (mountType === 'secret' || noConfig.includes(key)) {
- return;
- }
- if (methodType === 'aws') {
- return 'auth-config/aws/client';
- }
- return `auth-config/${methodType}`;
- },
-
- changeConfigModel(methodType) {
- let mount = this.get('mountModel');
- if (this.get('mountType') === 'secret') {
- return;
- }
- let configRef = mount.hasMany('authConfigs').value();
- let currentConfig = configRef && configRef.get('firstObject');
- if (currentConfig) {
- // rollbackAttributes here will remove the the config model from the store
- // because `isNew` will be true
- currentConfig.rollbackAttributes();
- currentConfig.unloadRecord();
- }
- let configType = this.getConfigModelType(methodType);
- if (!configType) return;
- let config = this.get('store').createRecord(configType);
- config.set('backend', mount);
- },
-
checkPathChange(type) {
- let mount = this.get('mountModel');
- let currentPath = mount.get('path');
- let list = this.get('mountTypes');
+ let mount = this.mountModel;
+ let currentPath = mount.path;
+ let list = this.mountTypes;
// if the current path matches a type (meaning the user hasn't altered it),
// change it here to match the new type
let isUnchanged = list.findBy('type', currentPath);
@@ -107,7 +72,7 @@ export default Component.extend({
},
mountBackend: task(function*() {
- const mountModel = this.get('mountModel');
+ const mountModel = this.mountModel;
const { type, path } = mountModel.getProperties('type', 'path');
try {
yield mountModel.save();
@@ -116,74 +81,27 @@ export default Component.extend({
return;
}
- let mountType = this.get('mountType');
+ let mountType = this.mountType;
mountType = mountType === 'secret' ? `${mountType}s engine` : `${mountType} method`;
- this.get('flashMessages').success(`Successfully mounted the ${type} ${mountType} at ${path}.`);
- if (this.get('mountType') === 'secret') {
- yield this.get('onMountSuccess')(type, path);
- return;
- }
- yield this.get('saveConfig').perform(mountModel);
- }).drop(),
-
- advanceWizard() {
- this.get('wizard').transitionFeatureMachine(
- this.get('wizard.featureState'),
- 'CONTINUE',
- this.get('mountModel').get('type')
- );
- },
- saveConfig: task(function*(mountModel) {
- const configRef = mountModel.hasMany('authConfigs').value();
- const { type, path } = mountModel.getProperties('type', 'path');
- if (!configRef) {
- this.advanceWizard();
- yield this.get('onMountSuccess')(type, path);
- return;
- }
- const config = configRef.get('firstObject');
- try {
- if (config && Object.keys(config.changedAttributes()).length) {
- yield config.save();
- this.advanceWizard();
- this.get('flashMessages').success(
- `The config for ${type} ${this.get('mountType')} method at ${path} was saved successfully.`
- );
- }
- yield this.get('onMountSuccess')(type, path);
- } catch (err) {
- this.get('flashMessages').danger(
- `There was an error saving the configuration for ${type} ${this.get(
- 'mountType'
- )} method at ${path}. ${err.errors.join(' ')}`
- );
- yield this.get('onConfigError')(mountModel.id);
- }
+ this.flashMessages.success(`Successfully mounted the ${type} ${mountType} at ${path}.`);
+ yield this.onMountSuccess(type, path);
+ return;
}).drop(),
actions: {
onTypeChange(path, value) {
if (path === 'type') {
- this.get('wizard').set('componentState', value);
- this.changeConfigModel(value);
+ this.wizard.set('componentState', value);
this.checkPathChange(value);
}
},
- toggleShowConfig(value) {
- this.set('showConfig', value);
- if (value === true && this.get('wizard.featureState') === 'idle') {
- this.get('wizard').transitionFeatureMachine(
- this.get('wizard.featureState'),
- 'CONTINUE',
- this.get('mountModel').get('type')
- );
+ toggleShowEnable(value) {
+ this.set('showEnable', value);
+ if (value === true && this.wizard.featureState === 'idle') {
+ this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', this.mountModel.type);
} else {
- this.get('wizard').transitionFeatureMachine(
- this.get('wizard.featureState'),
- 'RESET',
- this.get('mountModel').get('type')
- );
+ this.wizard.transitionFeatureMachine(this.wizard.featureState, 'RESET', this.mountModel.type);
}
},
},
diff --git a/ui/app/controllers/vault/cluster/settings/auth/enable.js b/ui/app/controllers/vault/cluster/settings/auth/enable.js
index d5dd984c3256..9d3153678f6a 100644
--- a/ui/app/controllers/vault/cluster/settings/auth/enable.js
+++ b/ui/app/controllers/vault/cluster/settings/auth/enable.js
@@ -4,14 +4,10 @@ import Controller from '@ember/controller';
export default Controller.extend({
wizard: service(),
actions: {
- onMountSuccess: function(type) {
- let transition = this.transitionToRoute('vault.cluster.access.methods');
- return transition.followRedirects().then(() => {
- this.get('wizard').transitionFeatureMachine(this.get('wizard.featureState'), 'CONTINUE', type);
- });
- },
- onConfigError: function(modelId) {
- return this.transitionToRoute('vault.cluster.settings.auth.configure', modelId);
+ onMountSuccess: function(type, path) {
+ this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', type);
+ let transition = this.transitionToRoute('vault.cluster.settings.auth.configure', path);
+ return transition.followRedirects();
},
},
});
diff --git a/ui/app/machines/auth-machine.js b/ui/app/machines/auth-machine.js
index cec39a9896ba..6ee82dd08a62 100644
--- a/ui/app/machines/auth-machine.js
+++ b/ui/app/machines/auth-machine.js
@@ -22,16 +22,16 @@ export default {
{ type: 'render', level: 'step', component: 'wizard/auth-enable' },
],
on: {
- CONTINUE: 'list',
+ CONTINUE: 'config',
},
},
- list: {
+ config: {
onEntry: [
- { type: 'render', level: 'step', component: 'wizard/auth-list' },
{ type: 'render', level: 'feature', component: 'wizard/mounts-wizard' },
+ { type: 'render', level: 'step', component: 'wizard/auth-config' },
],
on: {
- DETAILS: 'details',
+ CONTINUE: 'details',
},
},
details: {
diff --git a/ui/app/models/auth-config.js b/ui/app/models/auth-config.js
index 88befe73b6ab..3c70bb14ec3d 100644
--- a/ui/app/models/auth-config.js
+++ b/ui/app/models/auth-config.js
@@ -2,5 +2,8 @@ import DS from 'ember-data';
const { belongsTo } = DS;
export default DS.Model.extend({
- backend: belongsTo('auth-method', { readOnly: true, async: false }),
+ backend: belongsTo('auth-method', { inverse: 'authConfigs', readOnly: true, async: false }),
+ getHelpUrl: function(backend) {
+ return `/v1/auth/${backend}/config?help=1`;
+ },
});
diff --git a/ui/app/models/auth-config/azure.js b/ui/app/models/auth-config/azure.js
index 1949d20e8563..6ef8a3f507f0 100644
--- a/ui/app/models/auth-config/azure.js
+++ b/ui/app/models/auth-config/azure.js
@@ -1,12 +1,13 @@
import { computed } from '@ember/object';
import DS from 'ember-data';
-
import AuthConfig from '../auth-config';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
import fieldToAttrs from 'vault/utils/field-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
+ useOpenAPI: true,
tenantId: attr('string', {
label: 'Tenant ID',
helpText: 'The tenant ID for the Azure Active Directory organization',
@@ -26,12 +27,16 @@ export default AuthConfig.extend({
googleCertsEndpoint: attr('string'),
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{ default: ['tenantId', 'resource'] },
{
'Azure Options': ['clientId', 'clientSecret'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
+
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/auth-config/gcp.js b/ui/app/models/auth-config/gcp.js
index 9185fb297614..25d91d284812 100644
--- a/ui/app/models/auth-config/gcp.js
+++ b/ui/app/models/auth-config/gcp.js
@@ -1,12 +1,14 @@
import { computed } from '@ember/object';
import DS from 'ember-data';
-
import AuthConfig from '../auth-config';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
import fieldToAttrs from 'vault/utils/field-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
+ useOpenAPI: true,
+ // We have to leave this here because the backend doesn't support the file type yet.
credentials: attr('string', {
editType: 'file',
}),
@@ -14,12 +16,15 @@ export default AuthConfig.extend({
googleCertsEndpoint: attr('string'),
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{ default: ['credentials'] },
{
'Google Cloud Options': ['googleCertsEndpoint'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/auth-config/github.js b/ui/app/models/auth-config/github.js
index 0c54653836d6..df745af14e86 100644
--- a/ui/app/models/auth-config/github.js
+++ b/ui/app/models/auth-config/github.js
@@ -1,24 +1,28 @@
import { computed } from '@ember/object';
import DS from 'ember-data';
-
import AuthConfig from '../auth-config';
import fieldToAttrs from 'vault/utils/field-to-attrs';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
+ useOpenAPI: true,
organization: attr('string'),
baseUrl: attr('string', {
label: 'Base URL',
}),
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{ default: ['organization'] },
{
'GitHub Options': ['baseUrl'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
return fieldToAttrs(this, groups);
}),
diff --git a/ui/app/models/auth-config/kubernetes.js b/ui/app/models/auth-config/kubernetes.js
index 4d8da679a767..8eb0b0913e4a 100644
--- a/ui/app/models/auth-config/kubernetes.js
+++ b/ui/app/models/auth-config/kubernetes.js
@@ -2,36 +2,34 @@ import { computed } from '@ember/object';
import DS from 'ember-data';
import AuthConfig from '../auth-config';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
import fieldToAttrs from 'vault/utils/field-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
+ useOpenAPI: true,
kubernetesHost: attr('string', {
- label: 'Kubernetes Host',
helpText:
'Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server',
}),
kubernetesCaCert: attr('string', {
- label: 'Kubernetes CA Certificate',
editType: 'file',
helpText: 'PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API',
}),
tokenReviewerJwt: attr('string', {
- label: 'Token Reviewer JWT',
helpText:
'A service account JWT used to access the TokenReview API to validate other JWTs during login. If not set the JWT used for login will be used to access the API',
}),
pemKeys: attr({
- label: 'Service account verification keys',
editType: 'stringArray',
}),
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{
default: ['kubernetesHost', 'kubernetesCaCert'],
},
@@ -39,6 +37,10 @@ export default AuthConfig.extend({
'Kubernetes Options': ['tokenReviewerJwt', 'pemKeys'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
+
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/auth-config/ldap.js b/ui/app/models/auth-config/ldap.js
index 9bb4a14491e3..de51d1489bd1 100644
--- a/ui/app/models/auth-config/ldap.js
+++ b/ui/app/models/auth-config/ldap.js
@@ -3,97 +3,50 @@ import DS from 'ember-data';
import AuthConfig from '../auth-config';
import fieldToAttrs from 'vault/utils/field-to-attrs';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
- url: attr('string', {
- label: 'URL',
- }),
- starttls: attr('boolean', {
- defaultValue: false,
- label: 'Issue StartTLS command after establishing an unencrypted connection',
- }),
- tlsMinVersion: attr('string', {
- label: 'Minimum TLS Version',
- defaultValue: 'tls12',
- possibleValues: ['tls10', 'tls11', 'tls12'],
- }),
-
- tlsMaxVersion: attr('string', {
- label: 'Maximum TLS Version',
- defaultValue: 'tls12',
- possibleValues: ['tls10', 'tls11', 'tls12'],
- }),
- insecureTls: attr('boolean', {
- defaultValue: false,
- label: 'Skip LDAP server SSL certificate verification',
- }),
- certificate: attr('string', {
- label: 'CA certificate to verify LDAP server certificate',
- editType: 'file',
- }),
-
+ useOpenAPI: true,
binddn: attr('string', {
- label: 'Name of Object to bind (binddn)',
helpText: 'Used when performing user search. Example: cn=vault,ou=Users,dc=example,dc=com',
}),
bindpass: attr('string', {
- label: 'Password',
helpText: 'Used along with binddn when performing user search',
sensitive: true,
}),
-
userdn: attr('string', {
- label: 'User DN',
helpText: 'Base DN under which to perform user search. Example: ou=Users,dc=example,dc=com',
}),
userattr: attr('string', {
- label: 'User Attribute',
- defaultValue: 'cn',
helpText:
'Attribute on user attribute object matching the username passed when authenticating. Examples: sAMAccountName, cn, uid',
}),
- discoverdn: attr('boolean', {
- defaultValue: false,
- label: 'Use anonymous bind to discover the bind DN of a user',
- }),
- denyNullBind: attr('boolean', {
- defaultValue: true,
- label: 'Prevent users from bypassing authentication when providing an empty password',
- }),
upndomain: attr('string', {
- label: 'User Principal (UPN) Domain',
helpText:
'The userPrincipalDomain used to construct the UPN string for the authenticating user. The constructed UPN will appear as [username]@UPNDomain. Example: example.com, which will cause vault to bind as username@example.com.',
}),
groupfilter: attr('string', {
- label: 'Group Filter',
helpText:
'Go template used when constructing the group membership query. The template can access the following context variables: [UserDN, Username]. The default is (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}})), which is compatible with several common directory schemas. To support nested group resolution for Active Directory, instead use the following query: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))',
}),
groupdn: attr('string', {
- label: 'Group DN',
helpText:
'LDAP search base for group membership search. This can be the root containing either groups or users. Example: ou=Groups,dc=example,dc=com',
}),
groupattr: attr('string', {
- label: 'Group Attribute',
- defaultValue: 'cn',
-
helpText:
'LDAP attribute to follow on objects returned by groupfilter in order to enumerate user group membership. Examples: for groupfilter queries returning group objects, use: cn. For queries returning user objects, use: memberOf. The default is cn.',
}),
useTokenGroups: attr('boolean', {
- defaultValue: false,
- label: 'Use Token Groups',
helpText:
'Use the Active Directory tokenGroups constructed attribute to find the group memberships. This returns all security groups for the user, including nested groups. In an Active Directory environment with a large number of groups this method offers increased performance. Selecting this will cause Group DN, Attribute, and Filter to be ignored.',
}),
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{
default: ['url'],
},
@@ -117,6 +70,9 @@ export default AuthConfig.extend({
'Customize Group Membership Search': ['groupfilter', 'groupattr', 'groupdn', 'useTokenGroups'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/auth-config/okta.js b/ui/app/models/auth-config/okta.js
index 8796db7c5bf7..ae76e70777b1 100644
--- a/ui/app/models/auth-config/okta.js
+++ b/ui/app/models/auth-config/okta.js
@@ -2,32 +2,31 @@ import { computed } from '@ember/object';
import DS from 'ember-data';
import AuthConfig from '../auth-config';
import fieldToAttrs from 'vault/utils/field-to-attrs';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
+ useOpenAPI: true,
orgName: attr('string', {
- label: 'Organization Name',
helpText: 'Name of the organization to be used in the Okta API',
}),
apiToken: attr('string', {
- label: 'API Token',
helpText:
'Okta API token. This is required to query Okta for user group membership. If this is not supplied only locally configured groups will be enabled.',
}),
baseUrl: attr('string', {
- label: 'Base URL',
helpText:
'If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com',
}),
bypassOktaMfa: attr('boolean', {
defaultValue: false,
- label: 'Bypass Okta MFA',
helpText:
"Useful if Vault's built-in MFA mechanisms. Will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED",
}),
+
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{
default: ['orgName'],
},
@@ -35,6 +34,10 @@ export default AuthConfig.extend({
Options: ['apiToken', 'baseUrl', 'bypassOktaMfa'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
+
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/auth-config/radius.js b/ui/app/models/auth-config/radius.js
index d88088a3f408..7479c5a8b743 100644
--- a/ui/app/models/auth-config/radius.js
+++ b/ui/app/models/auth-config/radius.js
@@ -1,38 +1,18 @@
import { computed } from '@ember/object';
import DS from 'ember-data';
import AuthConfig from '../auth-config';
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
import fieldToAttrs from 'vault/utils/field-to-attrs';
const { attr } = DS;
export default AuthConfig.extend({
+ useOpenAPI: true,
host: attr('string'),
-
- port: attr('number', {
- defaultValue: 1812,
- }),
-
secret: attr('string'),
- unregisteredUserPolicies: attr('string', {
- label: 'Policies for unregistered users',
- }),
-
- dialTimeout: attr('number', {
- defaultValue: 10,
- }),
-
- nasPort: attr('number', {
- defaultValue: 10,
- label: 'NAS Port',
- }),
-
- nasIdentifier: attr('string', {
- label: 'NAS Identifier',
- }),
-
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{
default: ['host', 'secret'],
},
@@ -40,6 +20,10 @@ export default AuthConfig.extend({
'RADIUS Options': ['port', 'nasPort', 'nasIdentifier', 'dialTimeout', 'unregisteredUserPolicies'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
+
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/pki-certificate-sign.js b/ui/app/models/pki-certificate-sign.js
index 58bf2c35be76..8fa57d31eb41 100644
--- a/ui/app/models/pki-certificate-sign.js
+++ b/ui/app/models/pki-certificate-sign.js
@@ -2,7 +2,7 @@ import { copy } from 'ember-copy';
import { computed } from '@ember/object';
import DS from 'ember-data';
import Certificate from './pki-certificate';
-
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
const { attr } = DS;
export default Certificate.extend({
@@ -10,7 +10,7 @@ export default Certificate.extend({
readOnly: true,
defaultValue: false,
}),
-
+ useOpenAPI: true,
csr: attr('string', {
label: 'Certificate Signing Request (CSR)',
editType: 'textarea',
@@ -18,11 +18,14 @@ export default Certificate.extend({
fieldGroups: computed('signVerbatim', function() {
const options = { Options: ['altNames', 'ipSans', 'ttl', 'excludeCnFromSans', 'otherSans'] };
- const groups = [
+ let groups = [
{
default: ['csr', 'commonName', 'format', 'signVerbatim'],
},
];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, []);
+ }
if (this.get('signVerbatim') === false) {
groups.push(options);
}
diff --git a/ui/app/models/role-aws.js b/ui/app/models/role-aws.js
index a5be5a82d7e4..6f740dbc5bc2 100644
--- a/ui/app/models/role-aws.js
+++ b/ui/app/models/role-aws.js
@@ -29,6 +29,7 @@ export default DS.Model.extend({
fieldValue: 'id',
readOnly: true,
}),
+ useOpenAPI: false,
// credentialTypes are for backwards compatibility.
// we use this to populate "credentialType" in
// the serializer. if there is more than one, the
@@ -52,17 +53,15 @@ export default DS.Model.extend({
editType: 'json',
}),
fields: computed('credentialType', function() {
- let keys;
- let credentialType = this.get('credentialType');
+ let credentialType = this.credentialType;
let keysForType = {
iam_user: ['name', 'credentialType', 'policyArns', 'policyDocument'],
assumed_role: ['name', 'credentialType', 'roleArns', 'policyDocument'],
federation_token: ['name', 'credentialType', 'policyDocument'],
};
- keys = keysForType[credentialType];
- return expandAttributeMeta(this, keys);
- }),
+ return expandAttributeMeta(this, keysForType[credentialType]);
+ }),
updatePath: lazyCapabilities(apiPath`${'backend'}/roles/${'id'}`, 'backend', 'id'),
canDelete: alias('updatePath.canDelete'),
canEdit: alias('updatePath.canUpdate'),
diff --git a/ui/app/models/role-pki.js b/ui/app/models/role-pki.js
index 77fe609d7df6..3f4459f55c94 100644
--- a/ui/app/models/role-pki.js
+++ b/ui/app/models/role-pki.js
@@ -3,7 +3,7 @@ import { computed } from '@ember/object';
import DS from 'ember-data';
import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
import fieldToAttrs from 'vault/utils/field-to-attrs';
-
+import { combineFieldGroups } from 'vault/utils/openapi-to-attrs';
const { attr } = DS;
export default DS.Model.extend({
@@ -15,101 +15,10 @@ export default DS.Model.extend({
fieldValue: 'id',
readOnly: true,
}),
- keyType: attr('string', {
- possibleValues: ['rsa', 'ec'],
- }),
- ttl: attr({
- label: 'TTL',
- editType: 'ttl',
- }),
- maxTtl: attr({
- label: 'Max TTL',
- editType: 'ttl',
- }),
- allowLocalhost: attr('boolean', {}),
- allowedDomains: attr('string', {}),
- allowBareDomains: attr('boolean', {}),
- allowSubdomains: attr('boolean', {}),
- allowGlobDomains: attr('boolean', {}),
- allowAnyName: attr('boolean', {}),
- enforceHostnames: attr('boolean', {}),
- allowIpSans: attr('boolean', {
- defaultValue: true,
- label: 'Allow clients to request IP Subject Alternative Names (SANs)',
- }),
- allowedOtherSans: attr({
- editType: 'stringArray',
- label: 'Allowed Other SANs',
- }),
- serverFlag: attr('boolean', {
- defaultValue: true,
- }),
- clientFlag: attr('boolean', {
- defaultValue: true,
- }),
- codeSigningFlag: attr('boolean', {}),
- emailProtectionFlag: attr('boolean', {}),
- keyBits: attr('number', {
- defaultValue: 2048,
- }),
- keyUsage: attr('string', {
- defaultValue: 'DigitalSignature,KeyAgreement,KeyEncipherment',
- editType: 'stringArray',
- }),
- extKeyUsageOids: attr({
- label: 'Custom extended key usage OIDs',
- editType: 'stringArray',
- }),
- requireCn: attr('boolean', {
- label: 'Require common name',
- defaultValue: true,
- }),
- useCsrCommonName: attr('boolean', {
- label: 'Use CSR common name',
- defaultValue: true,
- }),
- useCsrSans: attr('boolean', {
- defaultValue: true,
- label: 'Use CSR subject alternative names (SANs)',
- }),
- ou: attr({
- label: 'OU (OrganizationalUnit)',
- editType: 'stringArray',
- }),
- organization: attr({
- editType: 'stringArray',
- }),
- country: attr({
- editType: 'stringArray',
- }),
- locality: attr({
- editType: 'stringArray',
- label: 'Locality/City',
- }),
- province: attr({
- editType: 'stringArray',
- label: 'Province/State',
- }),
- streetAddress: attr({
- editType: 'stringArray',
- }),
- postalCode: attr({
- editType: 'stringArray',
- }),
- generateLease: attr('boolean', {}),
- noStore: attr('boolean', {}),
- policyIdentifiers: attr({
- editType: 'stringArray',
- }),
- basicConstraintsValidForNonCA: attr('boolean', {
- label: 'Mark Basic Constraints valid when issuing non-CA certificates.',
- }),
- notBeforeDuration: attr({
- label: 'Not Before Duration',
- editType: 'ttl',
- defaultValue: '30s',
- }),
-
+ useOpenAPI: true,
+ getHelpUrl: function(backend) {
+ return `/v1/${backend}/roles/example?help=1`;
+ },
updatePath: lazyCapabilities(apiPath`${'backend'}/roles/${'id'}`, 'backend', 'id'),
canDelete: alias('updatePath.canDelete'),
canEdit: alias('updatePath.canUpdate'),
@@ -125,7 +34,7 @@ export default DS.Model.extend({
canSignVerbatim: alias('signVerbatimPath.canUpdate'),
fieldGroups: computed(function() {
- const groups = [
+ let groups = [
{ default: ['name', 'keyType'] },
{
Options: [
@@ -167,10 +76,13 @@ export default DS.Model.extend({
],
},
{
- Advanced: ['generateLease', 'noStore', 'basicConstraintsValidForNonCA', 'policyIdentifiers'],
+ Advanced: ['generateLease', 'noStore', 'basicConstraintsValidForNonCa', 'policyIdentifiers'],
},
];
-
+ let excludedFields = ['extKeyUsage'];
+ if (this.newFields) {
+ groups = combineFieldGroups(groups, this.newFields, excludedFields);
+ }
return fieldToAttrs(this, groups);
}),
});
diff --git a/ui/app/models/role-ssh.js b/ui/app/models/role-ssh.js
index 1bb553ab5d6c..fb1d6e052354 100644
--- a/ui/app/models/role-ssh.js
+++ b/ui/app/models/role-ssh.js
@@ -38,7 +38,12 @@ const CA_FIELDS = [
'allowUserKeyIds',
'keyIdFormat',
];
+
export default DS.Model.extend({
+ useOpenAPI: true,
+ getHelpUrl: function(backend) {
+ return `/v1/${backend}/roles/example?help=1`;
+ },
zeroAddress: attr('boolean', {
readOnly: true,
}),
@@ -46,12 +51,12 @@ export default DS.Model.extend({
readOnly: true,
}),
name: attr('string', {
- label: 'Role name',
+ label: 'Role Name',
fieldValue: 'id',
readOnly: true,
}),
keyType: attr('string', {
- possibleValues: ['ca', 'otp'],
+ possibleValues: ['ca', 'otp'], //overriding the API which also lists 'dynamic' as a type though it is deprecated
}),
adminUser: attr('string', {
helpText: 'Username of the admin user at the remote host',
@@ -68,25 +73,14 @@ export default DS.Model.extend({
'List of domains for which a client can request a certificate (e.g. `example.com`, or `*` to allow all)',
}),
cidrList: attr('string', {
- label: 'CIDR list',
helpText: 'List of CIDR blocks for which this role is applicable',
}),
excludeCidrList: attr('string', {
- label: 'Exclude CIDR list',
helpText: 'List of CIDR blocks that are not accepted by this role',
}),
port: attr('number', {
- defaultValue: 22,
helpText: 'Port number for the SSH connection (default is `22`)',
}),
- ttl: attr({
- label: 'TTL',
- editType: 'ttl',
- }),
- maxTtl: attr({
- label: 'Max TTL',
- editType: 'ttl',
- }),
allowedCriticalOptions: attr('string', {
helpText: 'List of critical options that certificates have when signed',
}),
@@ -114,11 +108,9 @@ export default DS.Model.extend({
'Specifies if host certificates that are requested are allowed to be subdomains of those listed in Allowed Domains',
}),
allowUserKeyIds: attr('boolean', {
- label: 'Allow user key IDs',
helpText: 'Specifies if users can override the key ID for a signed certificate with the "key_id" field',
}),
keyIdFormat: attr('string', {
- label: 'Key ID format',
helpText: 'When supplied, this value specifies a custom format for the key id of a signed certificate',
}),
diff --git a/ui/app/models/ssh-sign.js b/ui/app/models/ssh-sign.js
index 6368b9109330..8d8cb76a86ce 100644
--- a/ui/app/models/ssh-sign.js
+++ b/ui/app/models/ssh-sign.js
@@ -18,7 +18,10 @@ export default DS.Model.extend({
role: attr('object', {
readOnly: true,
}),
- publicKey: attr('string'),
+ publicKey: attr('string', {
+ label: 'Public Key',
+ editType: 'textarea',
+ }),
ttl: attr({
label: 'TTL',
editType: 'ttl',
diff --git a/ui/app/routes/vault/cluster/secrets/backend.js b/ui/app/routes/vault/cluster/secrets/backend.js
index b7964a59b186..9003842e013f 100644
--- a/ui/app/routes/vault/cluster/secrets/backend.js
+++ b/ui/app/routes/vault/cluster/secrets/backend.js
@@ -2,6 +2,7 @@ import { inject as service } from '@ember/service';
import Route from '@ember/routing/route';
export default Route.extend({
flashMessages: service(),
+ oldModel: null,
model(params) {
let { backend } = params;
return this.store
diff --git a/ui/app/routes/vault/cluster/secrets/backend/credentials.js b/ui/app/routes/vault/cluster/secrets/backend/credentials.js
index 655577ca90c0..958f0cb9ccb3 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/credentials.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/credentials.js
@@ -1,15 +1,28 @@
import { resolve } from 'rsvp';
import Route from '@ember/routing/route';
+import { getOwner } from '@ember/application';
+import { inject as service } from '@ember/service';
const SUPPORTED_DYNAMIC_BACKENDS = ['ssh', 'aws', 'pki'];
export default Route.extend({
templateName: 'vault/cluster/secrets/backend/credentials',
+ pathHelp: service('path-help'),
backendModel() {
return this.modelFor('vault.cluster.secrets.backend');
},
+ beforeModel() {
+ const { backend } = this.paramsFor('vault.cluster.secrets.backend');
+ if (backend != 'ssh') {
+ return;
+ }
+ let modelType = 'ssh-otp-credential';
+ let owner = getOwner(this);
+ return this.pathHelp.getNewModel(modelType, owner, backend);
+ },
+
model(params) {
let role = params.secret;
let backendModel = this.backendModel();
diff --git a/ui/app/routes/vault/cluster/secrets/backend/list.js b/ui/app/routes/vault/cluster/secrets/backend/list.js
index 3f1a876fd368..cdf5e6ac1f86 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/list.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/list.js
@@ -1,7 +1,9 @@
import { set } from '@ember/object';
import { hash, all } from 'rsvp';
import Route from '@ember/routing/route';
+import { getOwner } from '@ember/application';
import { supportedSecretBackends } from 'vault/helpers/supported-secret-backends';
+import { inject as service } from '@ember/service';
const SUPPORTED_BACKENDS = supportedSecretBackends();
@@ -19,24 +21,29 @@ export default Route.extend({
},
templateName: 'vault/cluster/secrets/backend/list',
+ pathHelp: service('path-help'),
beforeModel() {
- let { backend } = this.paramsFor('vault.cluster.secrets.backend');
+ let owner = getOwner(this);
let { secret } = this.paramsFor(this.routeName);
- let backendModel = this.store.peekRecord('secret-engine', backend);
- let type = backendModel && backendModel.get('engineType');
+ let { backend, tab } = this.paramsFor('vault.cluster.secrets.backend');
+ let secretEngine = this.store.peekRecord('secret-engine', backend);
+ let type = secretEngine && secretEngine.get('engineType');
if (!type || !SUPPORTED_BACKENDS.includes(type)) {
return this.transitionTo('vault.cluster.secrets');
}
if (this.routeName === 'vault.cluster.secrets.backend.list' && !secret.endsWith('/')) {
return this.replaceWith('vault.cluster.secrets.backend.list', secret + '/');
}
- this.store.unloadAll('capabilities');
+ let modelType = this.getModelType(backend, tab);
+ return this.pathHelp.getNewModel(modelType, owner, backend).then(() => {
+ this.store.unloadAll('capabilities');
+ });
},
getModelType(backend, tab) {
- let backendModel = this.store.peekRecord('secret-engine', backend);
- let type = backendModel.get('engineType');
+ let secretEngine = this.store.peekRecord('secret-engine', backend);
+ let type = secretEngine.get('engineType');
let types = {
transit: 'transit-key',
ssh: 'role-ssh',
@@ -44,8 +51,8 @@ export default Route.extend({
pki: tab === 'certs' ? 'pki-certificate' : 'role-pki',
// secret or secret-v2
cubbyhole: 'secret',
- kv: backendModel.get('modelTypeForKV'),
- generic: backendModel.get('modelTypeForKV'),
+ kv: secretEngine.get('modelTypeForKV'),
+ generic: secretEngine.get('modelTypeForKV'),
};
return types[type];
},
diff --git a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js
index 943ad2a54a0a..f87573a5dee1 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js
@@ -1,11 +1,14 @@
import { set } from '@ember/object';
import { hash, resolve } from 'rsvp';
+import { inject as service } from '@ember/service';
+import DS from 'ember-data';
import Route from '@ember/routing/route';
import utils from 'vault/lib/key-utils';
+import { getOwner } from '@ember/application';
import UnloadModelRoute from 'vault/mixins/unload-model-route';
-import DS from 'ember-data';
export default Route.extend(UnloadModelRoute, {
+ pathHelp: service('path-help'),
capabilities(secret) {
const { backend } = this.paramsFor('vault.cluster.secrets.backend');
let backendModel = this.modelFor('vault.cluster.secrets.backend');
@@ -35,15 +38,27 @@ export default Route.extend(UnloadModelRoute, {
// perhaps in the future we could recurse _for_ users, but for now, just kick them
// back to the list
const { secret } = this.paramsFor(this.routeName);
- const parentKey = utils.parentKeyForKey(secret);
- const mode = this.routeName.split('.').pop();
- if (mode === 'edit' && utils.keyIsFolder(secret)) {
- if (parentKey) {
- return this.transitionTo('vault.cluster.secrets.backend.list', parentKey);
- } else {
- return this.transitionTo('vault.cluster.secrets.backend.list-root');
+ return this.buildModel(secret).then(() => {
+ const parentKey = utils.parentKeyForKey(secret);
+ const mode = this.routeName.split('.').pop();
+ if (mode === 'edit' && utils.keyIsFolder(secret)) {
+ if (parentKey) {
+ return this.transitionTo('vault.cluster.secrets.backend.list', parentKey);
+ } else {
+ return this.transitionTo('vault.cluster.secrets.backend.list-root');
+ }
}
+ });
+ },
+
+ buildModel(secret) {
+ const { backend } = this.paramsFor('vault.cluster.secrets.backend');
+ let modelType = this.modelType(backend, secret);
+ if (['secret', 'secret-v2'].includes(modelType)) {
+ return resolve();
}
+ let owner = getOwner(this);
+ return this.pathHelp.getNewModel(modelType, owner, backend);
},
modelType(backend, secret) {
diff --git a/ui/app/routes/vault/cluster/secrets/backend/sign.js b/ui/app/routes/vault/cluster/secrets/backend/sign.js
index 8c33298b01fc..c187a089d183 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/sign.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/sign.js
@@ -14,6 +14,10 @@ export default Route.extend(UnloadModel, {
};
},
+ pathForType() {
+ return 'sign';
+ },
+
model(params) {
const role = params.secret;
const backendModel = this.backendModel();
diff --git a/ui/app/routes/vault/cluster/settings/auth/configure/section.js b/ui/app/routes/vault/cluster/settings/auth/configure/section.js
index cc7c358d0992..6ee745bebf45 100644
--- a/ui/app/routes/vault/cluster/settings/auth/configure/section.js
+++ b/ui/app/routes/vault/cluster/settings/auth/configure/section.js
@@ -4,10 +4,12 @@ import Route from '@ember/routing/route';
import RSVP from 'rsvp';
import DS from 'ember-data';
import UnloadModelRoute from 'vault/mixins/unload-model-route';
+import { getOwner } from '@ember/application';
export default Route.extend(UnloadModelRoute, {
modelPath: 'model.model',
- wizard: service(),
+ pathHelp: service('path-help'),
+
modelType(backendType, section) {
const MODELS = {
'aws-client': 'auth-config/aws/client',
@@ -25,15 +27,22 @@ export default Route.extend(UnloadModelRoute, {
return MODELS[`${backendType}-${section}`];
},
+ beforeModel() {
+ const { section_name } = this.paramsFor(this.routeName);
+ if (section_name === 'options') {
+ return;
+ }
+ const { method } = this.paramsFor('vault.cluster.settings.auth.configure');
+ const backend = this.modelFor('vault.cluster.settings.auth.configure');
+ const modelType = this.modelType(backend.type, section_name);
+ let owner = getOwner(this);
+ return this.pathHelp.getNewModel(modelType, owner, method);
+ },
+
model(params) {
const backend = this.modelFor('vault.cluster.settings.auth.configure');
const { section_name: section } = params;
if (section === 'options') {
- this.get('wizard').transitionFeatureMachine(
- this.get('wizard.featureState'),
- 'EDIT',
- backend.get('type')
- );
return RSVP.hash({
model: backend,
section,
@@ -47,11 +56,6 @@ export default Route.extend(UnloadModelRoute, {
}
const model = this.store.peekRecord(modelType, backend.id);
if (model) {
- this.get('wizard').transitionFeatureMachine(
- this.get('wizard.featureState'),
- 'EDIT',
- backend.get('type')
- );
return RSVP.hash({
model,
section,
@@ -60,11 +64,6 @@ export default Route.extend(UnloadModelRoute, {
return this.store
.findRecord(modelType, backend.id)
.then(config => {
- this.get('wizard').transitionFeatureMachine(
- this.get('wizard.featureState'),
- 'EDIT',
- backend.get('type')
- );
config.set('backend', backend);
return RSVP.hash({
model: config,
diff --git a/ui/app/services/path-help.js b/ui/app/services/path-help.js
new file mode 100644
index 000000000000..e7fa3883b2fd
--- /dev/null
+++ b/ui/app/services/path-help.js
@@ -0,0 +1,59 @@
+/*
+ This service is used to pull an OpenAPI document describing the
+ shape of data at a specific path to hydrate a model with attrs it
+ has little (or no) information about.
+*/
+import Service from '@ember/service';
+
+import { getOwner } from '@ember/application';
+import { expandOpenApiProps, combineAttributes } from 'vault/utils/openapi-to-attrs';
+import { resolve } from 'rsvp';
+
+export function sanitizePath(path) {
+ //remove whitespace + remove trailing and leading slashes
+ return path.trim().replace(/^\/+|\/+$/g, '');
+}
+
+export default Service.extend({
+ attrs: null,
+ ajax(url, options = {}) {
+ let appAdapter = getOwner(this).lookup(`adapter:application`);
+ let { data } = options;
+ return appAdapter.ajax(url, 'GET', {
+ data,
+ });
+ },
+
+ //Makes a call to grab the OpenAPI document.
+ //Returns relevant information from OpenAPI
+ //as determined by the expandOpenApiProps util
+ getProps(helpUrl, backend) {
+ return this.ajax(helpUrl, backend).then(help => {
+ let path = Object.keys(help.openapi.paths)[0];
+ let props = help.openapi.paths[path].post.requestBody.content['application/json'].schema.properties;
+ return expandOpenApiProps(props);
+ });
+ },
+
+ getNewModel(modelType, owner, backend) {
+ let name = `model:${modelType}`;
+ let newModel = owner.factoryFor(name).class;
+ if (newModel.merged || newModel.prototype.useOpenAPI !== true) {
+ return resolve();
+ }
+ let helpUrl = newModel.prototype.getHelpUrl(backend);
+
+ return this.getProps(helpUrl, backend).then(props => {
+ if (owner.hasRegistration(name) && !newModel.merged) {
+ let { attrs, newFields } = combineAttributes(newModel.attributes, props);
+ newModel = newModel.extend(attrs, { newFields });
+ } else {
+ //generate a whole new model
+ }
+
+ newModel.reopenClass({ merged: true });
+ owner.unregister(name);
+ owner.register(name, newModel);
+ });
+ },
+});
diff --git a/ui/app/templates/components/auth-config-form/config.hbs b/ui/app/templates/components/auth-config-form/config.hbs
index 769d98251295..4f9d55fd6079 100644
--- a/ui/app/templates/components/auth-config-form/config.hbs
+++ b/ui/app/templates/components/auth-config-form/config.hbs
@@ -11,8 +11,13 @@
{{/if}}
-
+
Save
-
+
\ No newline at end of file
diff --git a/ui/app/templates/components/auth-config-form/options.hbs b/ui/app/templates/components/auth-config-form/options.hbs
index 3c7cc9b0bb84..b7630d2856e3 100644
--- a/ui/app/templates/components/auth-config-form/options.hbs
+++ b/ui/app/templates/components/auth-config-form/options.hbs
@@ -7,8 +7,13 @@
{{/each}}
-
+
Update Options
-
+
\ No newline at end of file
diff --git a/ui/app/templates/components/form-field.hbs b/ui/app/templates/components/form-field.hbs
index c84e51cb77e6..f2d363158fa7 100644
--- a/ui/app/templates/components/form-field.hbs
+++ b/ui/app/templates/components/form-field.hbs
@@ -1,7 +1,19 @@
{{#unless
(or
- (and attr.options.editType (not-eq attr.options.editType "textarea"))
(eq attr.type "boolean")
+ (contains
+ attr.options.editType
+ (array
+ "boolean"
+ "searchSelect"
+ "mountAccessor"
+ "kv"
+ "file"
+ "ttl"
+ "stringArray"
+ "json"
+ )
+ )
)
}}
@@ -114,6 +126,7 @@
}}
{{else if (eq attr.options.editType "stringArray")}}
{{string-list
+ data-test-input=attr.name
label=labelString
warning=attr.options.warning
helpText=attr.options.helpText
@@ -124,9 +137,10 @@
-{{else if (or (eq attr.type 'number') (eq attr.type 'string'))}}
+ @allowCopy="true"
+ />
+
+{{else if (or (eq attr.type "number") (eq attr.type "string"))}}
{{#if (eq attr.options.editType "textarea")}}
-
- {{#if showConfig}}
- {{#with (find-by 'type' mountModel.type mountTypes) as |typeInfo|}}
-
+
+ {{#if showEnable}}
+ {{#with (find-by "type" mountModel.type mountTypes) as |typeInfo|}}
+
+
{{#if (eq mountType "auth")}}
- Enable {{typeInfo.displayName}} authentication method
+ {{concat "Enable " typeInfo.displayName " authentication method"}}
{{else}}
- Enable {{typeInfo.displayName}} secrets engine
+ {{concat "Enable " typeInfo.displayName " secrets engine"}}
{{/if}}
{{/with}}
+ {{else if (eq mountType "auth")}}
+ Enable an authentication method
{{else}}
- {{#if (eq mountType "auth")}}
- Enable an authentication method
- {{else}}
- Enable a secrets engine
- {{/if}}
+ Enable a secrets engine
{{/if}}
-
+
+
{{message-error model=mountModel}}
- {{#if showConfig}}
- {{form-field-groups model=mountModel onChange=(action "onTypeChange") renderGroup="default"}}
- {{#if mountModel.authConfigs.firstObject}}
- {{form-field-groups model=mountModel.authConfigs.firstObject}}
- {{/if}}
- {{form-field-groups model=mountModel onChange=(action "onTypeChange") renderGroup="Method Options"}}
+ {{#if showEnable}}
+ {{form-field-groups
+ model=mountModel
+ onChange=(action "onTypeChange")
+ renderGroup="default"
+ }}
+ {{form-field-groups
+ model=mountModel
+ onChange=(action "onTypeChange")
+ renderGroup="Method Options"
+ }}
{{else}}
{{#each (array "generic" "cloud" "infra") as |category|}}
- {{#each (filter-by "category" category mountTypes) as |type|}}
-
-
- {{type.displayName}}
-
-
-
- {{/each}}
+ {{#each (filter-by "category" category mountTypes) as |type|}}
+
+
+
+ {{type.displayName}}
+
+
+
+
+ {{/each}}
{{/each}}
{{/if}}
- {{#if showConfig}}
-
-
- {{#if (eq mountType "auth")}}
- Enable Method
- {{else}}
- Enable Engine
- {{/if}}
-
-
-
-
- Back
-
-
+ {{#if showEnable}}
+
+
+ {{#if (eq mountType "auth")}}
+ Enable Method
+ {{else}}
+ Enable Engine
+ {{/if}}
+
+
+
+
+ Back
+
+
{{else}}
+ >
Next
{{/if}}
-
+
\ No newline at end of file
diff --git a/ui/app/templates/components/string-list.hbs b/ui/app/templates/components/string-list.hbs
index 43a7a73b3d1d..79fcde0c6124 100644
--- a/ui/app/templates/components/string-list.hbs
+++ b/ui/app/templates/components/string-list.hbs
@@ -2,17 +2,12 @@
{{label}}
{{#if helpText}}
- {{#info-tooltip}}
- {{helpText}}
- {{/info-tooltip}}
+ {{#info-tooltip}}{{helpText}}{{/info-tooltip}}
{{/if}}
{{/if}}
{{#if warning}}
-
+
{{/if}}
{{#each inputList as |data index|}}
@@ -30,14 +25,29 @@
{{#if (eq (inc index) inputList.length)}}
-
+
Add
{{else}}
-
- {{i-con size=22 glyph='trash-a' excludeIconClass=true class="is-large has-text-grey-light"}}
+
+ {{i-con
+ size=22
+ glyph="trash-a"
+ excludeIconClass=true
+ class="is-large has-text-grey-light"
+ }}
{{/if}}
-{{/each}}
+{{/each}}
\ No newline at end of file
diff --git a/ui/app/templates/components/wizard/auth-config.hbs b/ui/app/templates/components/wizard/auth-config.hbs
new file mode 100644
index 000000000000..0ec32b737180
--- /dev/null
+++ b/ui/app/templates/components/wizard/auth-config.hbs
@@ -0,0 +1,10 @@
+
+
+ You can update your new auth method configuration here.
+
+
\ No newline at end of file
diff --git a/ui/app/templates/components/wizard/auth-enable.hbs b/ui/app/templates/components/wizard/auth-enable.hbs
index df732ecbd1b3..087119bb9eb3 100644
--- a/ui/app/templates/components/wizard/auth-enable.hbs
+++ b/ui/app/templates/components/wizard/auth-enable.hbs
@@ -2,9 +2,9 @@
@headerText="Entering Auth Method details"
@docText="Docs: Authentication Methods"
@docPath="/docs/auth/index.html"
- @instructions='Customize your new method and click "Enable Method".'
+ @instructions="Name your method and click 'Enable Method'."
>
- Great! Now you can customize this method with a name and description that makes sense for your team, and fill out any options that are specific to this method.
+ Great! Now you can customize this method with a name and fill out general configuration under "Method Options".
-
+
\ No newline at end of file
diff --git a/ui/app/templates/partials/form-field-from-model.hbs b/ui/app/templates/partials/form-field-from-model.hbs
index 43acae746a43..ca7e49df25ab 100644
--- a/ui/app/templates/partials/form-field-from-model.hbs
+++ b/ui/app/templates/partials/form-field-from-model.hbs
@@ -1,23 +1,38 @@
- {{#unless (or attr.options.editType (eq attr.type 'boolean'))}}
+ {{#unless
+ (or
+ (contains
+ attr.options.editType
+ (array
+ "boolean"
+ "searchSelect"
+ "mountAccessor"
+ "kv"
+ "file"
+ "ttl"
+ "stringArray"
+ "json"
+ )
+ )
+ (eq attr.type "boolean")
+ )
+ }}
{{capitalize (or attr.options.label (humanize (dasherize attr.name)))}}
{{#if attr.options.helpText}}
- {{#info-tooltip}}
- {{attr.options.helpText}}
- {{/info-tooltip}}
+ {{#info-tooltip}}{{attr.options.helpText}}{{/info-tooltip}}
{{/if}}
{{/unless}}
{{#if attr.options.possibleValues}}
-
+
+ >
{{#each attr.options.possibleValues as |val|}}
{{val}}
@@ -26,34 +41,48 @@
- {{else if (eq attr.options.editType 'ttl')}}
- {{ttl-picker initialValue=(or (get model attr.name) attr.options.defaultValue) labelText=(if attr.options.label attr.options.label (humanize (dasherize attr.name))) setDefaultValue=false onChange=(action (mut (get model attr.name)))}}
- {{else if (or (eq attr.type 'number') (eq attr.type 'string'))}}
+ {{else if (eq attr.options.editType "ttl")}}
+ {{ttl-picker
+ initialValue=(or (get model attr.name) attr.options.defaultValue)
+ labelText=(if
+ attr.options.label attr.options.label (humanize (dasherize attr.name))
+ )
+ setDefaultValue=false
+ onChange=(action (mut (get model attr.name)))
+ }}
+ {{else if (or (eq attr.type "number") (eq attr.type "string"))}}
- {{input id=attr.name value=(get model (or attr.options.fieldValue attr.name)) class="input" data-test-input=attr.name}}
+ {{input
+ id=attr.name
+ value=(get model (or attr.options.fieldValue attr.name))
+ class="input"
+ data-test-input=attr.name
+ }}
- {{else if (eq attr.type 'boolean')}}
+ {{else if (eq attr.type "boolean")}}
-
-
- {{capitalize (or attr.options.label (humanize (dasherize attr.name)))}}
- {{#if attr.options.helpText}}
- {{#info-tooltip}}
- {{attr.options.helpText}}
- {{/info-tooltip}}
- {{/if}}
-
-
- {{else if (eq attr.type 'object')}}
- {{json-editor
- value=(if (get model attr.name) (stringify (get model attr.name)) emptyData)
- valueUpdated=(action "codemirrorUpdated" attr.name)
- }}
- {{/if}}
-
+
+
+
+ {{capitalize (or attr.options.label (humanize (dasherize attr.name)))}}
+ {{#if attr.options.helpText}}
+ {{#info-tooltip}}{{attr.options.helpText}}{{/info-tooltip}}
+ {{/if}}
+
+
+ {{else if (eq attr.type "object")}}
+ {{json-editor
+ value=(if
+ (get model attr.name) (stringify (get model attr.name)) emptyData
+ )
+ valueUpdated=(action "codemirrorUpdated" attr.name)
+ }}
+ {{/if}}
+
\ No newline at end of file
diff --git a/ui/app/templates/vault/cluster/settings/auth/configure/section.hbs b/ui/app/templates/vault/cluster/settings/auth/configure/section.hbs
index 9c2c02d0c60c..d26541cbbb5d 100644
--- a/ui/app/templates/vault/cluster/settings/auth/configure/section.hbs
+++ b/ui/app/templates/vault/cluster/settings/auth/configure/section.hbs
@@ -1,5 +1,5 @@
-{{#if (eq model.section "options") }}
+{{#if (eq model.section "options")}}
{{auth-config-form/options model.model}}
{{else}}
{{auth-config-form/config model.model}}
-{{/if}}
+{{/if}}
\ No newline at end of file
diff --git a/ui/app/templates/vault/cluster/settings/auth/enable.hbs b/ui/app/templates/vault/cluster/settings/auth/enable.hbs
index 4f459ae77890..086bf983f8b8 100644
--- a/ui/app/templates/vault/cluster/settings/auth/enable.hbs
+++ b/ui/app/templates/vault/cluster/settings/auth/enable.hbs
@@ -1,4 +1 @@
-
+
\ No newline at end of file
diff --git a/ui/app/utils/openapi-to-attrs.js b/ui/app/utils/openapi-to-attrs.js
new file mode 100644
index 000000000000..e31b3f31bc49
--- /dev/null
+++ b/ui/app/utils/openapi-to-attrs.js
@@ -0,0 +1,89 @@
+import DS from 'ember-data';
+const { attr } = DS;
+import { assign } from '@ember/polyfills';
+import { isEmpty } from '@ember/utils';
+
+export const expandOpenApiProps = function(props) {
+ let attrs = {};
+ // expand all attributes
+ for (let prop in props) {
+ let details = props[prop];
+ if (details.deprecated === true) {
+ continue;
+ }
+ if (details.type === 'integer') {
+ details.type = 'number';
+ }
+ let editType = details.type;
+ if (details.format === 'seconds') {
+ editType = 'ttl';
+ } else if (details.items) {
+ editType = details.items.type + details.type.capitalize();
+ }
+ attrs[prop.camelize()] = {
+ editType: editType,
+ type: details.type,
+ };
+ if (details['x-vault-displayName']) {
+ attrs[prop.camelize()].label = details['x-vault-displayName'];
+ }
+ if (details['enum']) {
+ attrs[prop.camelize()].possibleValues = details['enum'];
+ }
+ if (details['x-vault-displayValue']) {
+ attrs[prop.camelize()].defaultValue = details['x-vault-displayValue'];
+ } else {
+ if (!isEmpty(details['default'])) {
+ attrs[prop.camelize()].defaultValue = details['default'];
+ }
+ }
+ }
+ return attrs;
+};
+
+export const combineAttributes = function(oldAttrs, newProps) {
+ let newAttrs = {};
+ let newFields = [];
+ oldAttrs.forEach(function(value, name) {
+ if (newProps[name]) {
+ newAttrs[name] = attr(newProps[name].type, assign({}, newProps[name], value.options));
+ } else {
+ newAttrs[name] = attr(value.type, value.options);
+ }
+ });
+ for (let prop in newProps) {
+ if (newAttrs[prop]) {
+ continue;
+ } else {
+ newAttrs[prop] = attr(newProps[prop].type, newProps[prop]);
+ newFields.push(prop);
+ }
+ }
+ return { attrs: newAttrs, newFields };
+};
+
+export const combineFields = function(currentFields, newFields, excludedFields) {
+ let otherFields = newFields.filter(field => {
+ return !currentFields.includes(field) && !excludedFields.includes(field);
+ });
+ if (otherFields.length) {
+ currentFields = currentFields.concat(otherFields);
+ }
+ return currentFields;
+};
+
+export const combineFieldGroups = function(currentGroups, newFields, excludedFields) {
+ let allFields = [];
+ for (let group of currentGroups) {
+ let fieldName = Object.keys(group)[0];
+ allFields = allFields.concat(group[fieldName]);
+ }
+ let otherFields = newFields.filter(field => {
+ return !allFields.includes(field) && !excludedFields.includes(field);
+ });
+ if (otherFields.length) {
+ currentGroups[0].default = currentGroups[0].default.concat(otherFields);
+ }
+
+ return currentGroups;
+};
diff --git a/ui/tests/acceptance/settings/auth/configure/section-test.js b/ui/tests/acceptance/settings/auth/configure/section-test.js
index 0216a21b561f..8ad25718f662 100644
--- a/ui/tests/acceptance/settings/auth/configure/section-test.js
+++ b/ui/tests/acceptance/settings/auth/configure/section-test.js
@@ -33,7 +33,7 @@ module('Acceptance | settings/auth/configure/section', function(hooks) {
await withFlash(page.save(), () => {
assert.equal(
page.flash.latestMessage,
- `The configuration options were saved successfully.`,
+ `The configuration was saved successfully.`,
'success flash shows'
);
});
diff --git a/ui/tests/acceptance/settings/auth/enable-test.js b/ui/tests/acceptance/settings/auth/enable-test.js
index 3e24f010bedb..aea0cd267887 100644
--- a/ui/tests/acceptance/settings/auth/enable-test.js
+++ b/ui/tests/acceptance/settings/auth/enable-test.js
@@ -28,9 +28,11 @@ module('Acceptance | settings/auth/enable', function(hooks) {
});
assert.equal(
currentRouteName(),
- 'vault.cluster.access.methods',
- 'redirects to the auth backend list page'
+ 'vault.cluster.settings.auth.configure.section',
+ 'redirects to the auth config page'
);
+
+ await listPage.visit();
assert.ok(listPage.findLinkById(path), 'mount is present in the list');
});
});
diff --git a/ui/tests/integration/components/auth-config-form/options-test.js b/ui/tests/integration/components/auth-config-form/options-test.js
index 77459e98f4ff..f2200c14e82d 100644
--- a/ui/tests/integration/components/auth-config-form/options-test.js
+++ b/ui/tests/integration/components/auth-config-form/options-test.js
@@ -5,17 +5,33 @@ import { setupRenderingTest } from 'ember-qunit';
import { render, settled } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
import sinon from 'sinon';
+import Service from '@ember/service';
import { create } from 'ember-cli-page-object';
import authConfigForm from 'vault/tests/pages/components/auth-config-form/options';
const component = create(authConfigForm);
+const routerService = Service.extend({
+ transitionTo() {
+ return {
+ followRedirects() {
+ return resolve();
+ },
+ };
+ },
+ replaceWith() {
+ return resolve();
+ },
+});
module('Integration | Component | auth-config-form options', function(hooks) {
setupRenderingTest(hooks);
hooks.beforeEach(function() {
this.owner.lookup('service:flash-messages').registerTypes(['success']);
+ this.owner.register('service:router', routerService);
+ this.router = this.owner.lookup('service:router');
+
component.setContext(this);
});
diff --git a/ui/tests/integration/components/auth-form-test.js b/ui/tests/integration/components/auth-form-test.js
index 70f84c788db5..2330fbd7d5c8 100644
--- a/ui/tests/integration/components/auth-form-test.js
+++ b/ui/tests/integration/components/auth-form-test.js
@@ -36,9 +36,6 @@ const routerService = Service.extend({
},
};
},
- replaceWith() {
- return resolve();
- },
});
module('Integration | Component | auth form', function(hooks) {
diff --git a/ui/tests/integration/components/mount-backend-form-test.js b/ui/tests/integration/components/mount-backend-form-test.js
index c717a2f512c1..a01798b3a96c 100644
--- a/ui/tests/integration/components/mount-backend-form-test.js
+++ b/ui/tests/integration/components/mount-backend-form-test.js
@@ -71,59 +71,4 @@ module('Integration | Component | mount backend form', function(hooks) {
assert.ok(enableRequest, 'it calls enable on an auth method');
assert.ok(spy.calledOnce, 'calls the passed success method');
});
-
- test('it calls the correct jwt config', async function(assert) {
- this.server.post('/v1/sys/auth/jwt', () => {
- return [204, { 'Content-Type': 'application/json' }];
- });
-
- this.server.post('/v1/auth/jwt/config', () => {
- return [
- 400,
- { 'Content-Type': 'application/json' },
- JSON.stringify({ errors: ['there was an error'] }),
- ];
- });
- await render(hbs` `);
-
- await component.selectType('jwt');
- await component.next();
- await component.fillIn('oidcDiscoveryUrl', 'host');
- component.submit();
-
- later(() => run.cancelTimers(), 50);
- await settled();
- let configRequest = this.server.handledRequests.findBy('url', '/v1/auth/jwt/config');
- assert.ok(configRequest, 'it calls the config url');
- });
-
- test('it calls mount config error', async function(assert) {
- this.server.post('/v1/sys/auth/bar', () => {
- return [204, { 'Content-Type': 'application/json' }];
- });
- this.server.post('/v1/auth/bar/config', () => {
- return [
- 400,
- { 'Content-Type': 'application/json' },
- JSON.stringify({ errors: ['there was an error'] }),
- ];
- });
- const spy = sinon.spy();
- const spy2 = sinon.spy();
- this.set('onMountSuccess', spy);
- this.set('onConfigError', spy2);
- await render(hbs`{{mount-backend-form onMountSuccess=onMountSuccess onConfigError=onConfigError}}`);
-
- await component.selectType('kubernetes');
- await component.next().path('bar');
- // kubernetes requires a host + a cert / pem, so only filling the host will error
- await component.fillIn('kubernetesHost', 'host');
- component.submit();
- later(() => run.cancelTimers(), 50);
- await settled();
- let enableRequest = this.server.handledRequests.findBy('url', '/v1/sys/auth/bar');
- assert.ok(enableRequest, 'it calls enable on an auth method');
- assert.equal(spy.callCount, 0, 'does not call the success method');
- assert.ok(spy2.calledOnce, 'calls the passed error method');
- });
});
diff --git a/ui/tests/pages/secrets/backend/pki/edit-role.js b/ui/tests/pages/secrets/backend/pki/edit-role.js
index 23935be3e26a..76b51e24daac 100644
--- a/ui/tests/pages/secrets/backend/pki/edit-role.js
+++ b/ui/tests/pages/secrets/backend/pki/edit-role.js
@@ -9,7 +9,7 @@ export default create({
toggleOptions: clickable('[data-test-toggle-group="Options"]'),
name: fillable('[data-test-input="name"]'),
allowAnyName: clickable('[data-test-input="allowAnyName"]'),
- allowedDomains: fillable('[data-test-input="allowedDomains"]'),
+ allowedDomains: fillable('[data-test-input="allowedDomains"] input'),
save: clickable('[data-test-role-create]'),
deleteBtn: clickable('[data-test-role-delete] button'),
confirmBtn: clickable('[data-test-confirm-button]'),
diff --git a/ui/tests/unit/machines/auth-machine-test.js b/ui/tests/unit/machines/auth-machine-test.js
index 90b179a03110..d430f78192a2 100644
--- a/ui/tests/unit/machines/auth-machine-test.js
+++ b/ui/tests/unit/machines/auth-machine-test.js
@@ -23,16 +23,16 @@ module('Unit | Machine | auth-machine', function() {
event: 'CONTINUE',
params: null,
expectedResults: {
- value: 'list',
+ value: 'config',
actions: [
- { component: 'wizard/auth-list', level: 'step', type: 'render' },
- { component: 'wizard/mounts-wizard', level: 'feature', type: 'render' },
+ { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' },
+ { type: 'render', level: 'step', component: 'wizard/auth-config' },
],
},
},
{
- currentState: 'list',
- event: 'DETAILS',
+ currentState: 'config',
+ event: 'CONTINUE',
expectedResults: {
value: 'details',
actions: [
diff --git a/ui/tests/unit/utils/openapi-to-attrs-test.js b/ui/tests/unit/utils/openapi-to-attrs-test.js
new file mode 100644
index 000000000000..8b9464a22f44
--- /dev/null
+++ b/ui/tests/unit/utils/openapi-to-attrs-test.js
@@ -0,0 +1,191 @@
+import { expandOpenApiProps, combineAttributes, combineFieldGroups } from 'vault/utils/openapi-to-attrs';
+import { module, test } from 'qunit';
+import DS from 'ember-data';
+const { attr } = DS;
+
+module('Unit | Util | OpenAPI Data Utilities', function() {
+ const OPENAPI_RESPONSE_PROPS = {
+ ttl: {
+ type: 'string',
+ format: 'seconds',
+ 'x-vault-displayName': 'TTL',
+ },
+ 'awesome-people': {
+ type: 'array',
+ items: {
+ type: 'string',
+ },
+ 'x-vault-displayValue': 'Grace Hopper,Lady Ada',
+ },
+ 'favorite-ice-cream': {
+ type: 'string',
+ enum: ['vanilla', 'chocolate', 'strawberry'],
+ },
+ 'default-value': {
+ default: 30,
+ 'x-vault-displayValue': 300,
+ type: 'integer',
+ },
+ default: {
+ default: 30,
+ type: 'integer',
+ },
+ };
+ const EXPANDED_PROPS = {
+ ttl: {
+ editType: 'ttl',
+ type: 'string',
+ label: 'TTL',
+ },
+ awesomePeople: {
+ editType: 'stringArray',
+ type: 'array',
+ defaultValue: 'Grace Hopper,Lady Ada',
+ },
+ favoriteIceCream: {
+ editType: 'string',
+ type: 'string',
+ possibleValues: ['vanilla', 'chocolate', 'strawberry'],
+ },
+ defaultValue: {
+ editType: 'number',
+ type: 'number',
+ defaultValue: 300,
+ },
+ default: {
+ editType: 'number',
+ type: 'number',
+ defaultValue: 30,
+ },
+ };
+
+ const EXISTING_MODEL_ATTRS = [
+ {
+ key: 'name',
+ value: {
+ isAttribute: true,
+ name: 'name',
+ options: {
+ editType: 'string',
+ label: 'Role name',
+ },
+ },
+ },
+ {
+ key: 'awesomePeople',
+ value: {
+ isAttribute: true,
+ name: 'awesomePeople',
+ options: {
+ label: 'People Who Are Awesome',
+ },
+ },
+ },
+ ];
+
+ const COMBINED_ATTRS = {
+ name: attr('string', {
+ editType: 'string',
+ type: 'string',
+ label: 'Role name',
+ }),
+ ttl: attr('string', {
+ editType: 'ttl',
+ type: 'string',
+ label: 'TTL',
+ }),
+ awesomePeople: attr({
+ label: 'People Who Are Awesome',
+ editType: 'stringArray',
+ type: 'array',
+ defaultValue: 'Grace Hopper,Lady Ada',
+ }),
+ favoriteIceCream: attr('string', {
+ type: 'string',
+ editType: 'string',
+ possibleValues: ['vanilla', 'chocolate', 'strawberry'],
+ }),
+ };
+
+ const NEW_FIELDS = ['one', 'two', 'three'];
+
+ test('it creates objects from OpenAPI schema props', function(assert) {
+ const generatedProps = expandOpenApiProps(OPENAPI_RESPONSE_PROPS);
+ for (let propName in EXPANDED_PROPS) {
+ assert.deepEqual(EXPANDED_PROPS[propName], generatedProps[propName], `correctly expands ${propName}`);
+ }
+ });
+
+ test('it combines OpenAPI props with existing model attrs', function(assert) {
+ const combined = combineAttributes(EXISTING_MODEL_ATTRS, EXPANDED_PROPS);
+ for (let { key } of EXISTING_MODEL_ATTRS) {
+ assert.deepEqual(COMBINED_ATTRS[key], combined[key], `correctly combines ${key}`);
+ }
+ });
+
+ test('it adds new fields from OpenAPI to fieldGroups except for exclusions', function(assert) {
+ let modelFieldGroups = [
+ { default: ['name', 'awesomePeople'] },
+ {
+ Options: ['ttl'],
+ },
+ ];
+ const excludedFields = ['two'];
+ const expectedGroups = [
+ { default: ['name', 'awesomePeople', 'one', 'three'] },
+ {
+ Options: ['ttl'],
+ },
+ ];
+ const newFieldGroups = combineFieldGroups(modelFieldGroups, NEW_FIELDS, excludedFields);
+ for (let groupName in modelFieldGroups) {
+ assert.deepEqual(
+ newFieldGroups[groupName],
+ expectedGroups[groupName],
+ 'it incorporates all new fields except for those excluded'
+ );
+ }
+ });
+ test('it adds all new fields from OpenAPI to fieldGroups when excludedFields is empty', function(assert) {
+ let modelFieldGroups = [
+ { default: ['name', 'awesomePeople'] },
+ {
+ Options: ['ttl'],
+ },
+ ];
+ const excludedFields = [];
+ const expectedGroups = [
+ { default: ['name', 'awesomePeople', 'one', 'two', 'three'] },
+ {
+ Options: ['ttl'],
+ },
+ ];
+ const nonExcludedFieldGroups = combineFieldGroups(modelFieldGroups, NEW_FIELDS, excludedFields);
+ for (let groupName in modelFieldGroups) {
+ assert.deepEqual(
+ nonExcludedFieldGroups[groupName],
+ expectedGroups[groupName],
+ 'it incorporates all new fields'
+ );
+ }
+ });
+ test('it keeps fields the same when there are no brand new fields from OpenAPI', function(assert) {
+ let modelFieldGroups = [
+ { default: ['name', 'awesomePeople', 'two', 'one', 'three'] },
+ {
+ Options: ['ttl'],
+ },
+ ];
+ const excludedFields = [];
+ const expectedGroups = [
+ { default: ['name', 'awesomePeople', 'two', 'one', 'three'] },
+ {
+ Options: ['ttl'],
+ },
+ ];
+ const fieldGroups = combineFieldGroups(modelFieldGroups, NEW_FIELDS, excludedFields);
+ for (let groupName in modelFieldGroups) {
+ assert.deepEqual(fieldGroups[groupName], expectedGroups[groupName], 'it incorporates all new fields');
+ }
+ });
+});
From 6710bbe2965c3ac6030c25fb6c93739a6690f292 Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Thu, 14 Feb 2019 11:03:26 -0800
Subject: [PATCH 08/31] Update jwt plugin
---
.../vault-plugin-auth-jwt/Gopkg.lock | 26 +++++++++++--------
.../vault-plugin-auth-jwt/Gopkg.toml | 5 ----
.../hashicorp/vault-plugin-auth-jwt/cli.go | 15 +++++------
.../vault-plugin-auth-jwt/path_config.go | 10 -------
.../vault-plugin-auth-jwt/path_login.go | 4 +--
.../vault-plugin-auth-jwt/path_oidc.go | 12 ++++-----
.../vault-plugin-auth-jwt/path_role.go | 2 +-
.../vault-plugin-auth-jwt/path_ui.go | 2 +-
vendor/vendor.json | 8 +++---
9 files changed, 33 insertions(+), 51 deletions(-)
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
index e686681ccb2d..d56992e8aeb1 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.lock
@@ -101,11 +101,15 @@
version = "v1.0.0"
[[projects]]
- digest = "1:e18a77f3453d129d9a5b4cac4e912b21c2dd2af52a24a8fe6a8c241476ed7b6b"
+ branch = "master"
+ digest = "1:6c04f2be110107d22b82c18826df6f44004983147636e767e3a8bcd0ed228d16"
name = "github.com/hashicorp/go-plugin"
- packages = ["."]
+ packages = [
+ ".",
+ "internal/plugin",
+ ]
pruneopts = "UT"
- revision = "26219a000dd975abd5140a2ddbe415b366498be7"
+ revision = "b838ffee39ce7c7c38226c1413aa4ecae61b4d27"
[[projects]]
digest = "1:d260503602063d71718eb21f85c02133ad5eac894c2a6f0e0546b7dc017dc97e"
@@ -178,7 +182,7 @@
[[projects]]
branch = "master"
- digest = "1:f5bdd7b0d06bfa965cefa9c52af7f556bd079ff4328d67c89f6afdf4be7eabbe"
+ digest = "1:961541c49385b69f1d3ee6087df21d7d9595f98a8f29170e810267180ff6a9fb"
name = "github.com/hashicorp/vault"
packages = [
"api",
@@ -210,7 +214,7 @@
"version",
]
pruneopts = "UT"
- revision = "b16527d791ba46f74a608527b328957618aa0ae6"
+ revision = "5d444354923ab54c8207f8c8820cfe78c1572656"
[[projects]]
branch = "master"
@@ -309,7 +313,7 @@
"pbkdf2",
]
pruneopts = "UT"
- revision = "193df9c0f06f8bb35fba505183eaf0acc0136505"
+ revision = "74369b46fc6756741c016591724fd1cb8e26845f"
[[projects]]
branch = "master"
@@ -326,29 +330,29 @@
"trace",
]
pruneopts = "UT"
- revision = "65e2d4e15006aab9813ff8769e768bbf4bb667a0"
+ revision = "3a22650c66bd7f4fb6d1e8072ffd7b75c8a27898"
[[projects]]
branch = "master"
- digest = "1:e007b54f54cbd4214aa6d97a67d57bc2539991adb4e22ea92c482bbece8de469"
+ digest = "1:c664d4451770ebc9ab63d54bccb9e4f2c6106cde7e566ea02889930b836c7d4c"
name = "golang.org/x/oauth2"
packages = [
".",
"internal",
]
pruneopts = "UT"
- revision = "99b60b757ec124ebb7d6b7e97f153b19c10ce163"
+ revision = "3e8b2be1363542a95c52ea0796d4a40dacfb5b95"
[[projects]]
branch = "master"
- digest = "1:c9e49928119661a681af4037236af47654d6bd421c0af184962c890d0a61e0fb"
+ digest = "1:83552c87ddffa6a9095b7b4a005fcb8d35ff05108246b247a8bbddc78a91db64"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix",
]
pruneopts = "UT"
- revision = "3b5209105503162ded1863c307ac66fec31120dd"
+ revision = "983097b1a8a340cd1cc7df17d735154d89e10b1a"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml
index 9b1f3229db99..1752d2434178 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/Gopkg.toml
@@ -61,11 +61,6 @@
name = "gopkg.in/square/go-jose.v2"
version = "2.1.8"
-# Remove this once https://github.com/hashicorp/go-plugin/pull/97 is merged
-[[override]]
- name = "github.com/hashicorp/go-plugin"
- revision = "26219a000dd975abd5140a2ddbe415b366498be7"
-
[prune]
go-tests = true
unused-packages = true
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
index 6f50c59d986a..37b1cf7f2c40 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/cli.go
@@ -18,6 +18,8 @@ import (
const defaultMount = "oidc"
const defaultPort = "8300"
+var errorRegex = regexp.MustCompile(`(?s)Errors:.*\* *(.*)`)
+
type CLIHandler struct{}
type loginResp struct {
@@ -44,9 +46,6 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
}
role := m["role"]
- if role == "" {
- return nil, errors.New("a 'role' must be specified")
- }
authURL, err := fetchAuthURL(c, role, mount, port)
if err != nil {
@@ -54,7 +53,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
}
// Set up callback handler
- http.HandleFunc(fmt.Sprintf("/ui/vault/auth/%s/oidc/callback", mount), func(w http.ResponseWriter, req *http.Request) {
+ http.HandleFunc("/oidc/callback", func(w http.ResponseWriter, req *http.Request) {
var response string
query := req.URL.Query()
@@ -101,14 +100,14 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
case s := <-doneCh:
return s.secret, s.err
case <-sigintCh:
- return nil, errors.New("interrupted")
+ return nil, errors.New("Interrupted")
}
}
func fetchAuthURL(c *api.Client, role, mount, port string) (string, error) {
data := map[string]interface{}{
"role": role,
- "redirect_uri": fmt.Sprintf("http://localhost:%s/ui/vault/auth/%s/oidc/callback", port, mount),
+ "redirect_uri": fmt.Sprintf("http://localhost:%s/oidc/callback", port),
}
secret, err := c.Logical().Write(fmt.Sprintf("auth/%s/oidc/auth_url", mount), data)
@@ -157,9 +156,7 @@ func parseError(err error) (string, string) {
summary := "Login error"
detail := ""
- re := regexp.MustCompile(`(?s)Errors:.*\* *(.*)`)
-
- errorParts := re.FindStringSubmatch(err.Error())
+ errorParts := errorRegex.FindStringSubmatch(err.Error())
switch len(errorParts) {
case 0:
summary = ""
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
index 8c44519ab088..eede23d39e07 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_config.go
@@ -125,7 +125,6 @@ func (b *jwtAuthBackend) pathConfigRead(ctx context.Context, req *logical.Reques
"oidc_discovery_url": config.OIDCDiscoveryURL,
"oidc_discovery_ca_pem": config.OIDCDiscoveryCAPEM,
"oidc_client_id": config.OIDCClientID,
- "oidc_client_secret": config.OIDCClientSecret,
"default_role": config.DefaultRole,
"jwt_validation_pubkeys": config.JWTValidationPubKeys,
"jwt_supported_algs": config.JWTSupportedAlgs,
@@ -174,15 +173,6 @@ func (b *jwtAuthBackend) pathConfigWrite(ctx context.Context, req *logical.Reque
}
}
- case len(config.JWTSupportedAlgs) != 0:
- for _, a := range config.JWTSupportedAlgs {
- switch a {
- case oidc.RS256, oidc.RS384, oidc.RS512, oidc.ES256, oidc.ES384, oidc.ES512, oidc.PS256, oidc.PS384, oidc.PS512:
- default:
- return logical.ErrorResponse(fmt.Sprintf("Invalid supported algorithm: %s", a)), nil
- }
- }
-
default:
return nil, errors.New("unknown condition")
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go
index 70d7e943ad30..06ad379e84bb 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_login.go
@@ -138,7 +138,7 @@ func (b *jwtAuthBackend) pathLogin(ctx context.Context, req *logical.Request, d
}
case config.OIDCDiscoveryURL != "":
- allClaims, err = b.verifyToken(ctx, config, role, token)
+ allClaims, err = b.verifyOIDCToken(ctx, config, role, token)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
@@ -203,7 +203,7 @@ func (b *jwtAuthBackend) pathLoginRenew(ctx context.Context, req *logical.Reques
return resp, nil
}
-func (b *jwtAuthBackend) verifyToken(ctx context.Context, config *jwtConfig, role *jwtRole, rawToken string) (map[string]interface{}, error) {
+func (b *jwtAuthBackend) verifyOIDCToken(ctx context.Context, config *jwtConfig, role *jwtRole, rawToken string) (map[string]interface{}, error) {
allClaims := make(map[string]interface{})
provider, err := b.getProvider(ctx, config)
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go
index b437ed6e5474..6e70d7230057 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_oidc.go
@@ -17,7 +17,7 @@ import (
var oidcStateTimeout = 10 * time.Minute
-// OIDC error prefixes. This are searched for specifically by the UI, so any
+// OIDC error prefixes. These are searched for specifically by the UI, so any
// changes to them must be aligned with a UI change.
const errLoginFailed = "Vault login failed."
const errNoResponse = "No response from provider."
@@ -126,7 +126,7 @@ func (b *jwtAuthBackend) pathCallback(ctx context.Context, req *logical.Request,
}
// Parse and verify ID Token payload.
- allClaims, err := b.verifyToken(ctx, config, role, rawToken)
+ allClaims, err := b.verifyOIDCToken(ctx, config, role, rawToken)
if err != nil {
return logical.ErrorResponse("%s %s", errTokenVerification, err.Error()), nil
}
@@ -211,9 +211,9 @@ func (b *jwtAuthBackend) authURL(ctx context.Context, req *logical.Request, d *f
roleName := d.Get("role").(string)
if roleName == "" {
roleName = config.DefaultRole
- if roleName == "" {
- return logical.ErrorResponse("missing role"), nil
- }
+ }
+ if roleName == "" {
+ return logical.ErrorResponse("missing role"), nil
}
redirectURI := d.Get("redirect_uri").(string)
@@ -223,12 +223,10 @@ func (b *jwtAuthBackend) authURL(ctx context.Context, req *logical.Request, d *f
role, err := b.role(ctx, req.Storage, roleName)
if err != nil {
- logger.Warn("error loading role", "error", err)
return resp, nil
}
if role == nil || role.RoleType != "oidc" {
- logger.Warn("invalid role type", "role type", role)
return resp, nil
}
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go
index 6eb701601c4e..87d623fd4902 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_role.go
@@ -285,7 +285,7 @@ func (b *jwtAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.
roleType := data.Get("role_type").(string)
if roleType == "" {
- roleType = "jwt"
+ roleType = "oidc"
}
if roleType != "jwt" && roleType != "oidc" {
return logical.ErrorResponse("invalid 'role_type': %s", roleType), nil
diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go
index 7930f9ef8be5..8cbe42b468a7 100644
--- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go
+++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/path_ui.go
@@ -1,4 +1,4 @@
-// A throwaway file for super simple testing via a UI
+// A minimal UI page for simple manual testing without a full Vault deployment
package jwtauth
import (
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 85025862f438..87ef6184a3a4 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1409,12 +1409,10 @@
"revisionTime": "2019-02-01T21:54:14Z"
},
{
- "checksumSHA1": "jCtLHj3YAONxCcV6v6kifTrRJwM=",
+ "checksumSHA1": "9OCfD9jnPcOy6y9j+iyNon6tiZk=",
"path": "github.com/hashicorp/vault-plugin-auth-jwt",
- "revision": "6eaab2ed65f29101381ac871ffb06110b871a742",
- "revisionTime": "2019-02-13T00:58:38Z",
- "version": "=oidc-dev",
- "versionExact": "oidc-dev"
+ "revision": "a61556be673093888dc8119ee7c16ee964419f25",
+ "revisionTime": "2019-02-14T18:54:57Z"
},
{
"checksumSHA1": "NfVgV3CmKXGRsXk1sYVgMMRZ5Zc=",
From 8a59b0ca060b8086bbe6f43c1ab1326912be2094 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Thu, 14 Feb 2019 11:54:47 -0800
Subject: [PATCH 09/31] Remove deprecated CLI commands (#6112)
---
command/auth.go | 81 +------
command/auth_test.go | 101 --------
command/commands.go | 359 ----------------------------
command/login.go | 47 ----
command/login_test.go | 45 ----
command/main.go | 7 +-
command/operator_generate_root.go | 25 --
command/operator_init.go | 50 ----
command/operator_rekey.go | 70 ------
command/operator_unseal_test.go | 3 -
command/policies_deprecated.go | 57 -----
command/policies_deprecated_test.go | 96 --------
command/token_create.go | 20 --
command/token_renew.go | 13 -
14 files changed, 2 insertions(+), 972 deletions(-)
delete mode 100644 command/policies_deprecated.go
delete mode 100644 command/policies_deprecated_test.go
diff --git a/command/auth.go b/command/auth.go
index 489047815323..3c47b2b889c4 100644
--- a/command/auth.go
+++ b/command/auth.go
@@ -1,9 +1,6 @@
package command
import (
- "flag"
- "io"
- "io/ioutil"
"strings"
"github.com/mitchellh/cli"
@@ -13,10 +10,6 @@ var _ cli.Command = (*AuthCommand)(nil)
type AuthCommand struct {
*BaseCommand
-
- Handlers map[string]LoginHandler
-
- testStdin io.Reader // for tests
}
func (c *AuthCommand) Synopsis() string {
@@ -52,77 +45,5 @@ Usage: vault auth [options] [args]
}
func (c *AuthCommand) Run(args []string) int {
- // If we entered the run method, none of the subcommands picked up. This
- // means the user is still trying to use auth as "vault auth TOKEN" or
- // similar, so direct them to vault login instead.
- //
- // This run command is a bit messy to maintain BC for a bit. In the future,
- // it will just be a tiny function, but for now we have to maintain bc.
- //
- // Deprecation
- // TODO: remove in 0.9.0
-
- if len(args) == 0 {
- return cli.RunResultHelp
- }
-
- // Parse the args for our deprecations and defer to the proper areas.
- for _, arg := range args {
- switch {
- case strings.HasPrefix(arg, "-methods"):
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -methods flag is deprecated. Please use "+
- "\"vault auth list\" instead. This flag will be removed in "+
- "Vault 1.1.") + "\n")
- }
- return (&AuthListCommand{
- BaseCommand: &BaseCommand{
- UI: c.UI,
- client: c.client,
- },
- }).Run(nil)
- case strings.HasPrefix(arg, "-method-help"):
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -method-help flag is deprecated. Please use "+
- "\"vault auth help\" instead. This flag will be removed in "+
- "Vault 1.1.") + "\n")
- }
- // Parse the args to pull out the method, suppressing any errors because
- // there could be other flags that we don't care about.
- f := flag.NewFlagSet("", flag.ContinueOnError)
- f.Usage = func() {}
- f.SetOutput(ioutil.Discard)
- flagMethod := f.String("method", "", "")
- f.Parse(args)
-
- return (&AuthHelpCommand{
- BaseCommand: &BaseCommand{
- UI: c.UI,
- client: c.client,
- },
- Handlers: c.Handlers,
- }).Run([]string{*flagMethod})
- }
- }
-
- // If we got this far, we have an arg or a series of args that should be
- // passed directly to the new "vault login" command.
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The \"vault auth ARG\" command is deprecated and is now a "+
- "subcommand for interacting with auth methods. To authenticate "+
- "locally to Vault, use \"vault login\" instead. This backwards "+
- "compatibility will be removed in Vault 1.1.") + "\n")
- }
- return (&LoginCommand{
- BaseCommand: &BaseCommand{
- UI: c.UI,
- client: c.client,
- tokenHelper: c.tokenHelper,
- flagAddress: c.flagAddress,
- },
- Handlers: c.Handlers,
- }).Run(args)
+ return cli.RunResultHelp
}
diff --git a/command/auth_test.go b/command/auth_test.go
index 5ec0cf60d3ee..f0fd5d065d8b 100644
--- a/command/auth_test.go
+++ b/command/auth_test.go
@@ -1,13 +1,10 @@
package command
import (
- "strings"
"testing"
"github.com/mitchellh/cli"
- credToken "github.com/hashicorp/vault/builtin/credential/token"
- credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
"github.com/hashicorp/vault/command/token"
)
@@ -22,110 +19,12 @@ func testAuthCommand(tb testing.TB) (*cli.MockUi, *AuthCommand) {
// Override to our own token helper
tokenHelper: token.NewTestingTokenHelper(),
},
- Handlers: map[string]LoginHandler{
- "token": &credToken.CLIHandler{},
- "userpass": &credUserpass.CLIHandler{},
- },
}
}
func TestAuthCommand_Run(t *testing.T) {
t.Parallel()
- // TODO: remove in 0.9.0
- t.Run("deprecated_methods", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- ui, cmd := testAuthCommand(t)
- cmd.client = client
-
- // vault auth -methods -> vault auth list
- code := cmd.Run([]string{"-methods"})
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
- }
- stdout, stderr := ui.OutputWriter.String(), ui.ErrorWriter.String()
-
- if expected := "WARNING!"; !strings.Contains(stderr, expected) {
- t.Errorf("expected %q to contain %q", stderr, expected)
- }
-
- if expected := "token/"; !strings.Contains(stdout, expected) {
- t.Errorf("expected %q to contain %q", stdout, expected)
- }
- })
-
- t.Run("deprecated_method_help", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- ui, cmd := testAuthCommand(t)
- cmd.client = client
-
- // vault auth -method=foo -method-help -> vault auth help foo
- code := cmd.Run([]string{
- "-method=userpass",
- "-method-help",
- })
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
- }
- stdout, stderr := ui.OutputWriter.String(), ui.ErrorWriter.String()
-
- if expected := "WARNING!"; !strings.Contains(stderr, expected) {
- t.Errorf("expected %q to contain %q", stderr, expected)
- }
-
- if expected := "vault login"; !strings.Contains(stdout, expected) {
- t.Errorf("expected %q to contain %q", stdout, expected)
- }
- })
-
- t.Run("deprecated_login", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil {
- t.Fatal(err)
- }
- if _, err := client.Logical().Write("auth/my-auth/users/test", map[string]interface{}{
- "password": "test",
- "policies": "default",
- }); err != nil {
- t.Fatal(err)
- }
-
- ui, cmd := testAuthCommand(t)
- cmd.client = client
-
- // vault auth ARGS -> vault login ARGS
- code := cmd.Run([]string{
- "-method", "userpass",
- "-path", "my-auth",
- "username=test",
- "password=test",
- })
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
- }
- stdout, stderr := ui.OutputWriter.String(), ui.ErrorWriter.String()
-
- if expected := "WARNING!"; !strings.Contains(stderr, expected) {
- t.Errorf("expected %q to contain %q", stderr, expected)
- }
-
- if expected := "Success! You are now authenticated."; !strings.Contains(stdout, expected) {
- t.Errorf("expected %q to contain %q", stdout, expected)
- }
- })
-
t.Run("no_tabs", func(t *testing.T) {
t.Parallel()
diff --git a/command/commands.go b/command/commands.go
index 7fa711295568..e009d5733f04 100644
--- a/command/commands.go
+++ b/command/commands.go
@@ -1,7 +1,6 @@
package command
import (
- "fmt"
"os"
"os/signal"
"syscall"
@@ -131,43 +130,8 @@ var (
}
)
-// DeprecatedCommand is a command that wraps an existing command and prints a
-// deprecation notice and points the user to the new command. Deprecated
-// commands are always hidden from help output.
-type DeprecatedCommand struct {
- cli.Command
- UI cli.Ui
-
- // Old is the old command name, New is the new command name.
- Old, New string
-}
-
-// Help wraps the embedded Help command and prints a warning about deprecations.
-func (c *DeprecatedCommand) Help() string {
- c.warn()
- return c.Command.Help()
-}
-
-// Run wraps the embedded Run command and prints a warning about deprecation.
-func (c *DeprecatedCommand) Run(args []string) int {
- if Format(c.UI) == "table" {
- c.warn()
- }
- return c.Command.Run(args)
-}
-
-func (c *DeprecatedCommand) warn() {
- c.UI.Warn(wrapAtLength(fmt.Sprintf(
- "WARNING! The \"vault %s\" command is deprecated. Please use \"vault %s\" "+
- "instead. This command will be removed in Vault 1.1.",
- c.Old,
- c.New)))
- c.UI.Warn("")
-}
-
// Commands is the mapping of all the available commands.
var Commands map[string]cli.CommandFactory
-var DeprecatedCommands map[string]cli.CommandFactory
func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
loginHandlers := map[string]LoginHandler{
@@ -235,7 +199,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
"auth": func() (cli.Command, error) {
return &AuthCommand{
BaseCommand: getBaseCommand(),
- Handlers: loginHandlers,
}, nil
},
"auth disable": func() (cli.Command, error) {
@@ -614,328 +577,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
}, nil
},
}
-
- // Deprecated commands
- //
- // TODO: Remove not before 0.11.0
- DeprecatedCommands = map[string]cli.CommandFactory{
- "audit-disable": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "audit-disable",
- New: "audit disable",
- UI: ui,
- Command: &AuditDisableCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "audit-enable": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "audit-enable",
- New: "audit enable",
- UI: ui,
- Command: &AuditEnableCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "audit-list": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "audit-list",
- New: "audit list",
- UI: ui,
- Command: &AuditListCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "auth-disable": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "auth-disable",
- New: "auth disable",
- UI: ui,
- Command: &AuthDisableCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "auth-enable": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "auth-enable",
- New: "auth enable",
- UI: ui,
- Command: &AuthEnableCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "capabilities": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "capabilities",
- New: "token capabilities",
- UI: ui,
- Command: &TokenCapabilitiesCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "generate-root": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "generate-root",
- New: "operator generate-root",
- UI: ui,
- Command: &OperatorGenerateRootCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "init": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "init",
- New: "operator init",
- UI: ui,
- Command: &OperatorInitCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "key-status": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "key-status",
- New: "operator key-status",
- UI: ui,
- Command: &OperatorKeyStatusCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "renew": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "renew",
- New: "lease renew",
- UI: ui,
- Command: &LeaseRenewCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "revoke": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "revoke",
- New: "lease revoke",
- UI: ui,
- Command: &LeaseRevokeCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "mount": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "mount",
- New: "secrets enable",
- UI: ui,
- Command: &SecretsEnableCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "mount-tune": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "mount-tune",
- New: "secrets tune",
- UI: ui,
- Command: &SecretsTuneCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "mounts": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "mounts",
- New: "secrets list",
- UI: ui,
- Command: &SecretsListCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "policies": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "policies",
- New: "policy read\" or \"vault policy list", // lol
- UI: ui,
- Command: &PoliciesDeprecatedCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "policy-delete": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "policy-delete",
- New: "policy delete",
- UI: ui,
- Command: &PolicyDeleteCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "policy-write": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "policy-write",
- New: "policy write",
- UI: ui,
- Command: &PolicyWriteCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "rekey": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "rekey",
- New: "operator rekey",
- UI: ui,
- Command: &OperatorRekeyCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "remount": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "remount",
- New: "secrets move",
- UI: ui,
- Command: &SecretsMoveCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "rotate": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "rotate",
- New: "operator rotate",
- UI: ui,
- Command: &OperatorRotateCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "seal": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "seal",
- New: "operator seal",
- UI: ui,
- Command: &OperatorSealCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "step-down": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "step-down",
- New: "operator step-down",
- UI: ui,
- Command: &OperatorStepDownCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "token-create": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "token-create",
- New: "token create",
- UI: ui,
- Command: &TokenCreateCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "token-lookup": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "token-lookup",
- New: "token lookup",
- UI: ui,
- Command: &TokenLookupCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "token-renew": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "token-renew",
- New: "token renew",
- UI: ui,
- Command: &TokenRenewCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "token-revoke": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "token-revoke",
- New: "token revoke",
- UI: ui,
- Command: &TokenRevokeCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "unmount": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "unmount",
- New: "secrets disable",
- UI: ui,
- Command: &SecretsDisableCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
-
- "unseal": func() (cli.Command, error) {
- return &DeprecatedCommand{
- Old: "unseal",
- New: "operator unseal",
- UI: ui,
- Command: &OperatorUnsealCommand{
- BaseCommand: getBaseCommand(),
- },
- }, nil
- },
- }
-
- // Add deprecated commands back to the main commands so they parse.
- for k, v := range DeprecatedCommands {
- if _, ok := Commands[k]; ok {
- // Can't deprecate an existing command...
- panic(fmt.Sprintf("command %q defined as deprecated and not at the same time!", k))
- }
- Commands[k] = v
- }
}
// MakeShutdownCh returns a channel that can be used for shutdown
diff --git a/command/login.go b/command/login.go
index d9c2a3f523c3..bafc670c6d29 100644
--- a/command/login.go
+++ b/command/login.go
@@ -28,10 +28,6 @@ type LoginCommand struct {
flagNoPrint bool
flagTokenOnly bool
- // Deprecations
- // TODO: remove in 0.9.0
- flagNoVerify bool
-
testStdin io.Reader // for tests
}
@@ -132,16 +128,6 @@ func (c *LoginCommand) Flags() *FlagSets {
"values will have no affect.",
})
- // Deprecations
- // TODO: remove in 0.9.0
- f.BoolVar(&BoolVar{
- Name: "no-verify",
- Target: &c.flagNoVerify,
- Hidden: true,
- Default: false,
- Usage: "",
- })
-
return set
}
@@ -163,39 +149,6 @@ func (c *LoginCommand) Run(args []string) int {
args = f.Args()
- // Deprecations
- // TODO: remove in 0.10.0
- switch {
- case c.flagNoVerify:
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -no-verify flag is deprecated. In the past, Vault " +
- "performed a lookup on a token after authentication. This is no " +
- "longer the case for all auth methods except \"token\". Vault will " +
- "still attempt to perform a lookup when given a token directly " +
- "because that is how it gets the list of policies, ttl, and other " +
- "metadata. To disable this lookup, specify \"lookup=false\" as a " +
- "configuration option to the token auth method, like this:"))
- c.UI.Warn("")
- c.UI.Warn(" $ vault auth token=ABCD lookup=false")
- c.UI.Warn("")
- c.UI.Warn("Or omit the token and Vault will prompt for it:")
- c.UI.Warn("")
- c.UI.Warn(" $ vault auth lookup=false")
- c.UI.Warn(" Token (will be hidden): ...")
- c.UI.Warn("")
- c.UI.Warn(wrapAtLength(
- "If you are not using token authentication, you can safely omit this " +
- "flag. Vault will not perform a lookup after authentication."))
- c.UI.Warn("")
- }
-
- // There's no point in passing this to other auth handlers...
- if c.flagMethod == "token" {
- args = append(args, "lookup=false")
- }
- }
-
// Set the right flags if the user requested token-only - this overrides
// any previously configured values, as documented.
if c.flagTokenOnly {
diff --git a/command/login_test.go b/command/login_test.go
index 42fac3536b3e..c87add5655f1 100644
--- a/command/login_test.go
+++ b/command/login_test.go
@@ -443,51 +443,6 @@ func TestLoginCommand_Run(t *testing.T) {
}
})
- // Deprecations
- // TODO: remove in 0.9.0
- t.Run("deprecated_no_verify", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Policies: []string{"default"},
- TTL: "30m",
- NumUses: 1,
- })
- if err != nil {
- t.Fatal(err)
- }
- token := secret.Auth.ClientToken
-
- _, cmd := testLoginCommand(t)
- cmd.client = client
-
- code := cmd.Run([]string{
- "-no-verify",
- token,
- })
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d", code, exp)
- }
-
- lookup, err := client.Auth().Token().Lookup(token)
- if err != nil {
- t.Fatal(err)
- }
-
- // There was 1 use to start, make sure we didn't use it (verifying would
- // use it).
- uses, err := lookup.TokenRemainingUses()
- if err != nil {
- t.Fatal(err)
- }
- if uses != 1 {
- t.Errorf("expected %d to be %d", uses, 1)
- }
- })
-
t.Run("no_tabs", func(t *testing.T) {
t.Parallel()
diff --git a/command/main.go b/command/main.go
index a41e09af5e01..3e695670d1e5 100644
--- a/command/main.go
+++ b/command/main.go
@@ -159,12 +159,7 @@ func RunCustom(args []string, runOpts *RunOptions) int {
initCommands(ui, serverCmdUi, runOpts)
- // Calculate hidden commands from the deprecated ones
- hiddenCommands := make([]string, 0, len(DeprecatedCommands)+1)
- for k := range DeprecatedCommands {
- hiddenCommands = append(hiddenCommands, k)
- }
- hiddenCommands = append(hiddenCommands, "version")
+ hiddenCommands := []string{"version"}
cli := &cli.CLI{
Name: "vault",
diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go
index ac8b94aac82d..d5c332a1e183 100644
--- a/command/operator_generate_root.go
+++ b/command/operator_generate_root.go
@@ -36,10 +36,6 @@ type OperatorGenerateRootCommand struct {
flagGenerateOTP bool
flagDRToken bool
- // Deprecation
- // TODO: remove in 0.9.0
- flagGenOTP bool
-
testStdin io.Reader // for tests
}
@@ -179,15 +175,6 @@ func (c *OperatorGenerateRootCommand) Flags() *FlagSets {
"must be provided with each unseal key.",
})
- // Deprecations: prefer longer-form, descriptive flags
- // TODO: remove in 0.9.0
- f.BoolVar(&BoolVar{
- Name: "genotp", // -generate-otp
- Target: &c.flagGenOTP,
- Default: false,
- Hidden: true,
- })
-
return set
}
@@ -213,18 +200,6 @@ func (c *OperatorGenerateRootCommand) Run(args []string) int {
return 1
}
- // Deprecations
- // TODO: remove in 0.9.0
- switch {
- case c.flagGenOTP:
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -gen-otp flag is deprecated. Please use the -generate-otp flag " +
- "instead."))
- }
- c.flagGenerateOTP = c.flagGenOTP
- }
-
client, err := c.Client()
if err != nil {
c.UI.Error(err.Error())
diff --git a/command/operator_init.go b/command/operator_init.go
index bfe57ccaa2cf..5bf0ec5dfab9 100644
--- a/command/operator_init.go
+++ b/command/operator_init.go
@@ -27,7 +27,6 @@ type OperatorInitCommand struct {
flagRootTokenPGPKey string
// HSM
- flagStoredShares int
flagRecoveryShares int
flagRecoveryThreshold int
flagRecoveryPGPKeys []string
@@ -35,11 +34,6 @@ type OperatorInitCommand struct {
// Consul
flagConsulAuto bool
flagConsulService string
-
- // Deprecations
- // TODO: remove in 0.9.0
- flagAuto bool
- flagCheck bool
}
func (c *OperatorInitCommand) Synopsis() string {
@@ -196,32 +190,6 @@ func (c *OperatorInitCommand) Flags() *FlagSets {
"is only used in HSM mode.",
})
- // Deprecations
- // TODO: remove in 0.9.0
- f.BoolVar(&BoolVar{
- Name: "check", // prefer -status
- Target: &c.flagCheck,
- Default: false,
- Hidden: true,
- Usage: "",
- })
- f.BoolVar(&BoolVar{
- Name: "auto", // prefer -consul-auto
- Target: &c.flagAuto,
- Default: false,
- Hidden: true,
- Usage: "",
- })
-
- // Kept to keep scripts passing the flag working, but not used
- f.IntVar(&IntVar{
- Name: "stored-shares",
- Target: &c.flagStoredShares,
- Default: 0,
- Hidden: true,
- Usage: "",
- })
-
return set
}
@@ -241,23 +209,6 @@ func (c *OperatorInitCommand) Run(args []string) int {
return 1
}
- // Deprecations
- // TODO: remove in 0.9.0
- if c.flagAuto {
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength("WARNING! -auto is deprecated. Please use " +
- "-consul-auto instead. This will be removed in Vault 1.1."))
- }
- c.flagConsulAuto = true
- }
- if c.flagCheck {
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength("WARNING! -check is deprecated. Please use " +
- "-status instead. This will be removed in Vault 1.1."))
- }
- c.flagStatus = true
- }
-
args = f.Args()
if len(args) > 0 {
c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args)))
@@ -271,7 +222,6 @@ func (c *OperatorInitCommand) Run(args []string) int {
PGPKeys: c.flagPGPKeys,
RootTokenPGPKey: c.flagRootTokenPGPKey,
- StoredShares: c.flagStoredShares,
RecoveryShares: c.flagRecoveryShares,
RecoveryThreshold: c.flagRecoveryThreshold,
RecoveryPGPKeys: c.flagRecoveryPGPKeys,
diff --git a/command/operator_rekey.go b/command/operator_rekey.go
index b9ee35763b80..f56731c4f202 100644
--- a/command/operator_rekey.go
+++ b/command/operator_rekey.go
@@ -36,13 +36,6 @@ type OperatorRekeyCommand struct {
flagBackupDelete bool
flagBackupRetrieve bool
- // Deprecations
- // TODO: remove in 0.9.0
- flagDelete bool
- flagRecoveryKey bool
- flagRetrieve bool
- flagStoredShares int
-
testStdin io.Reader // for tests
}
@@ -216,41 +209,6 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets {
"if the PGP keys were provided and the backup has not been deleted.",
})
- // Deprecations
- // TODO: remove in 0.9.0
- f.BoolVar(&BoolVar{
- Name: "delete", // prefer -backup-delete
- Target: &c.flagDelete,
- Default: false,
- Hidden: true,
- Usage: "",
- })
-
- f.BoolVar(&BoolVar{
- Name: "retrieve", // prefer -backup-retrieve
- Target: &c.flagRetrieve,
- Default: false,
- Hidden: true,
- Usage: "",
- })
-
- f.BoolVar(&BoolVar{
- Name: "recovery-key", // prefer -target=recovery
- Target: &c.flagRecoveryKey,
- Default: false,
- Hidden: true,
- Usage: "",
- })
-
- // Kept to keep scripts passing the flag working, but not used
- f.IntVar(&IntVar{
- Name: "stored-shares",
- Target: &c.flagStoredShares,
- Default: 0,
- Hidden: true,
- Usage: "",
- })
-
return set
}
@@ -276,33 +234,6 @@ func (c *OperatorRekeyCommand) Run(args []string) int {
return 1
}
- // Deprecations
- // TODO: remove in 0.9.0
- if c.flagDelete {
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -delete flag is deprecated. Please use -backup-delete " +
- "instead. This flag will be removed in Vault 1.1."))
- }
- c.flagBackupDelete = true
- }
- if c.flagRetrieve {
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -retrieve flag is deprecated. Please use -backup-retrieve " +
- "instead. This flag will be removed in Vault 1.1."))
- }
- c.flagBackupRetrieve = true
- }
- if c.flagRecoveryKey {
- if Format(c.UI) == "table" {
- c.UI.Warn(wrapAtLength(
- "WARNING! The -recovery-key flag is deprecated. Please use -target=recovery " +
- "instead. This flag will be removed in Vault 1.1."))
- }
- c.flagTarget = "recovery"
- }
-
// Create the client
client, err := c.Client()
if err != nil {
@@ -349,7 +280,6 @@ func (c *OperatorRekeyCommand) init(client *api.Client) int {
status, err := fn(&api.RekeyInitRequest{
SecretShares: c.flagKeyShares,
SecretThreshold: c.flagKeyThreshold,
- StoredShares: c.flagStoredShares,
PGPKeys: c.flagPGPKeys,
Backup: c.flagBackup,
RequireVerification: c.flagVerify,
diff --git a/command/operator_unseal_test.go b/command/operator_unseal_test.go
index 37ca522e1c7a..1621e60d6f2a 100644
--- a/command/operator_unseal_test.go
+++ b/command/operator_unseal_test.go
@@ -1,10 +1,7 @@
package command
import (
- "bytes"
- "encoding/json"
"io/ioutil"
- "os"
"strings"
"testing"
diff --git a/command/policies_deprecated.go b/command/policies_deprecated.go
deleted file mode 100644
index 79c87224914a..000000000000
--- a/command/policies_deprecated.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package command
-
-import (
- "github.com/mitchellh/cli"
-)
-
-// Deprecation
-// TODO: remove in 0.9.0
-
-var _ cli.Command = (*PoliciesDeprecatedCommand)(nil)
-
-type PoliciesDeprecatedCommand struct {
- *BaseCommand
-}
-
-func (c *PoliciesDeprecatedCommand) Synopsis() string { return "" }
-
-func (c *PoliciesDeprecatedCommand) Help() string {
- return (&PolicyListCommand{
- BaseCommand: c.BaseCommand,
- }).Help()
-}
-
-func (c *PoliciesDeprecatedCommand) Run(args []string) int {
- oargs := args
-
- f := c.flagSet(FlagSetHTTP)
- if err := f.Parse(args); err != nil {
- c.UI.Error(err.Error())
- return 1
- }
-
- args = f.Args()
-
- // Got an arg, this is trying to read a policy
- if len(args) > 0 {
- return (&PolicyReadCommand{
- BaseCommand: &BaseCommand{
- UI: c.UI,
- client: c.client,
- tokenHelper: c.tokenHelper,
- flagAddress: c.flagAddress,
- },
- }).Run(oargs)
- }
-
- // No args, probably ran "vault policies" and we want to translate that to
- // "vault policy list"
- return (&PolicyListCommand{
- BaseCommand: &BaseCommand{
- UI: c.UI,
- client: c.client,
- tokenHelper: c.tokenHelper,
- flagAddress: c.flagAddress,
- },
- }).Run(oargs)
-}
diff --git a/command/policies_deprecated_test.go b/command/policies_deprecated_test.go
deleted file mode 100644
index de8ff7a3290a..000000000000
--- a/command/policies_deprecated_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package command
-
-import (
- "strings"
- "testing"
-
- "github.com/mitchellh/cli"
-)
-
-func testPoliciesDeprecatedCommand(tb testing.TB) (*cli.MockUi, *PoliciesDeprecatedCommand) {
- tb.Helper()
-
- ui := cli.NewMockUi()
- return ui, &PoliciesDeprecatedCommand{
- BaseCommand: &BaseCommand{
- UI: ui,
- },
- }
-}
-
-func TestPoliciesDeprecatedCommand_Run(t *testing.T) {
- t.Parallel()
-
- // TODO: remove in 0.9.0
- t.Run("deprecated_arg", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- ui, cmd := testPoliciesDeprecatedCommand(t)
- cmd.client = client
-
- // vault policies ARG -> vault policy read ARG
- code := cmd.Run([]string{"default"})
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
- }
- stdout := ui.OutputWriter.String()
-
- if expected := "token/"; !strings.Contains(stdout, expected) {
- t.Errorf("expected %q to contain %q", stdout, expected)
- }
- })
-
- t.Run("deprecated_no_args", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- ui, cmd := testPoliciesDeprecatedCommand(t)
- cmd.client = client
-
- // vault policies -> vault policy list
- code := cmd.Run([]string{})
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
- }
- stdout := ui.OutputWriter.String()
-
- if expected := "root"; !strings.Contains(stdout, expected) {
- t.Errorf("expected %q to contain %q", stdout, expected)
- }
- })
-
- t.Run("deprecated_with_flags", func(t *testing.T) {
- t.Parallel()
-
- client, closer := testVaultServer(t)
- defer closer()
-
- ui, cmd := testPoliciesDeprecatedCommand(t)
- cmd.client = client
-
- // vault policies -flag -> vault policy list
- code := cmd.Run([]string{
- "-address", client.Address(),
- })
- if exp := 0; code != exp {
- t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
- }
- stdout := ui.OutputWriter.String()
-
- if expected := "root"; !strings.Contains(stdout, expected) {
- t.Errorf("expected %q to contain %q", stdout, expected)
- }
- })
-
- t.Run("no_tabs", func(t *testing.T) {
- t.Parallel()
-
- _, cmd := testPoliciesDeprecatedCommand(t)
- assertNoTabs(t, cmd)
- })
-}
diff --git a/command/token_create.go b/command/token_create.go
index 207612a577e1..99aa796f7daf 100644
--- a/command/token_create.go
+++ b/command/token_create.go
@@ -29,9 +29,6 @@ type TokenCreateCommand struct {
flagType string
flagMetadata map[string]string
flagPolicies []string
-
- // Deprecated flags
- flagLease time.Duration
}
func (c *TokenCreateCommand) Synopsis() string {
@@ -179,15 +176,6 @@ func (c *TokenCreateCommand) Flags() *FlagSets {
"specified multiple times to attach multiple policies.",
})
- // Deprecated flags
- // TODO: remove in 0.9.0
- f.DurationVar(&DurationVar{
- Name: "lease", // prefer -ttl
- Target: &c.flagLease,
- Default: 0,
- Hidden: true,
- })
-
return set
}
@@ -213,14 +201,6 @@ func (c *TokenCreateCommand) Run(args []string) int {
return 1
}
- // TODO: remove in 0.9.0
- if c.flagLease != 0 {
- if Format(c.UI) == "table" {
- c.UI.Warn("The -lease flag is deprecated. Please use -ttl instead.")
- c.flagTTL = c.flagLease
- }
- }
-
if c.flagType == "batch" {
c.flagRenewable = false
}
diff --git a/command/token_renew.go b/command/token_renew.go
index ee6a62602892..c1887d1b3e66 100644
--- a/command/token_renew.go
+++ b/command/token_renew.go
@@ -97,19 +97,6 @@ func (c *TokenRenewCommand) Run(args []string) int {
// Use the local token
case 1:
token = strings.TrimSpace(args[0])
- case 2:
- // TODO: remove in 0.9.0 - backwards compat
- if Format(c.UI) == "table" {
- c.UI.Warn("Specifying increment as a second argument is deprecated. " +
- "Please use -increment instead.")
- }
- token = strings.TrimSpace(args[0])
- parsed, err := time.ParseDuration(appendDurationSuffix(args[1]))
- if err != nil {
- c.UI.Error(fmt.Sprintf("Invalid increment: %s", err))
- return 1
- }
- increment = parsed
default:
c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
return 1
From c11d7189de5b1700e1ecd56d66ae2523d71f2272 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Thu, 14 Feb 2019 11:55:32 -0800
Subject: [PATCH 10/31] Don't add kv by default in non-dev scenarios (#6109)
It's retained for tests though since most rely on it.
---
command/server.go | 39 ++++++++++++++++++--------------
vault/core_test.go | 2 ++
vault/mount.go | 55 +++++++++++++++++++++++----------------------
vault/mount_test.go | 24 +++++++++++++++-----
vault/testing.go | 50 +++++++++++++++++++++++++++++++++++++++++
5 files changed, 120 insertions(+), 50 deletions(-)
diff --git a/command/server.go b/command/server.go
index af7cb38c5a5c..e8b14483f96f 100644
--- a/command/server.go
+++ b/command/server.go
@@ -1363,24 +1363,29 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
}
// Upgrade the default K/V store
- if !c.flagDevLeasedKV && !c.flagDevKVV1 {
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- ClientToken: init.RootToken,
- Path: "sys/mounts/secret/tune",
- Data: map[string]interface{}{
- "options": map[string]string{
- "version": "2",
- },
+ kvVer := "2"
+ if c.flagDevKVV1 {
+ kvVer = "1"
+ }
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ ClientToken: init.RootToken,
+ Path: "sys/mounts/secret",
+ Data: map[string]interface{}{
+ "type": "kv",
+ "path": "secret/",
+ "description": "key/value secret storage",
+ "options": map[string]string{
+ "version": kvVer,
},
- }
- resp, err := core.HandleRequest(ctx, req)
- if err != nil {
- return nil, errwrap.Wrapf("error upgrading default K/V store: {{err}}", err)
- }
- if resp.IsError() {
- return nil, errwrap.Wrapf("failed to upgrade default K/V store: {{err}}", resp.Error())
- }
+ },
+ }
+ resp, err := core.HandleRequest(ctx, req)
+ if err != nil {
+ return nil, errwrap.Wrapf("error creating default K/V store: {{err}}", err)
+ }
+ if resp.IsError() {
+ return nil, errwrap.Wrapf("failed to create default K/V store: {{err}}", resp.Error())
}
return init, nil
diff --git a/vault/core_test.go b/vault/core_test.go
index 0f5d3c739207..4bf7e8c0ce77 100644
--- a/vault/core_test.go
+++ b/vault/core_test.go
@@ -1634,6 +1634,8 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.
// Wait for core to become active
TestWaitActive(t, core)
+ testCoreAddSecretMount(t, core, root)
+
// Put a secret
req := &logical.Request{
Operation: logical.UpdateOperation,
diff --git a/vault/mount.go b/vault/mount.go
index a29c05031ddf..d12ae08a3d5c 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -1193,36 +1193,37 @@ func (c *Core) defaultMountTable() *MountTable {
table := &MountTable{
Type: mountTableType,
}
- mountUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create default secret mount UUID: %v", err))
- }
- mountAccessor, err := c.generateMountAccessor("kv")
- if err != nil {
- panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err))
- }
- bUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create default secret mount backend UUID: %v", err))
- }
+ table.Entries = append(table.Entries, c.requiredMountTable().Entries...)
- kvMount := &MountEntry{
- Table: mountTableType,
- Path: "secret/",
- Type: "kv",
- Description: "key/value secret storage",
- UUID: mountUUID,
- Accessor: mountAccessor,
- BackendAwareUUID: bUUID,
- Options: map[string]string{
- "version": "1",
- },
- }
if os.Getenv("VAULT_INTERACTIVE_DEMO_SERVER") != "" {
- kvMount.Options["version"] = "2"
+ mountUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ panic(fmt.Sprintf("could not create default secret mount UUID: %v", err))
+ }
+ mountAccessor, err := c.generateMountAccessor("kv")
+ if err != nil {
+ panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err))
+ }
+ bUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ panic(fmt.Sprintf("could not create default secret mount backend UUID: %v", err))
+ }
+
+ kvMount := &MountEntry{
+ Table: mountTableType,
+ Path: "secret/",
+ Type: "kv",
+ Description: "key/value secret storage",
+ UUID: mountUUID,
+ Accessor: mountAccessor,
+ BackendAwareUUID: bUUID,
+ Options: map[string]string{
+ "version": "2",
+ },
+ }
+ table.Entries = append(table.Entries, kvMount)
}
- table.Entries = append(table.Entries, kvMount)
- table.Entries = append(table.Entries, c.requiredMountTable().Entries...)
+
return table
}
diff --git a/vault/mount_test.go b/vault/mount_test.go
index 731c9780cede..60bee0c7e375 100644
--- a/vault/mount_test.go
+++ b/vault/mount_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/go-test/deep"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/compressutil"
"github.com/hashicorp/vault/helper/jsonutil"
@@ -41,7 +42,7 @@ func TestMount_ReadOnlyViewDuringMount(t *testing.T) {
func TestCore_DefaultMountTable(t *testing.T) {
c, keys, _ := TestCoreUnsealed(t)
- verifyDefaultTable(t, c.mounts)
+ verifyDefaultTable(t, c.mounts, 4)
// Start a second core with same physical
conf := &CoreConfig{
@@ -363,8 +364,19 @@ func TestCore_Remount(t *testing.T) {
}
// Verify matching mount tables
- if !reflect.DeepEqual(c.mounts, c2.mounts) {
- t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ if c.mounts.Type != c2.mounts.Type {
+ t.Fatal("types don't match")
+ }
+ cMountMap := map[string]interface{}{}
+ for _, v := range c.mounts.Entries {
+ cMountMap[v.Path] = v
+ }
+ c2MountMap := map[string]interface{}{}
+ for _, v := range c2.mounts.Entries {
+ c2MountMap[v.Path] = v
+ }
+ if diff := deep.Equal(cMountMap, c2MountMap); diff != nil {
+ t.Fatal(diff)
}
}
@@ -464,7 +476,7 @@ func TestCore_Remount_Protected(t *testing.T) {
func TestDefaultMountTable(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
table := c.defaultMountTable()
- verifyDefaultTable(t, table)
+ verifyDefaultTable(t, table, 3)
}
func TestCore_MountTable_UpgradeToTyped(t *testing.T) {
@@ -633,8 +645,8 @@ func testCore_MountTable_UpgradeToTyped_Common(
}
}
-func verifyDefaultTable(t *testing.T, table *MountTable) {
- if len(table.Entries) != 4 {
+func verifyDefaultTable(t *testing.T, table *MountTable, expected int) {
+ if len(table.Entries) != expected {
t.Fatalf("bad: %v", table.Entries)
}
table.sortEntriesByPath()
diff --git a/vault/testing.go b/vault/testing.go
index c5e9c4bce2e2..ff26ae5f7111 100644
--- a/vault/testing.go
+++ b/vault/testing.go
@@ -38,6 +38,7 @@ import (
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/logging"
+ "github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/reload"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
@@ -310,9 +311,35 @@ func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
t.Fatal("should not be sealed")
}
+ testCoreAddSecretMount(t, core, token)
+
return core, keys, token
}
+func testCoreAddSecretMount(t testing.T, core *Core, token string) {
+ kvReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ ClientToken: token,
+ Path: "sys/mounts/secret",
+ Data: map[string]interface{}{
+ "type": "kv",
+ "path": "secret/",
+ "description": "key/value secret storage",
+ "options": map[string]string{
+ "version": "1",
+ },
+ },
+ }
+ resp, err := core.HandleRequest(namespace.RootContext(nil), kvReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatal(err)
+ }
+
+}
+
func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
t.Helper()
logger := logging.NewVaultLogger(log.Trace)
@@ -1404,6 +1431,29 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
TestWaitActive(t, cores[0])
+ // Existing tests rely on this; we can make a toggle to disable it
+ // later if we want
+ kvReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ ClientToken: testCluster.RootToken,
+ Path: "sys/mounts/secret",
+ Data: map[string]interface{}{
+ "type": "kv",
+ "path": "secret/",
+ "description": "key/value secret storage",
+ "options": map[string]string{
+ "version": "1",
+ },
+ },
+ }
+ resp, err := cores[0].HandleRequest(namespace.RootContext(ctx), kvReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatal(err)
+ }
+
// Unseal other cores unless otherwise specified
if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 {
for i := 1; i < numCores; i++ {
From 9408c3b8a23749a286d7dcbda677d1477257333a Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Thu, 14 Feb 2019 12:13:34 -0800
Subject: [PATCH 11/31] Fix unit tests broken with JWT plugin update
---
command/agent_test.go | 1 +
command/auth_enable_test.go | 3 ++-
command/base_predict_test.go | 1 +
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/command/agent_test.go b/command/agent_test.go
index d7281fc38ea1..386ad47799b7 100644
--- a/command/agent_test.go
+++ b/command/agent_test.go
@@ -64,6 +64,7 @@ func TestExitAfterAuth(t *testing.T) {
}
_, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{
+ "role_type": "jwt",
"bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients",
"bound_audiences": "https://vault.plugin.auth.jwt.test",
"user_claim": "https://vault/user",
diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go
index 4c94cee3da53..6813ded4786d 100644
--- a/command/auth_enable_test.go
+++ b/command/auth_enable_test.go
@@ -175,7 +175,8 @@ func TestAuthEnableCommand_Run(t *testing.T) {
// Add 1 to account for the "token" backend, which is visible when you walk the filesystem but
// is treated as special and excluded from the registry.
- expected := len(builtinplugins.Registry.Keys(consts.PluginTypeCredential)) + 1
+ // Subtract 1 to account for "oidc" which is an alias of "jwt" and not a separate plugin.
+ expected := len(builtinplugins.Registry.Keys(consts.PluginTypeCredential))
if len(backends) != expected {
t.Fatalf("expected %d credential backends, got %d", expected, len(backends))
}
diff --git a/command/base_predict_test.go b/command/base_predict_test.go
index b4f3264d19b7..c09dc9f4cc21 100644
--- a/command/base_predict_test.go
+++ b/command/base_predict_test.go
@@ -352,6 +352,7 @@ func TestPredict_Plugins(t *testing.T) {
"mysql-legacy-database-plugin",
"mysql-rds-database-plugin",
"nomad",
+ "oidc",
"okta",
"pki",
"postgresql",
From 5dd50ef281b54d5985c8ec968b1f1e89e0ca438f Mon Sep 17 00:00:00 2001
From: Martin
Date: Thu, 14 Feb 2019 21:46:59 +0100
Subject: [PATCH 12/31] Prometheus support on v1/sys/metrics endpoint (#5308)
* initial commit for prometheus and sys/metrics support
* Throw an error if prometheusRetentionTime is 0,add prometheus in devmode
* return when format=prometheus is used and prom is disable
* parse prometheus_retention_time from string instead of int
* Initialize config.Telemetry if nil
* address PR issues
* add sys/metrics framework.Path in a factory
* Apply requiredMountTable entries's MountConfig to existing core table
* address pr comments
* enable prometheus sink by default
* Move Metric-related code in a separate metricsutil helper
---
command/server.go | 61 ++-
command/server/config.go | 25 +-
helper/metricsutil/metricsutil.go | 104 +++++
vault/core.go | 10 +-
vault/logical_system.go | 14 +
vault/logical_system_paths.go | 18 +
vault/mount.go | 4 +
.../prometheus/promhttp/delegator.go | 199 ++++++++
.../prometheus/promhttp/delegator_1_8.go | 181 +++++++
.../prometheus/promhttp/delegator_pre_1_8.go | 44 ++
.../client_golang/prometheus/promhttp/http.go | 204 ++++++++
.../prometheus/promhttp/instrument_client.go | 98 ++++
.../promhttp/instrument_client_1_8.go | 144 ++++++
.../prometheus/promhttp/instrument_server.go | 440 ++++++++++++++++++
14 files changed, 1529 insertions(+), 17 deletions(-)
create mode 100644 helper/metricsutil/metricsutil.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
diff --git a/command/server.go b/command/server.go
index e8b14483f96f..0540932b1fb2 100644
--- a/command/server.go
+++ b/command/server.go
@@ -6,6 +6,7 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
+ "github.com/hashicorp/vault/helper/metricsutil"
"io"
"io/ioutil"
"net"
@@ -23,6 +24,7 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/armon/go-metrics/datadog"
+ "github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
multierror "github.com/hashicorp/go-multierror"
@@ -469,7 +471,8 @@ func (c *ServerCommand) Run(args []string) int {
"in a Docker container, provide the IPC_LOCK cap to the container."))
}
- if err := c.setupTelemetry(config); err != nil {
+ metricsHelper, err := c.setupTelemetry(config)
+ if err != nil {
c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
return 1
}
@@ -563,6 +566,7 @@ func (c *ServerCommand) Run(args []string) int {
AllLoggers: allLoggers,
BuiltinRegistry: builtinplugins.Registry,
DisableKeyEncodingChecks: config.DisablePrintableCheck,
+ MetricsHelper: metricsHelper,
}
if c.flagDev {
coreConfig.DevToken = c.flagDevRootTokenID
@@ -1691,8 +1695,8 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
return url.String(), nil
}
-// setupTelemetry is used to setup the telemetry sub-systems
-func (c *ServerCommand) setupTelemetry(config *server.Config) error {
+// setupTelemetry is used to setup the telemetry sub-systems and returns the in-memory sink to be used in http configuration
+func (c *ServerCommand) setupTelemetry(config *server.Config) (*metricsutil.MetricsHelper, error) {
/* Setup telemetry
Aggregate on 10 second intervals for 1 minute. Expose the
metrics over stderr when there is a SIGUSR1 received.
@@ -1701,10 +1705,10 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
metrics.DefaultInmemSignal(inm)
var telConfig *server.Telemetry
- if config.Telemetry == nil {
- telConfig = &server.Telemetry{}
- } else {
+ if config.Telemetry != nil {
telConfig = config.Telemetry
+ } else {
+ telConfig = &server.Telemetry{}
}
metricsConf := metrics.DefaultConfig("vault")
@@ -1712,10 +1716,29 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
// Configure the statsite sink
var fanout metrics.FanoutSink
+ var prometheusEnabled bool
+
+ // Configure the Prometheus sink
+ if telConfig.PrometheusRetentionTime != 0 {
+ prometheusEnabled = true
+ prometheusOpts := prometheus.PrometheusOpts{
+ Expiration: telConfig.PrometheusRetentionTime,
+ }
+
+ sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts)
+ if err != nil {
+ return nil, err
+ }
+ fanout = append(fanout, sink)
+ }
+
+ metricHelper := metricsutil.NewMetricsHelper(inm, prometheusEnabled)
+
+
if telConfig.StatsiteAddr != "" {
sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
if err != nil {
- return err
+ return nil, err
}
fanout = append(fanout, sink)
}
@@ -1724,7 +1747,7 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
if telConfig.StatsdAddr != "" {
sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
if err != nil {
- return err
+ return nil, err
}
fanout = append(fanout, sink)
}
@@ -1760,7 +1783,7 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
sink, err := circonus.NewCirconusSink(cfg)
if err != nil {
- return err
+ return nil, err
}
sink.Start()
fanout = append(fanout, sink)
@@ -1775,21 +1798,29 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
if err != nil {
- return errwrap.Wrapf("failed to start DogStatsD sink: {{err}}", err)
+ return nil, errwrap.Wrapf("failed to start DogStatsD sink: {{err}}", err)
}
sink.SetTags(tags)
fanout = append(fanout, sink)
}
// Initialize the global sink
- if len(fanout) > 0 {
- fanout = append(fanout, inm)
- metrics.NewGlobal(metricsConf, fanout)
+ if len(fanout) > 1 {
+ // Hostname enabled will create poor quality metrics name for prometheus
+ if !telConfig.DisableHostname {
+ c.UI.Warn("telemetry.disable_hostname has been set to false. Recommended setting is true for Prometheus to avoid poorly named metrics.")
+ }
} else {
metricsConf.EnableHostname = false
- metrics.NewGlobal(metricsConf, inm)
}
- return nil
+ fanout = append(fanout, inm)
+ _, err := metrics.NewGlobal(metricsConf, fanout)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return metricHelper, nil
}
func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reload.ReloadFunc, configPath []string) error {
diff --git a/command/server/config.go b/command/server/config.go
index 1a6498c3e74f..cf9e41d840d7 100644
--- a/command/server/config.go
+++ b/command/server/config.go
@@ -19,6 +19,10 @@ import (
"github.com/hashicorp/vault/helper/parseutil"
)
+const (
+ prometheusDefaultRetentionTime = 24 * time.Hour
+)
+
// Config is the configuration for the vault server.
type Config struct {
Listeners []*Listener `hcl:"-"`
@@ -98,7 +102,10 @@ func DevConfig(ha, transactional bool) *Config {
EnableUI: true,
- Telemetry: &Telemetry{},
+ Telemetry: &Telemetry{
+ PrometheusRetentionTime: prometheusDefaultRetentionTime,
+ DisableHostname: true,
+ },
}
switch {
@@ -233,6 +240,12 @@ type Telemetry struct {
// DogStatsdTags are the global tags that should be sent with each packet to dogstatsd
// It is a list of strings, where each string looks like "my_tag_name:my_tag_value"
DogStatsDTags []string `hcl:"dogstatsd_tags"`
+
+ // Prometheus:
+ // PrometheusRetentionTime is the retention time for prometheus metrics if greater than 0.
+ // Default: 24h
+	PrometheusRetentionTime time.Duration `hcl:"-"`
+ PrometheusRetentionTimeRaw interface{} `hcl:"prometheus_retention_time"`
}
func (s *Telemetry) GoString() string {
@@ -864,5 +877,15 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error {
if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil {
return multierror.Prefix(err, "telemetry:")
}
+
+ if result.Telemetry.PrometheusRetentionTimeRaw != nil {
+ var err error
+ if result.Telemetry.PrometheusRetentionTime, err = parseutil.ParseDurationSecond(result.Telemetry.PrometheusRetentionTimeRaw); err != nil {
+ return err
+ }
+ } else {
+ result.Telemetry.PrometheusRetentionTime = prometheusDefaultRetentionTime
+ }
+
return nil
}
diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go
new file mode 100644
index 000000000000..3083d2dc10ca
--- /dev/null
+++ b/helper/metricsutil/metricsutil.go
@@ -0,0 +1,104 @@
+package metricsutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/vault/logical"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/expfmt"
+ "strings"
+)
+
+const (
+ OpenMetricsMIMEType = "application/openmetrics-text"
+)
+
+const (
+ PrometheusMetricFormat = "prometheus"
+)
+
+// MetricsHelper bundles the in-memory telemetry sink with a flag recording
+// whether a Prometheus sink was also registered at startup, so HTTP-facing
+// code can render metrics in the format a client asks for.
+type MetricsHelper struct {
+	inMemSink *metrics.InmemSink
+	PrometheusEnabled bool
+}
+
+// NewMetricsHelper constructs a MetricsHelper around the given in-memory
+// sink. enablePrometheus should be true only when a Prometheus sink was
+// actually registered, so PrometheusResponse can reject requests otherwise.
+func NewMetricsHelper(inMem *metrics.InmemSink, enablePrometheus bool) *MetricsHelper {
+	return &MetricsHelper{inMem, enablePrometheus}
+}
+
+// FormatFromRequest negotiates the metrics output format from the request's
+// Accept header: it returns PrometheusMetricFormat when the client asks for
+// the OpenMetrics MIME type, and "" (the generic JSON summary) otherwise.
+func FormatFromRequest(req *logical.Request) string {
+	acceptHeaders := req.Headers["Accept"]
+	if len(acceptHeaders) > 0 {
+		// Only the first Accept value is considered.
+		if strings.HasPrefix(acceptHeaders[0], OpenMetricsMIMEType) {
+			return PrometheusMetricFormat
+		}
+	}
+	return ""
+}
+
+// ResponseForFormat dispatches to the encoder matching format: the
+// Prometheus text exposition format for PrometheusMetricFormat, or the
+// generic JSON summary when format is empty. Any other value is an error.
+func (m *MetricsHelper) ResponseForFormat(format string) (*logical.Response, error) {
+	switch format {
+	case PrometheusMetricFormat:
+		return m.PrometheusResponse()
+	case "":
+		return m.GenericResponse()
+	default:
+		return nil, fmt.Errorf("metric response format %q unknown", format)
+	}
+}
+
+// PrometheusResponse gathers all metric families from the Prometheus
+// default gatherer and renders them in the text exposition format. When
+// Prometheus support was not enabled at startup, a plain-text 400 response
+// is returned rather than an error.
+func (m *MetricsHelper) PrometheusResponse() (*logical.Response, error) {
+	if !m.PrometheusEnabled {
+		return &logical.Response{
+			Data: map[string]interface{}{
+				logical.HTTPContentType: "text/plain",
+				logical.HTTPRawBody:     "prometheus is not enabled",
+				logical.HTTPStatusCode:  400,
+			},
+		}, nil
+	}
+	metricsFamilies, err := prometheus.DefaultGatherer.Gather()
+	if err != nil && len(metricsFamilies) == 0 {
+		return nil, fmt.Errorf("no prometheus metrics could be decoded: %s", err)
+	}
+	// NOTE(review): when Gather fails but still returns some families, the
+	// error is dropped and the partial set is served — confirm this
+	// best-effort behavior is intended.
+
+	buf := &bytes.Buffer{}
+	e := expfmt.NewEncoder(buf, expfmt.FmtText)
+	for _, mf := range metricsFamilies {
+		if err := e.Encode(mf); err != nil {
+			return nil, fmt.Errorf("error during the encoding of metrics: %s", err)
+		}
+	}
+	return &logical.Response{
+		Data: map[string]interface{}{
+			logical.HTTPContentType: string(expfmt.FmtText),
+			logical.HTTPRawBody:     buf.Bytes(),
+			logical.HTTPStatusCode:  200,
+		},
+	}, nil
+}
+
+// GenericResponse renders the in-memory sink's aggregated metrics summary
+// as a JSON document with a 200 status.
+func (m *MetricsHelper) GenericResponse() (*logical.Response, error) {
+	summary, err := m.inMemSink.DisplayMetrics(nil, nil)
+	if err != nil {
+		return nil, fmt.Errorf("error while fetching the in-memory metrics: %s", err)
+	}
+	content, err := json.Marshal(summary)
+	if err != nil {
+		return nil, fmt.Errorf("error while marshalling the in-memory metrics: %s", err)
+	}
+	return &logical.Response{
+		Data: map[string]interface{}{
+			logical.HTTPContentType: "application/json",
+			logical.HTTPRawBody:     content,
+			logical.HTTPStatusCode:  200,
+		},
+	}, nil
+}
diff --git a/vault/core.go b/vault/core.go
index aa7b4a38b79c..8807af66b8b2 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -7,6 +7,7 @@ import (
"crypto/x509"
"errors"
"fmt"
+ "github.com/hashicorp/vault/helper/metricsutil"
"net"
"net/http"
"net/url"
@@ -419,7 +420,10 @@ type Core struct {
// loadCaseSensitiveIdentityStore enforces the loading of identity store
// artifacts in a case sensitive manner. To be used only in testing.
- loadCaseSensitiveIdentityStore bool
+ loadCaseSensitiveIdentityStore bool
+
+ // Telemetry objects
+ metricsHelper *metricsutil.MetricsHelper
}
// CoreConfig is used to parameterize a core
@@ -488,6 +492,9 @@ type CoreConfig struct {
DisableKeyEncodingChecks bool
AllLoggers []log.Logger
+
+ // Telemetry objects
+ MetricsHelper *metricsutil.MetricsHelper
}
func (c *CoreConfig) Clone() *CoreConfig {
@@ -596,6 +603,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
builtinRegistry: conf.BuiltinRegistry,
neverBecomeActive: new(uint32),
clusterLeaderParams: new(atomic.Value),
+ metricsHelper: conf.MetricsHelper,
}
atomic.StoreUint32(c.sealed, 1)
diff --git a/vault/logical_system.go b/vault/logical_system.go
index f6806d876a6b..ced6f90163ae 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -26,6 +26,7 @@ import (
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/identity"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/strutil"
@@ -145,6 +146,7 @@ func NewSystemBackend(core *Core, logger log.Logger) *SystemBackend {
b.Backend.Paths = append(b.Backend.Paths, b.capabilitiesPaths()...)
b.Backend.Paths = append(b.Backend.Paths, b.internalPaths()...)
b.Backend.Paths = append(b.Backend.Paths, b.remountPath())
+ b.Backend.Paths = append(b.Backend.Paths, b.metricsPath())
if core.rawEnabled {
b.Backend.Paths = append(b.Backend.Paths, &framework.Path{
@@ -2512,6 +2514,14 @@ func (b *SystemBackend) responseWrappingUnwrap(ctx context.Context, te *logical.
return response, nil
}
+// handleMetrics serves the sys/metrics read: the output format comes from
+// the "format" field, falling back to content negotiation on the request's
+// Accept header, and is delegated to the core's MetricsHelper.
+func (b *SystemBackend) handleMetrics(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	format := data.Get("format").(string)
+	if format == "" {
+		format = metricsutil.FormatFromRequest(req)
+	}
+	return b.Core.metricsHelper.ResponseForFormat(format)
+}
+
func (b *SystemBackend) handleWrappingLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
// This ordering of lookups has been validated already in the wrapping
// validation func, we're just doing this for a safety check
@@ -3884,4 +3894,8 @@ This path responds to the following HTTP methods.
"Information about a token's resultant ACL. Internal API; its location, inputs, and outputs may change.",
"",
},
+ "metrics": {
+ "Export the metrics aggregated for telemetry purpose.",
+ "",
+ },
}
diff --git a/vault/logical_system_paths.go b/vault/logical_system_paths.go
index 0a81acd58ec9..1e3be7e37fa3 100644
--- a/vault/logical_system_paths.go
+++ b/vault/logical_system_paths.go
@@ -1100,6 +1100,24 @@ func (b *SystemBackend) remountPath() *framework.Path {
}
}
+// metricsPath declares the read-only sys/metrics endpoint that exports the
+// aggregated telemetry, optionally in Prometheus exposition format when
+// format="prometheus" is passed.
+func (b *SystemBackend) metricsPath() *framework.Path {
+	return &framework.Path{
+		Pattern: "metrics",
+		Fields: map[string]*framework.FieldSchema{
+			"format": &framework.FieldSchema{
+				Type:        framework.TypeString,
+				Description: "Format to export metrics into. Currently accept only \"prometheus\"",
+			},
+		},
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ReadOperation: b.handleMetrics,
+		},
+		HelpSynopsis:    strings.TrimSpace(sysHelp["metrics"][0]),
+		HelpDescription: strings.TrimSpace(sysHelp["metrics"][1]),
+	}
+
+}
+
func (b *SystemBackend) authPaths() []*framework.Path {
return []*framework.Path{
{
diff --git a/vault/mount.go b/vault/mount.go
index d12ae08a3d5c..aab94905a066 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -851,6 +851,7 @@ func (c *Core) loadMounts(ctx context.Context) error {
for _, coreMount := range c.mounts.Entries {
if coreMount.Type == requiredMount.Type {
foundRequired = true
+ coreMount.Config = requiredMount.Config
break
}
}
@@ -1276,6 +1277,9 @@ func (c *Core) requiredMountTable() *MountTable {
UUID: sysUUID,
Accessor: sysAccessor,
BackendAwareUUID: sysBackendUUID,
+ Config: MountConfig{
+ PassthroughRequestHeaders: []string{"Accept"},
+ },
}
identityUUID, err := uuid.GenerateUUID()
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 000000000000..5ee095b09e96
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d *flusherDelegator) Flush() {
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, &readerFromDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 000000000000..f4d386f7a393
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, &pusherDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 000000000000..8bb9b8b68f8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 000000000000..2d67f2496293
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,204 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+func Handler() http.Handler {
+ return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 000000000000..65f942544549
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,98 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. Partitioning of the CounterVec happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present
+// in the CounterVec. For unpartitioned counting, use a CounterVec with
+// zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 000000000000..0bd80c35521d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separate buckets for Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ if it.DNSDone != nil {
+ it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+ r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 000000000000..3d145adbf096
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,440 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided
+// http.Handler to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. Partitioning of the CounterVec happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present
+// in the CounterVec. For unpartitioned counting, use a CounterVec with
+// zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two labels. The only allowed label names are "code" and "method". The
+// function panics if any other instance labels are provided. The Observe
+// method of the Observer in the ObserverVec is called with the request
+// duration in seconds. Partitioning happens by HTTP status code and/or HTTP
+// method if the respective instance label names are present in the
+// ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request size in bytes. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the response size in bytes. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ pm dto.Metric
+ )
+
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil {
+ return
+ }
+ if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil {
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ return
+ }
+ panic("previously set label not found – this must never happen")
+ }
+ if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil {
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString {
+ continue
+ }
+ if name == "code" || name == "method" {
+ continue
+ }
+ panic("metric partitioned with non-supported labels")
+ }
+ code = true
+ method = true
+ return
+ }
+ panic("metric partitioned with non-supported labels")
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
From e39a5f28dfe0d5ce8e633f215ee8a8abc68a7f08 Mon Sep 17 00:00:00 2001
From: Vishal Nayak
Date: Thu, 14 Feb 2019 20:10:36 -0500
Subject: [PATCH 13/31] Vault Agent Cache (#6220)
* vault-agent-cache: squashed 250+ commits
* Add proper token revocation validations to the tests
* Add more test cases
* Avoid leaking by not closing request/response bodies; add comments
* Fix revoke orphan use case; update tests
* Add CLI test for making request over unix socket
* agent/cache: remove namespace-related tests
* Strip-off the auto-auth token from the lookup response
* Output listener details along with configuration
* Add scheme to API address output
* leasecache: use IndexNameLease for prefix lease revocations
* Make CLI accept the fully qualified unix address
* export VAULT_AGENT_ADDR=unix://path/to/socket
* unix:/ to unix://
---
.gitignore | 2 +
api/client.go | 22 +-
api/request.go | 4 +-
api/secret.go | 1 +
command/agent.go | 102 +-
command/agent/cache/api_proxy.go | 61 ++
command/agent/cache/api_proxy_test.go | 43 +
command/agent/cache/cache_test.go | 926 ++++++++++++++++++
command/agent/cache/cachememdb/cache_memdb.go | 265 +++++
.../cache/cachememdb/cache_memdb_test.go | 388 ++++++++
command/agent/cache/cachememdb/index.go | 97 ++
command/agent/cache/handler.go | 155 +++
command/agent/cache/lease_cache.go | 813 +++++++++++++++
command/agent/cache/lease_cache_test.go | 507 ++++++++++
command/agent/cache/listener.go | 105 ++
command/agent/cache/proxy.go | 28 +
command/agent/cache/testing.go | 36 +
command/agent/cache_end_to_end_test.go | 280 ++++++
command/agent/config/config.go | 104 ++
command/agent/config/config_test.go | 74 ++
.../config-cache-embedded-type.hcl | 44 +
.../config/test-fixtures/config-cache.hcl | 41 +
command/agent_test.go | 183 ++++
command/base.go | 13 +
command/server/listener.go | 2 +-
command/server/listener_tcp.go | 10 +-
26 files changed, 4283 insertions(+), 23 deletions(-)
create mode 100644 command/agent/cache/api_proxy.go
create mode 100644 command/agent/cache/api_proxy_test.go
create mode 100644 command/agent/cache/cache_test.go
create mode 100644 command/agent/cache/cachememdb/cache_memdb.go
create mode 100644 command/agent/cache/cachememdb/cache_memdb_test.go
create mode 100644 command/agent/cache/cachememdb/index.go
create mode 100644 command/agent/cache/handler.go
create mode 100644 command/agent/cache/lease_cache.go
create mode 100644 command/agent/cache/lease_cache_test.go
create mode 100644 command/agent/cache/listener.go
create mode 100644 command/agent/cache/proxy.go
create mode 100644 command/agent/cache/testing.go
create mode 100644 command/agent/cache_end_to_end_test.go
create mode 100644 command/agent/config/test-fixtures/config-cache-embedded-type.hcl
create mode 100644 command/agent/config/test-fixtures/config-cache.hcl
diff --git a/.gitignore b/.gitignore
index 62db80cd9b4d..101ddbbf2631 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,7 +48,9 @@ Vagrantfile
# Configs
*.hcl
!command/agent/config/test-fixtures/config.hcl
+!command/agent/config/test-fixtures/config-cache.hcl
!command/agent/config/test-fixtures/config-embedded-type.hcl
+!command/agent/config/test-fixtures/config-cache-embedded-type.hcl
.DS_Store
.idea
diff --git a/api/client.go b/api/client.go
index 80ccd7d50290..432624dd0379 100644
--- a/api/client.go
+++ b/api/client.go
@@ -25,6 +25,7 @@ import (
"golang.org/x/time/rate"
)
+const EnvVaultAgentAddress = "VAULT_AGENT_ADDR"
const EnvVaultAddress = "VAULT_ADDR"
const EnvVaultCACert = "VAULT_CACERT"
const EnvVaultCAPath = "VAULT_CAPATH"
@@ -237,6 +238,10 @@ func (c *Config) ReadEnvironment() error {
if v := os.Getenv(EnvVaultAddress); v != "" {
envAddress = v
}
+ // Agent's address will take precedence over Vault's address
+ if v := os.Getenv(EnvVaultAgentAddress); v != "" {
+ envAddress = v
+ }
if v := os.Getenv(EnvVaultMaxRetries); v != "" {
maxRetries, err := strconv.ParseUint(v, 10, 32)
if err != nil {
@@ -366,6 +371,21 @@ func NewClient(c *Config) (*Client, error) {
c.modifyLock.Lock()
defer c.modifyLock.Unlock()
+ // If address begins with a `unix://`, treat it as a socket file path and set
+ // the HttpClient's transport to the corresponding socket dialer.
+ if strings.HasPrefix(c.Address, "unix://") {
+ socketFilePath := strings.TrimPrefix(c.Address, "unix://")
+ c.HttpClient = &http.Client{
+ Transport: &http.Transport{
+ DialContext: func(context.Context, string, string) (net.Conn, error) {
+ return net.Dial("unix", socketFilePath)
+ },
+ },
+ }
+ // Set the unix address for URL parsing below
+ c.Address = "http://unix"
+ }
+
u, err := url.Parse(c.Address)
if err != nil {
return nil, err
@@ -707,7 +727,7 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon
redirectCount := 0
START:
- req, err := r.toRetryableHTTP()
+ req, err := r.ToRetryableHTTP()
if err != nil {
return nil, err
}
diff --git a/api/request.go b/api/request.go
index 4efa2aa84177..41d45720fea7 100644
--- a/api/request.go
+++ b/api/request.go
@@ -62,7 +62,7 @@ func (r *Request) ResetJSONBody() error {
// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use
// with the net/http package.
func (r *Request) ToHTTP() (*http.Request, error) {
- req, err := r.toRetryableHTTP()
+ req, err := r.ToRetryableHTTP()
if err != nil {
return nil, err
}
@@ -85,7 +85,7 @@ func (r *Request) ToHTTP() (*http.Request, error) {
return req.Request, nil
}
-func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
+func (r *Request) ToRetryableHTTP() (*retryablehttp.Request, error) {
// Encode the query parameters
r.URL.RawQuery = r.Params.Encode()
diff --git a/api/secret.go b/api/secret.go
index e25962604b4e..c8a0ba3d9d2c 100644
--- a/api/secret.go
+++ b/api/secret.go
@@ -292,6 +292,7 @@ type SecretAuth struct {
TokenPolicies []string `json:"token_policies"`
IdentityPolicies []string `json:"identity_policies"`
Metadata map[string]string `json:"metadata"`
+ Orphan bool `json:"orphan"`
LeaseDuration int `json:"lease_duration"`
Renewable bool `json:"renewable"`
diff --git a/command/agent.go b/command/agent.go
index 92c93c70c2e1..4fb8f09f2a31 100644
--- a/command/agent.go
+++ b/command/agent.go
@@ -4,6 +4,10 @@ import (
"context"
"fmt"
"io"
+ "net"
+ "net/http"
+ "time"
+
"os"
"sort"
"strings"
@@ -23,6 +27,7 @@ import (
"github.com/hashicorp/vault/command/agent/auth/gcp"
"github.com/hashicorp/vault/command/agent/auth/jwt"
"github.com/hashicorp/vault/command/agent/auth/kubernetes"
+ "github.com/hashicorp/vault/command/agent/cache"
"github.com/hashicorp/vault/command/agent/config"
"github.com/hashicorp/vault/command/agent/sink"
"github.com/hashicorp/vault/command/agent/sink/file"
@@ -218,19 +223,6 @@ func (c *AgentCommand) Run(args []string) int {
info["cgo"] = "enabled"
}
- // Server configuration output
- padding := 24
- sort.Strings(infoKeys)
- c.UI.Output("==> Vault agent configuration:\n")
- for _, k := range infoKeys {
- c.UI.Output(fmt.Sprintf(
- "%s%s: %s",
- strings.Repeat(" ", padding-len(k)),
- strings.Title(k),
- info[k]))
- }
- c.UI.Output("")
-
// Tests might not want to start a vault server and just want to verify
// the configuration.
if c.flagTestVerifyOnly {
@@ -332,10 +324,92 @@ func (c *AgentCommand) Run(args []string) int {
EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials,
})
- // Start things running
+ // Start auto-auth and sink servers
go ah.Run(ctx, method)
go ss.Run(ctx, ah.OutputCh, sinks)
+ // Parse agent listener configurations
+ if config.Cache != nil && len(config.Cache.Listeners) != 0 {
+ cacheLogger := c.logger.Named("cache")
+
+ // Create the API proxier
+ apiProxy := cache.NewAPIProxy(&cache.APIProxyConfig{
+ Logger: cacheLogger.Named("apiproxy"),
+ })
+
+ // Create the lease cache proxier and set its underlying proxier to
+ // the API proxier.
+ leaseCache, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{
+ BaseContext: ctx,
+ Proxier: apiProxy,
+ Logger: cacheLogger.Named("leasecache"),
+ })
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err))
+ return 1
+ }
+
+ // Create a muxer and add paths relevant for the lease cache layer
+ mux := http.NewServeMux()
+ mux.Handle("/v1/agent/cache-clear", leaseCache.HandleCacheClear(ctx))
+
+ mux.Handle("/", cache.Handler(ctx, cacheLogger, leaseCache, config.Cache.UseAutoAuthToken, c.client))
+
+ var listeners []net.Listener
+ for i, lnConfig := range config.Cache.Listeners {
+ listener, props, _, err := cache.ServerListener(lnConfig, c.logWriter, c.UI)
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error parsing listener configuration: %v", err))
+ return 1
+ }
+
+ listeners = append(listeners, listener)
+
+ scheme := "https://"
+ if props["tls"] == "disabled" {
+ scheme = "http://"
+ }
+ if lnConfig.Type == "unix" {
+ scheme = "unix://"
+ }
+
+ infoKey := fmt.Sprintf("api address %d", i+1)
+ info[infoKey] = scheme + listener.Addr().String()
+ infoKeys = append(infoKeys, infoKey)
+
+ cacheLogger.Info("starting listener", "addr", listener.Addr().String())
+ server := &http.Server{
+ Handler: mux,
+ ReadHeaderTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ IdleTimeout: 5 * time.Minute,
+ ErrorLog: cacheLogger.StandardLogger(nil),
+ }
+ go server.Serve(listener)
+ }
+
+ // Ensure that listeners are closed at all the exits
+ listenerCloseFunc := func() {
+ for _, ln := range listeners {
+ ln.Close()
+ }
+ }
+ defer c.cleanupGuard.Do(listenerCloseFunc)
+ }
+
+ // Server configuration output
+ padding := 24
+ sort.Strings(infoKeys)
+ c.UI.Output("==> Vault agent configuration:\n")
+ for _, k := range infoKeys {
+ c.UI.Output(fmt.Sprintf(
+ "%s%s: %s",
+ strings.Repeat(" ", padding-len(k)),
+ strings.Title(k),
+ info[k]))
+ }
+ c.UI.Output("")
+
// Release the log gate.
c.logGate.Flush()
diff --git a/command/agent/cache/api_proxy.go b/command/agent/cache/api_proxy.go
new file mode 100644
index 000000000000..43469a8ce369
--- /dev/null
+++ b/command/agent/cache/api_proxy.go
@@ -0,0 +1,61 @@
+package cache
+
+import (
+ "bytes"
+ "context"
+ "io/ioutil"
+
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+)
+
+// APIProxy is an implementation of the proxier interface that is used to
+// forward the request to Vault and get the response.
+type APIProxy struct {
+ logger hclog.Logger
+}
+
+type APIProxyConfig struct {
+ Logger hclog.Logger
+}
+
+func NewAPIProxy(config *APIProxyConfig) Proxier {
+ return &APIProxy{
+ logger: config.Logger,
+ }
+}
+
+func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		return nil, err
+	}
+	client.SetToken(req.Token)
+	client.SetHeaders(req.Request.Header)
+
+	fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path)
+	fwReq.BodyBytes = req.RequestBody
+
+	// Make the request to Vault and get the response
+	ap.logger.Info("forwarding request", "path", req.Request.URL.Path, "method", req.Request.Method)
+	resp, err := client.RawRequestWithContext(ctx, fwReq)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse and reset response body
+	respBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		ap.logger.Error("failed to read response body", "error", err)
+		return nil, err
+	}
+	if resp.Body != nil {
+		resp.Body.Close()
+	}
+	resp.Body = ioutil.NopCloser(bytes.NewBuffer(respBody))
+
+	return &SendResponse{
+		Response:     resp,
+		ResponseBody: respBody,
+	}, nil
+}
diff --git a/command/agent/cache/api_proxy_test.go b/command/agent/cache/api_proxy_test.go
new file mode 100644
index 000000000000..9a68acd36d31
--- /dev/null
+++ b/command/agent/cache/api_proxy_test.go
@@ -0,0 +1,43 @@
+package cache
+
+import (
+ "testing"
+
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/logging"
+ "github.com/hashicorp/vault/helper/namespace"
+)
+
+func TestCache_APIProxy(t *testing.T) {
+ cleanup, client, _, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+ defer cleanup()
+
+ proxier := NewAPIProxy(&APIProxyConfig{
+ Logger: logging.NewVaultLogger(hclog.Trace),
+ })
+
+ r := client.NewRequest("GET", "/v1/sys/health")
+ req, err := r.ToRetryableHTTP()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{
+ Request: req.Request,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var result api.HealthResponse
+ err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !result.Initialized || result.Sealed || result.Standby {
+ t.Fatalf("bad sys/health response")
+ }
+}
diff --git a/command/agent/cache/cache_test.go b/command/agent/cache/cache_test.go
new file mode 100644
index 000000000000..34f6b4b853f7
--- /dev/null
+++ b/command/agent/cache/cache_test.go
@@ -0,0 +1,926 @@
+package cache
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+
+ "github.com/go-test/deep"
+ hclog "github.com/hashicorp/go-hclog"
+ kv "github.com/hashicorp/vault-plugin-secrets-kv"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/credential/userpass"
+ "github.com/hashicorp/vault/helper/logging"
+ "github.com/hashicorp/vault/helper/namespace"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/vault"
+)
+
+const policyAdmin = `
+path "*" {
+ capabilities = ["sudo", "create", "read", "update", "delete", "list"]
+}
+`
+
+// setupClusterAndAgent is a helper func used to set up a test cluster and
+// caching agent. It returns a cleanup func that should be deferred immediately
+// along with two clients, one for direct cluster communication and another to
+// talk to the caching agent.
+func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) {
+ t.Helper()
+
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ // Handle sane defaults
+ if coreConfig == nil {
+ coreConfig = &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: logging.NewVaultLogger(hclog.Trace),
+ CredentialBackends: map[string]logical.Factory{
+ "userpass": userpass.Factory,
+ },
+ }
+ }
+
+ if coreConfig.CredentialBackends == nil {
+ coreConfig.CredentialBackends = map[string]logical.Factory{
+ "userpass": userpass.Factory,
+ }
+ }
+
+ // Init new test cluster
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+
+ cores := cluster.Cores
+ vault.TestWaitActive(t, cores[0].Core)
+
+ // clusterClient is the client that is used to talk directly to the cluster.
+ clusterClient := cores[0].Client
+
+ // Add an admin policy
+ if err := clusterClient.Sys().PutPolicy("admin", policyAdmin); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the userpass auth backend and an admin user. Used for getting a token
+ // for the agent later down in this func.
+ clusterClient.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{
+ Type: "userpass",
+ })
+
+ _, err := clusterClient.Logical().Write("auth/userpass/users/foo", map[string]interface{}{
+ "password": "bar",
+ "policies": []string{"admin"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up env vars for agent consumption
+ origEnvVaultAddress := os.Getenv(api.EnvVaultAddress)
+ os.Setenv(api.EnvVaultAddress, clusterClient.Address())
+
+ origEnvVaultCACert := os.Getenv(api.EnvVaultCACert)
+ os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir))
+
+ cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache")
+
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create the API proxier
+ apiProxy := NewAPIProxy(&APIProxyConfig{
+ Logger: cacheLogger.Named("apiproxy"),
+ })
+
+ // Create the lease cache proxier and set its underlying proxier to
+ // the API proxier.
+ leaseCache, err := NewLeaseCache(&LeaseCacheConfig{
+ BaseContext: ctx,
+ Proxier: apiProxy,
+ Logger: cacheLogger.Named("leasecache"),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a muxer and add paths relevant for the lease cache layer
+ mux := http.NewServeMux()
+ mux.Handle("/v1/agent/cache-clear", leaseCache.HandleCacheClear(ctx))
+
+ mux.Handle("/", Handler(ctx, cacheLogger, leaseCache, false, clusterClient))
+ server := &http.Server{
+ Handler: mux,
+ ReadHeaderTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ IdleTimeout: 5 * time.Minute,
+ ErrorLog: cacheLogger.StandardLogger(nil),
+ }
+ go server.Serve(listener)
+
+ // testClient is the client that is used to talk to the agent for proxying/caching behavior.
+ testClient, err := clusterClient.Clone()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Login via userpass method to derive a managed token. Set that token as the
+ // testClient's token
+ resp, err := testClient.Logical().Write("auth/userpass/login/foo", map[string]interface{}{
+ "password": "bar",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ testClient.SetToken(resp.Auth.ClientToken)
+
+ cleanup := func() {
+ cluster.Cleanup()
+ os.Setenv(api.EnvVaultAddress, origEnvVaultAddress)
+ os.Setenv(api.EnvVaultCACert, origEnvVaultCACert)
+ listener.Close()
+ }
+
+ return cleanup, clusterClient, testClient, leaseCache
+}
+
+func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) {
+ t.Helper()
+ for val, valType := range sampleSpace {
+ index, err := leaseCache.db.Get(valType, val)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expected[val] == "" && index != nil {
+ t.Fatalf("failed to evict index from the cache: type: %q, value: %q", valType, val)
+ }
+ if expected[val] != "" && index == nil {
+ t.Fatalf("evicted an undesired index from cache: type: %q, value: %q", valType, val)
+ }
+ }
+}
+
+func TestCache_TokenRevocations_RevokeOrphan(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ sampleSpace := make(map[string]string)
+
+ cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ token1 := testClient.Token()
+ sampleSpace[token1] = "token"
+
+ // Mount the kv backend
+ err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a secret in the backend
+ _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the secret and create a lease
+ leaseResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease1 := leaseResp.LeaseID
+ sampleSpace[lease1] = "lease"
+
+ resp, err := testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token2 := resp.Auth.ClientToken
+ sampleSpace[token2] = "token"
+
+ testClient.SetToken(token2)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease2 := leaseResp.LeaseID
+ sampleSpace[lease2] = "lease"
+
+ resp, err = testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token3 := resp.Auth.ClientToken
+ sampleSpace[token3] = "token"
+
+ testClient.SetToken(token3)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease3 := leaseResp.LeaseID
+ sampleSpace[lease3] = "lease"
+
+ expected := make(map[string]string)
+ for k, v := range sampleSpace {
+ expected[k] = v
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+ // Revoke-orphan the intermediate token. This should result in its own
+ // eviction and evictions of the revoked token's leases. All other things
+ // including the child tokens and leases of the child tokens should be
+ // untouched.
+ testClient.SetToken(token2)
+ err = testClient.Auth().Token().RevokeOrphan(token2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(1 * time.Second)
+
+ expected = map[string]string{
+ token1: "token",
+ lease1: "lease",
+ token3: "token",
+ lease3: "lease",
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_LeafLevelToken(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ sampleSpace := make(map[string]string)
+
+ cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ token1 := testClient.Token()
+ sampleSpace[token1] = "token"
+
+ // Mount the kv backend
+ err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a secret in the backend
+ _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the secret and create a lease
+ leaseResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease1 := leaseResp.LeaseID
+ sampleSpace[lease1] = "lease"
+
+ resp, err := testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token2 := resp.Auth.ClientToken
+ sampleSpace[token2] = "token"
+
+ testClient.SetToken(token2)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease2 := leaseResp.LeaseID
+ sampleSpace[lease2] = "lease"
+
+ resp, err = testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token3 := resp.Auth.ClientToken
+ sampleSpace[token3] = "token"
+
+ testClient.SetToken(token3)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease3 := leaseResp.LeaseID
+ sampleSpace[lease3] = "lease"
+
+ expected := make(map[string]string)
+ for k, v := range sampleSpace {
+ expected[k] = v
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+	// Revoke the leaf token. This should evict all the leases belonging to this
+ // token, evict entries for all the child tokens and their respective
+ // leases.
+ testClient.SetToken(token3)
+ err = testClient.Auth().Token().RevokeSelf("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(1 * time.Second)
+
+ expected = map[string]string{
+ token1: "token",
+ lease1: "lease",
+ token2: "token",
+ lease2: "lease",
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_IntermediateLevelToken(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ sampleSpace := make(map[string]string)
+
+ cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ token1 := testClient.Token()
+ sampleSpace[token1] = "token"
+
+ // Mount the kv backend
+ err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a secret in the backend
+ _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the secret and create a lease
+ leaseResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease1 := leaseResp.LeaseID
+ sampleSpace[lease1] = "lease"
+
+ resp, err := testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token2 := resp.Auth.ClientToken
+ sampleSpace[token2] = "token"
+
+ testClient.SetToken(token2)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease2 := leaseResp.LeaseID
+ sampleSpace[lease2] = "lease"
+
+ resp, err = testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token3 := resp.Auth.ClientToken
+ sampleSpace[token3] = "token"
+
+ testClient.SetToken(token3)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease3 := leaseResp.LeaseID
+ sampleSpace[lease3] = "lease"
+
+ expected := make(map[string]string)
+ for k, v := range sampleSpace {
+ expected[k] = v
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+ // Revoke the second level token. This should evict all the leases
+ // belonging to this token, evict entries for all the child tokens and
+ // their respective leases.
+ testClient.SetToken(token2)
+ err = testClient.Auth().Token().RevokeSelf("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(1 * time.Second)
+
+ expected = map[string]string{
+ token1: "token",
+ lease1: "lease",
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_TopLevelToken(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ sampleSpace := make(map[string]string)
+
+ cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ token1 := testClient.Token()
+ sampleSpace[token1] = "token"
+
+ // Mount the kv backend
+ err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a secret in the backend
+ _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the secret and create a lease
+ leaseResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease1 := leaseResp.LeaseID
+ sampleSpace[lease1] = "lease"
+
+ resp, err := testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token2 := resp.Auth.ClientToken
+ sampleSpace[token2] = "token"
+
+ testClient.SetToken(token2)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease2 := leaseResp.LeaseID
+ sampleSpace[lease2] = "lease"
+
+ resp, err = testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token3 := resp.Auth.ClientToken
+ sampleSpace[token3] = "token"
+
+ testClient.SetToken(token3)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease3 := leaseResp.LeaseID
+ sampleSpace[lease3] = "lease"
+
+ expected := make(map[string]string)
+ for k, v := range sampleSpace {
+ expected[k] = v
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+ // Revoke the top level token. This should evict all the leases belonging
+ // to this token, evict entries for all the child tokens and their
+ // respective leases.
+ testClient.SetToken(token1)
+ err = testClient.Auth().Token().RevokeSelf("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(1 * time.Second)
+
+ expected = make(map[string]string)
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_Shutdown(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ sampleSpace := make(map[string]string)
+
+ ctx, rootCancelFunc := context.WithCancel(namespace.RootContext(nil))
+ cleanup, _, testClient, leaseCache := setupClusterAndAgent(ctx, t, coreConfig)
+ defer cleanup()
+
+ token1 := testClient.Token()
+ sampleSpace[token1] = "token"
+
+ // Mount the kv backend
+ err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a secret in the backend
+ _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the secret and create a lease
+ leaseResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease1 := leaseResp.LeaseID
+ sampleSpace[lease1] = "lease"
+
+ resp, err := testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token2 := resp.Auth.ClientToken
+ sampleSpace[token2] = "token"
+
+ testClient.SetToken(token2)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease2 := leaseResp.LeaseID
+ sampleSpace[lease2] = "lease"
+
+ resp, err = testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token3 := resp.Auth.ClientToken
+ sampleSpace[token3] = "token"
+
+ testClient.SetToken(token3)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease3 := leaseResp.LeaseID
+ sampleSpace[lease3] = "lease"
+
+ expected := make(map[string]string)
+ for k, v := range sampleSpace {
+ expected[k] = v
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+ rootCancelFunc()
+ time.Sleep(1 * time.Second)
+
+ // Ensure that all the entries are now gone
+ expected = make(map[string]string)
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_BaseContextCancellation(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ sampleSpace := make(map[string]string)
+
+ cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ token1 := testClient.Token()
+ sampleSpace[token1] = "token"
+
+ // Mount the kv backend
+ err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a secret in the backend
+ _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the secret and create a lease
+ leaseResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease1 := leaseResp.LeaseID
+ sampleSpace[lease1] = "lease"
+
+ resp, err := testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token2 := resp.Auth.ClientToken
+ sampleSpace[token2] = "token"
+
+ testClient.SetToken(token2)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease2 := leaseResp.LeaseID
+ sampleSpace[lease2] = "lease"
+
+ resp, err = testClient.Logical().Write("auth/token/create", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ token3 := resp.Auth.ClientToken
+ sampleSpace[token3] = "token"
+
+ testClient.SetToken(token3)
+
+ leaseResp, err = testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lease3 := leaseResp.LeaseID
+ sampleSpace[lease3] = "lease"
+
+ expected := make(map[string]string)
+ for k, v := range sampleSpace {
+ expected[k] = v
+ }
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+ // Cancel the base context of the lease cache. This should trigger
+ // evictions of all the entries from the cache.
+ leaseCache.baseCtxInfo.CancelFunc()
+ time.Sleep(1 * time.Second)
+
+ // Ensure that all the entries are now gone
+ expected = make(map[string]string)
+ tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_NonCacheable(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": kv.Factory,
+ },
+ }
+
+ cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ // Query mounts first
+ origMounts, err := testClient.Sys().ListMounts()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Mount a kv backend
+ if err := testClient.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ Options: map[string]string{
+ "version": "2",
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Query mounts again
+ newMounts, err := testClient.Sys().ListMounts()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if diff := deep.Equal(origMounts, newMounts); diff == nil {
+ t.Logf("response #1: %#v", origMounts)
+ t.Logf("response #2: %#v", newMounts)
+ t.Fatal("expected requests to be not cached")
+ }
+}
+
+func TestCache_AuthResponse(t *testing.T) {
+	cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+	defer cleanup()
+
+	resp, err := testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token := resp.Auth.ClientToken
+	testClient.SetToken(token)
+
+	authTokenCreateReq := func(t *testing.T, policies map[string]interface{}) *api.Secret {
+		resp, err := testClient.Logical().Write("auth/token/create", policies)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp.Auth == nil || resp.Auth.ClientToken == "" {
+			t.Fatalf("expected a valid client token in the response, got = %#v", resp)
+		}
+
+		return resp
+	}
+
+	// Test on auth response by creating a child token
+	{
+		proxiedResp := authTokenCreateReq(t, map[string]interface{}{
+			"policies": "default",
+		})
+
+		cachedResp := authTokenCreateReq(t, map[string]interface{}{
+			"policies": "default",
+		})
+
+		if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil {
+			t.Fatal(diff)
+		}
+	}
+
+	// Test on *non-renewable* auth response by creating a child root token
+	{
+		proxiedResp := authTokenCreateReq(t, nil)
+
+		cachedResp := authTokenCreateReq(t, nil)
+
+		if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil {
+			t.Fatal(diff)
+		}
+	}
+}
+
+func TestCache_LeaseResponse(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: hclog.NewNullLogger(),
+ LogicalBackends: map[string]logical.Factory{
+ "kv": vault.LeasedPassthroughBackendFactory,
+ },
+ }
+
+ cleanup, client, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+ defer cleanup()
+
+ err := client.Sys().Mount("kv", &api.MountInput{
+ Type: "kv",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test proxy by issuing two different requests
+ {
+ // Write data to the lease-kv backend
+ _, err := testClient.Logical().Write("kv/foo", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = testClient.Logical().Write("kv/foobar", map[string]interface{}{
+ "value": "bar",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ firstResp, err := testClient.Logical().Read("kv/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ secondResp, err := testClient.Logical().Read("kv/foobar")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if diff := deep.Equal(firstResp, secondResp); diff == nil {
+ t.Logf("response: %#v", firstResp)
+ t.Fatal("expected proxied responses, got cached response on second request")
+ }
+ }
+
+ // Test caching behavior by issue the same request twice
+ {
+ _, err := testClient.Logical().Write("kv/baz", map[string]interface{}{
+ "value": "foo",
+ "ttl": "1h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ proxiedResp, err := testClient.Logical().Read("kv/baz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cachedResp, err := testClient.Logical().Read("kv/baz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if diff := deep.Equal(proxiedResp, cachedResp); diff != nil {
+ t.Fatal(diff)
+ }
+ }
+}
diff --git a/command/agent/cache/cachememdb/cache_memdb.go b/command/agent/cache/cachememdb/cache_memdb.go
new file mode 100644
index 000000000000..8f9aabfdd295
--- /dev/null
+++ b/command/agent/cache/cachememdb/cache_memdb.go
@@ -0,0 +1,265 @@
+package cachememdb
+
+import (
+ "errors"
+ "fmt"
+
+ memdb "github.com/hashicorp/go-memdb"
+)
+
+const (
+ tableNameIndexer = "indexer"
+)
+
+// CacheMemDB is the underlying cache database for storing indexes.
+type CacheMemDB struct {
+ db *memdb.MemDB
+}
+
+// New creates a new instance of CacheMemDB.
+func New() (*CacheMemDB, error) {
+ db, err := newDB()
+ if err != nil {
+ return nil, err
+ }
+
+ return &CacheMemDB{
+ db: db,
+ }, nil
+}
+
+func newDB() (*memdb.MemDB, error) {
+ cacheSchema := &memdb.DBSchema{
+ Tables: map[string]*memdb.TableSchema{
+ tableNameIndexer: &memdb.TableSchema{
+ Name: tableNameIndexer,
+ Indexes: map[string]*memdb.IndexSchema{
+ // This index enables fetching the cached item based on the
+ // identifier of the index.
+ IndexNameID: &memdb.IndexSchema{
+ Name: IndexNameID,
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{
+ Field: "ID",
+ },
+ },
+ // This index enables fetching all the entries in cache for
+ // a given request path, in a given namespace.
+ IndexNameRequestPath: &memdb.IndexSchema{
+ Name: IndexNameRequestPath,
+ Unique: false,
+ Indexer: &memdb.CompoundIndex{
+ Indexes: []memdb.Indexer{
+ &memdb.StringFieldIndex{
+ Field: "Namespace",
+ },
+ &memdb.StringFieldIndex{
+ Field: "RequestPath",
+ },
+ },
+ },
+ },
+ // This index enables fetching all the entries in cache
+ // belonging to the leases of a given token.
+ IndexNameLeaseToken: &memdb.IndexSchema{
+ Name: IndexNameLeaseToken,
+ Unique: false,
+ AllowMissing: true,
+ Indexer: &memdb.StringFieldIndex{
+ Field: "LeaseToken",
+ },
+ },
+ // This index enables fetching all the entries in cache
+ // that are tied to the given token, regardless of the
+ // entries belonging to the token or belonging to the
+ // lease.
+ IndexNameToken: &memdb.IndexSchema{
+ Name: IndexNameToken,
+ Unique: true,
+ AllowMissing: true,
+ Indexer: &memdb.StringFieldIndex{
+ Field: "Token",
+ },
+ },
+ // This index enables fetching all the entries in cache for
+ // the given parent token.
+ IndexNameTokenParent: &memdb.IndexSchema{
+ Name: IndexNameTokenParent,
+ Unique: false,
+ AllowMissing: true,
+ Indexer: &memdb.StringFieldIndex{
+ Field: "TokenParent",
+ },
+ },
+ // This index enables fetching all the entries in cache for
+ // the given accessor.
+ IndexNameTokenAccessor: &memdb.IndexSchema{
+ Name: IndexNameTokenAccessor,
+ Unique: true,
+ AllowMissing: true,
+ Indexer: &memdb.StringFieldIndex{
+ Field: "TokenAccessor",
+ },
+ },
+ // This index enables fetching all the entries in cache for
+ // the given lease identifier.
+ IndexNameLease: &memdb.IndexSchema{
+ Name: IndexNameLease,
+ Unique: true,
+ AllowMissing: true,
+ Indexer: &memdb.StringFieldIndex{
+ Field: "Lease",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ db, err := memdb.NewMemDB(cacheSchema)
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
+// Get returns the index based on the indexer and the index values provided.
+func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, error) {
+ if !validIndexName(indexName) {
+ return nil, fmt.Errorf("invalid index name %q", indexName)
+ }
+
+ raw, err := c.db.Txn(false).First(tableNameIndexer, indexName, indexValues...)
+ if err != nil {
+ return nil, err
+ }
+
+ if raw == nil {
+ return nil, nil
+ }
+
+ index, ok := raw.(*Index)
+ if !ok {
+ return nil, errors.New("unable to parse index value from the cache")
+ }
+
+ return index, nil
+}
+
+// Set stores the index into the cache.
+func (c *CacheMemDB) Set(index *Index) error {
+ if index == nil {
+ return errors.New("nil index provided")
+ }
+
+ txn := c.db.Txn(true)
+ defer txn.Abort()
+
+ if err := txn.Insert(tableNameIndexer, index); err != nil {
+ return fmt.Errorf("unable to insert index into cache: %v", err)
+ }
+
+ txn.Commit()
+
+ return nil
+}
+
+// GetByPrefix returns all the cached indexes based on the index name and the
+// value prefix.
+func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ([]*Index, error) {
+ if !validIndexName(indexName) {
+ return nil, fmt.Errorf("invalid index name %q", indexName)
+ }
+
+ indexName = indexName + "_prefix"
+
+ // Get all the objects
+ iter, err := c.db.Txn(false).Get(tableNameIndexer, indexName, indexValues...)
+ if err != nil {
+ return nil, err
+ }
+
+ var indexes []*Index
+ for {
+ obj := iter.Next()
+ if obj == nil {
+ break
+ }
+ index, ok := obj.(*Index)
+ if !ok {
+ return nil, fmt.Errorf("failed to cast cached index")
+ }
+
+ indexes = append(indexes, index)
+ }
+
+ return indexes, nil
+}
+
+// Evict removes an index from the cache based on index name and value.
+func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error {
+ index, err := c.Get(indexName, indexValues...)
+ if err != nil {
+ return fmt.Errorf("unable to fetch index on cache deletion: %v", err)
+ }
+
+ if index == nil {
+ return nil
+ }
+
+ txn := c.db.Txn(true)
+ defer txn.Abort()
+
+ if err := txn.Delete(tableNameIndexer, index); err != nil {
+ return fmt.Errorf("unable to delete index from cache: %v", err)
+ }
+
+ txn.Commit()
+
+ return nil
+}
+
+// EvictAll removes all matching indexes from the cache based on index name and value.
+func (c *CacheMemDB) EvictAll(indexName, indexValue string) error {
+ return c.batchEvict(false, indexName, indexValue)
+}
+
+// EvictByPrefix removes all matching prefix indexes from the cache based on index name and prefix.
+func (c *CacheMemDB) EvictByPrefix(indexName, indexPrefix string) error {
+ return c.batchEvict(true, indexName, indexPrefix)
+}
+
+// batchEvict is a helper that supports eviction based on absolute and prefixed index values.
+func (c *CacheMemDB) batchEvict(isPrefix bool, indexName string, indexValues ...interface{}) error {
+ if !validIndexName(indexName) {
+ return fmt.Errorf("invalid index name %q", indexName)
+ }
+
+ if isPrefix {
+ indexName = indexName + "_prefix"
+ }
+
+ txn := c.db.Txn(true)
+ defer txn.Abort()
+
+ _, err := txn.DeleteAll(tableNameIndexer, indexName, indexValues...)
+ if err != nil {
+ return err
+ }
+
+ txn.Commit()
+
+ return nil
+}
+
+// Flush resets the underlying cache object.
+func (c *CacheMemDB) Flush() error {
+ newDB, err := newDB()
+ if err != nil {
+ return err
+ }
+
+ c.db = newDB
+
+ return nil
+}
diff --git a/command/agent/cache/cachememdb/cache_memdb_test.go b/command/agent/cache/cachememdb/cache_memdb_test.go
new file mode 100644
index 000000000000..a8af42f5356f
--- /dev/null
+++ b/command/agent/cache/cachememdb/cache_memdb_test.go
@@ -0,0 +1,388 @@
+package cachememdb
+
+import (
+ "context"
+ "testing"
+
+ "github.com/go-test/deep"
+)
+
+// testContextInfo returns a cancellable ContextInfo fixture for tests.
+func testContextInfo() *ContextInfo {
+ ctx, cancelFunc := context.WithCancel(context.Background())
+
+ return &ContextInfo{
+ Ctx: ctx,
+ CancelFunc: cancelFunc,
+ }
+}
+
+// TestNew verifies that a cache instance can be constructed without error.
+func TestNew(t *testing.T) {
+ _, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestCacheMemDB_Get exercises Get with an invalid index name, an empty
+// cache, and a populated cache queried through every queryable index.
+func TestCacheMemDB_Get(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test invalid index name
+ _, err = cache.Get("foo", "bar")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ // Test on empty cache
+ index, err := cache.Get(IndexNameID, "foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if index != nil {
+ t.Fatalf("expected nil index, got: %v", index)
+ }
+
+ // Populate cache
+ in := &Index{
+ ID: "test_id",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path",
+ Token: "test_token",
+ TokenAccessor: "test_accessor",
+ Lease: "test_lease",
+ Response: []byte("hello world"),
+ }
+
+ if err := cache.Set(in); err != nil {
+ t.Fatal(err)
+ }
+
+ // Each case looks up the same entry through a different index; the
+ // request_path index is compound (namespace + path), hence two values.
+ testCases := []struct {
+ name string
+ indexName string
+ indexValues []interface{}
+ }{
+ {
+ "by_index_id",
+ "id",
+ []interface{}{in.ID},
+ },
+ {
+ "by_request_path",
+ "request_path",
+ []interface{}{in.Namespace, in.RequestPath},
+ },
+ {
+ "by_lease",
+ "lease",
+ []interface{}{in.Lease},
+ },
+ {
+ "by_token",
+ "token",
+ []interface{}{in.Token},
+ },
+ {
+ "by_token_accessor",
+ "token_accessor",
+ []interface{}{in.TokenAccessor},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ out, err := cache.Get(tc.indexName, tc.indexValues...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(in, out); diff != nil {
+ t.Fatal(diff)
+ }
+ })
+ }
+}
+
+// TestCacheMemDB_GetByPrefix verifies that prefix lookups return all entries
+// sharing a common indexed-value prefix, across every prefix-capable index.
+func TestCacheMemDB_GetByPrefix(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test invalid index name
+ _, err = cache.GetByPrefix("foo", "bar", "baz")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ // Test on empty cache
+ index, err := cache.GetByPrefix(IndexNameRequestPath, "foo", "bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if index != nil {
+ t.Fatalf("expected nil index, got: %v", index)
+ }
+
+ // Populate cache
+ in := &Index{
+ ID: "test_id",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path/1",
+ Token: "test_token",
+ TokenAccessor: "test_accessor",
+ Lease: "path/to/test_lease/1",
+ Response: []byte("hello world"),
+ }
+
+ if err := cache.Set(in); err != nil {
+ t.Fatal(err)
+ }
+
+ // Populate cache with a second entry sharing the same prefixes, so each
+ // prefix query below should return both entries.
+ in2 := &Index{
+ ID: "test_id_2",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path/2",
+ Token: "test_token",
+ TokenAccessor: "test_accessor",
+ Lease: "path/to/test_lease/2",
+ Response: []byte("hello world"),
+ }
+
+ if err := cache.Set(in2); err != nil {
+ t.Fatal(err)
+ }
+
+ testCases := []struct {
+ name string
+ indexName string
+ indexValues []interface{}
+ }{
+ {
+ "by_request_path",
+ "request_path",
+ []interface{}{"test_ns/", "/v1/request/path"},
+ },
+ {
+ "by_lease",
+ "lease",
+ []interface{}{"path/to/test_lease"},
+ },
+ {
+ "by_token",
+ "token",
+ []interface{}{"test_token"},
+ },
+ {
+ "by_token_accessor",
+ "token_accessor",
+ []interface{}{"test_accessor"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ out, err := cache.GetByPrefix(tc.indexName, tc.indexValues...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if diff := deep.Equal([]*Index{in, in2}, out); diff != nil {
+ t.Fatal(diff)
+ }
+ })
+ }
+}
+
+// TestCacheMemDB_Set checks Set's input validation: nil indexes and indexes
+// missing required fields are rejected, while a fully-populated index is
+// accepted.
+func TestCacheMemDB_Set(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCases := []struct {
+ name string
+ index *Index
+ wantErr bool
+ }{
+ {
+ "nil",
+ nil,
+ true,
+ },
+ {
+ "empty_fields",
+ &Index{},
+ true,
+ },
+ {
+ "missing_required_fields",
+ &Index{
+ Lease: "foo",
+ },
+ true,
+ },
+ {
+ "all_fields",
+ &Index{
+ ID: "test_id",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path",
+ Token: "test_token",
+ TokenAccessor: "test_accessor",
+ Lease: "test_lease",
+ RenewCtxInfo: testContextInfo(),
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ if err := cache.Set(tc.index); (err != nil) != tc.wantErr {
+ t.Fatalf("CacheMemDB.Set() error = %v, wantErr = %v", err, tc.wantErr)
+ }
+ })
+ }
+}
+
+// TestCacheMemDB_Evict exercises eviction through each queryable index,
+// re-inserting the same test index before each eviction case.
+func TestCacheMemDB_Evict(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test on empty cache
+ if err := cache.Evict(IndexNameID, "foo"); err != nil {
+ t.Fatal(err)
+ }
+
+ testIndex := &Index{
+ ID: "test_id",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path",
+ Token: "test_token",
+ TokenAccessor: "test_token_accessor",
+ Lease: "test_lease",
+ RenewCtxInfo: testContextInfo(),
+ }
+
+ testCases := []struct {
+ name string
+ indexName string
+ indexValues []interface{}
+ insertIndex *Index
+ wantErr bool
+ }{
+ {
+ "empty_params",
+ "",
+ []interface{}{""},
+ nil,
+ true,
+ },
+ {
+ "invalid_params",
+ "foo",
+ []interface{}{"bar"},
+ nil,
+ true,
+ },
+ {
+ "by_id",
+ "id",
+ []interface{}{"test_id"},
+ testIndex,
+ false,
+ },
+ {
+ "by_request_path",
+ "request_path",
+ []interface{}{"test_ns/", "/v1/request/path"},
+ testIndex,
+ false,
+ },
+ {
+ "by_token",
+ "token",
+ []interface{}{"test_token"},
+ testIndex,
+ false,
+ },
+ {
+ // NOTE(review): the inserted index uses accessor
+ // "test_token_accessor", so this eviction matches nothing and
+ // the case passes vacuously — confirm whether the value here
+ // should be "test_token_accessor".
+ "by_token_accessor",
+ "token_accessor",
+ []interface{}{"test_accessor"},
+ testIndex,
+ false,
+ },
+ {
+ "by_lease",
+ "lease",
+ []interface{}{"test_lease"},
+ testIndex,
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.insertIndex != nil {
+ if err := cache.Set(tc.insertIndex); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if err := cache.Evict(tc.indexName, tc.indexValues...); (err != nil) != tc.wantErr {
+ t.Fatal(err)
+ }
+
+ // Verify that the cache doesn't contain the entry any more
+ index, err := cache.Get(tc.indexName, tc.indexValues...)
+ if (err != nil) != tc.wantErr {
+ t.Fatal(err)
+ }
+
+ if index != nil {
+ t.Fatalf("expected nil entry, got = %#v", index)
+ }
+ })
+ }
+}
+
+// TestCacheMemDB_Flush verifies that Flush drops previously-inserted entries.
+func TestCacheMemDB_Flush(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Populate cache
+ in := &Index{
+ ID: "test_id",
+ Token: "test_token",
+ Lease: "test_lease",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path",
+ Response: []byte("hello world"),
+ }
+
+ if err := cache.Set(in); err != nil {
+ t.Fatal(err)
+ }
+
+ // Reset the cache
+ if err := cache.Flush(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check the cache doesn't contain inserted index
+ out, err := cache.Get(IndexNameID, "test_id")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if out != nil {
+ t.Fatalf("expected cache to be empty, got = %v", out)
+ }
+}
diff --git a/command/agent/cache/cachememdb/index.go b/command/agent/cache/cachememdb/index.go
new file mode 100644
index 000000000000..4d932ca4f2fd
--- /dev/null
+++ b/command/agent/cache/cachememdb/index.go
@@ -0,0 +1,97 @@
+package cachememdb
+
+import "context"
+
+// ContextInfo bundles the context, cancel function, and done channel that
+// control the renewal goroutine attached to a cached entry.
+type ContextInfo struct {
+ Ctx context.Context
+ CancelFunc context.CancelFunc
+ DoneCh chan struct{}
+}
+
+// Index holds the response to be cached along with multiple other values that
+// serve as pointers to refer back to this index.
+type Index struct {
+ // ID is a value that uniquely represents the request held by this
+ // index. This is computed by serializing and hashing the response object.
+ // Required: true, Unique: true
+ ID string
+
+ // Token is the token that fetched the response held by this index
+ // Required: true, Unique: true
+ Token string
+
+ // TokenParent is the parent token of the token held by this index
+ // Required: false, Unique: false
+ TokenParent string
+
+ // TokenAccessor is the accessor of the token being cached in this index
+ // Required: true, Unique: true
+ TokenAccessor string
+
+ // Namespace is the namespace that was provided in the request path as the
+ // Vault namespace to query
+ Namespace string
+
+ // RequestPath is the path of the request that resulted in the response
+ // held by this index.
+ // Required: true, Unique: false
+ RequestPath string
+
+ // Lease is the identifier of the lease in Vault, that belongs to the
+ // response held by this index.
+ // Required: false, Unique: true
+ Lease string
+
+ // LeaseToken is the identifier of the token that created the lease held by
+ // this index.
+ // Required: false, Unique: false
+ LeaseToken string
+
+ // Response is the serialized response object that the agent is caching.
+ Response []byte
+
+ // RenewCtxInfo holds the context and the corresponding cancel func for the
+ // goroutine that manages the renewal of the secret belonging to the
+ // response in this index.
+ RenewCtxInfo *ContextInfo
+}
+
+// IndexName enumerates the queryable index names.
+// NOTE(review): this type is declared as uint32 but the IndexName* constants
+// below are untyped strings and never use it — confirm whether it can be
+// removed or should type the constants.
+type IndexName uint32
+
+const (
+ // IndexNameID is the ID of the index constructed from the serialized request.
+ IndexNameID = "id"
+
+ // IndexNameLease is the lease of the index.
+ IndexNameLease = "lease"
+
+ // IndexNameRequestPath is the request path of the index.
+ IndexNameRequestPath = "request_path"
+
+ // IndexNameToken is the token of the index.
+ IndexNameToken = "token"
+
+ // IndexNameTokenAccessor is the token accessor of the index.
+ IndexNameTokenAccessor = "token_accessor"
+
+ // IndexNameTokenParent is the token parent of the index.
+ IndexNameTokenParent = "token_parent"
+
+ // IndexNameLeaseToken is the token that created the lease.
+ IndexNameLeaseToken = "lease_token"
+)
+
+// validIndexName reports whether indexName is one of the queryable index
+// names defined by the IndexName* constants above.
+// NOTE(review): the cases duplicate those constants as string literals;
+// consider switching on the constants to keep the two lists in sync.
+func validIndexName(indexName string) bool {
+ switch indexName {
+ case "id":
+ case "lease":
+ case "request_path":
+ case "token":
+ case "token_accessor":
+ case "token_parent":
+ case "lease_token":
+ default:
+ return false
+ }
+ return true
+}
diff --git a/command/agent/cache/handler.go b/command/agent/cache/handler.go
new file mode 100644
index 000000000000..10c36c7dd22b
--- /dev/null
+++ b/command/agent/cache/handler.go
@@ -0,0 +1,155 @@
+package cache
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/hashicorp/errwrap"
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/consts"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+)
+
+// Handler returns an http.Handler that forwards incoming requests through the
+// given Proxier and writes the proxied response back to the client. When
+// useAutoAuthToken is set and the request carries no Vault token header, the
+// auto-auth token from the supplied client is used instead.
+func Handler(ctx context.Context, logger hclog.Logger, proxier Proxier, useAutoAuthToken bool, client *api.Client) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ logger.Info("received request", "path", r.URL.Path, "method", r.Method)
+
+ token := r.Header.Get(consts.AuthHeaderName)
+ if token == "" && useAutoAuthToken {
+ logger.Debug("using auto auth token")
+ token = client.Token()
+ }
+
+ // Parse and reset body.
+ reqBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ logger.Error("failed to read request body")
+ respondError(w, http.StatusInternalServerError, errors.New("failed to read request body"))
+ // Bug fix: the handler previously fell through here and kept
+ // processing after already responding with a 500.
+ return
+ }
+ if r.Body != nil {
+ r.Body.Close()
+ }
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(reqBody))
+ req := &SendRequest{
+ Token: token,
+ Request: r,
+ RequestBody: reqBody,
+ }
+
+ resp, err := proxier.Send(ctx, req)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, errwrap.Wrapf("failed to get the response: {{err}}", err))
+ return
+ }
+
+ // Strip the auto-auth token's identifiers from lookup-self responses
+ // so they are never leaked to the caller.
+ err = processTokenLookupResponse(ctx, logger, useAutoAuthToken, client, req, resp)
+ if err != nil {
+ respondError(w, http.StatusInternalServerError, errwrap.Wrapf("failed to process token lookup response: {{err}}", err))
+ return
+ }
+
+ defer resp.Response.Body.Close()
+
+ // Mirror the proxied response (headers, status, body) to the client.
+ copyHeader(w.Header(), resp.Response.Header)
+ w.WriteHeader(resp.Response.StatusCode)
+ io.Copy(w, resp.Response.Body)
+ return
+ })
+}
+
+// processTokenLookupResponse checks if the request was one of token
+// lookup-self. If the auto-auth token was used to perform lookup-self, the
+// token's identifier and its accessor will be stripped from the response
+// before it is returned to the caller.
+func processTokenLookupResponse(ctx context.Context, logger hclog.Logger, useAutoAuthToken bool, client *api.Client, req *SendRequest, resp *SendResponse) error {
+ // If auto-auth token is not being used, there is nothing to do.
+ if !useAutoAuthToken {
+ return nil
+ }
+
+ // If lookup responded with non 200 status, there is nothing to do.
+ if resp.Response.StatusCode != http.StatusOK {
+ return nil
+ }
+
+ // Strip-off namespace related information from the request and get the
+ // relative path of the request.
+ _, path := deriveNamespaceAndRevocationPath(req)
+ if path == vaultPathTokenLookupSelf {
+ logger.Info("stripping auto-auth token from the response", "path", req.Request.URL.Path, "method", req.Request.Method)
+ secret, err := api.ParseSecret(bytes.NewBuffer(resp.ResponseBody))
+ if err != nil {
+ return fmt.Errorf("failed to parse token lookup response: %v", err)
+ }
+ if secret != nil && secret.Data != nil && secret.Data["id"] != nil {
+ token, ok := secret.Data["id"].(string)
+ if !ok {
+ return fmt.Errorf("failed to type assert the token id in the response")
+ }
+ // Only redact when the looked-up token is the agent's own
+ // auto-auth token.
+ if token == client.Token() {
+ delete(secret.Data, "id")
+ delete(secret.Data, "accessor")
+ }
+
+ bodyBytes, err := json.Marshal(secret)
+ if err != nil {
+ return err
+ }
+ if resp.Response.Body != nil {
+ resp.Response.Body.Close()
+ }
+ resp.Response.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+ resp.Response.ContentLength = int64(len(bodyBytes))
+
+ // Serialize and re-read the response so headers (e.g.
+ // Content-Length) are consistent with the rewritten body.
+ var respBytes bytes.Buffer
+ err = resp.Response.Write(&respBytes)
+ if err != nil {
+ return fmt.Errorf("failed to serialize the updated response: %v", err)
+ }
+
+ updatedResponse, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(respBytes.Bytes())), nil)
+ if err != nil {
+ return fmt.Errorf("failed to deserialize the updated response: %v", err)
+ }
+
+ resp.Response = &api.Response{
+ Response: updatedResponse,
+ }
+ resp.ResponseBody = bodyBytes
+ }
+ }
+ return nil
+}
+
+// copyHeader appends every value of every key in src to dst, preserving
+// multi-valued headers.
+func copyHeader(dst, src http.Header) {
+ for k, vv := range src {
+ for _, v := range vv {
+ dst.Add(k, v)
+ }
+ }
+}
+
+// respondError writes err as a JSON error response with the given HTTP
+// status, after letting logical.AdjustErrorStatusCode override the status for
+// well-known error types.
+func respondError(w http.ResponseWriter, status int, err error) {
+ logical.AdjustErrorStatusCode(&status, err)
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(status)
+
+ resp := &vaulthttp.ErrorResponse{Errors: make([]string, 0, 1)}
+ if err != nil {
+ resp.Errors = append(resp.Errors, err.Error())
+ }
+
+ // Best-effort encode; the status line has already been written, so an
+ // encoding failure here cannot be reported to the client.
+ enc := json.NewEncoder(w)
+ enc.Encode(resp)
+}
diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go
new file mode 100644
index 000000000000..a998ec96fb51
--- /dev/null
+++ b/command/agent/cache/lease_cache.go
@@ -0,0 +1,813 @@
+package cache
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/hashicorp/errwrap"
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ cachememdb "github.com/hashicorp/vault/command/agent/cache/cachememdb"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/namespace"
+ nshelper "github.com/hashicorp/vault/helper/namespace"
+)
+
+// Vault API paths that receive special handling by the caching layer
+// (token/lease creation, revocation, and lookup-self).
+const (
+ vaultPathTokenCreate = "/v1/auth/token/create"
+ vaultPathTokenRevoke = "/v1/auth/token/revoke"
+ vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self"
+ vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor"
+ vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan"
+ vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self"
+ vaultPathLeaseRevoke = "/v1/sys/leases/revoke"
+ vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force"
+ vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix"
+)
+
+var (
+ contextIndexID = contextIndex{}
+ errInvalidType = errors.New("invalid type provided")
+ // revocationPaths holds the namespace-relative (no "/v1" prefix)
+ // revocation endpoints used to detect revocation requests.
+ revocationPaths = []string{
+ strings.TrimPrefix(vaultPathTokenRevoke, "/v1"),
+ strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"),
+ strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"),
+ strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"),
+ strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"),
+ strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"),
+ strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"),
+ }
+)
+
+// contextIndex is the key type used to stash the cache index ID in a
+// renewer's context (see contextIndexID above).
+type contextIndex struct{}
+
+// cacheClearRequest is the JSON payload accepted by the cache-clear endpoint.
+type cacheClearRequest struct {
+ Type string `json:"type"`
+ Value string `json:"value"`
+ Namespace string `json:"namespace"`
+}
+
+// LeaseCache is an implementation of Proxier that handles
+// the caching of responses. It passes the incoming request
+// to an underlying Proxier implementation.
+type LeaseCache struct {
+ proxier Proxier
+ logger hclog.Logger
+ db *cachememdb.CacheMemDB
+ baseCtxInfo *ContextInfo
+}
+
+// LeaseCacheConfig is the configuration for initializing a new
+// LeaseCache.
+type LeaseCacheConfig struct {
+ BaseContext context.Context
+ Proxier Proxier
+ Logger hclog.Logger
+}
+
+// ContextInfo holds a derived context and cancelFunc pair.
+// NOTE(review): this duplicates cachememdb.ContextInfo — confirm whether the
+// two can be unified.
+type ContextInfo struct {
+ Ctx context.Context
+ CancelFunc context.CancelFunc
+ DoneCh chan struct{}
+}
+
+// NewLeaseCache creates a new instance of a LeaseCache. It requires a
+// non-nil config carrying a Proxier and Logger, and derives the cache's base
+// context (which parents every renewal goroutine) from conf.BaseContext.
+func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) {
+ if conf == nil {
+ return nil, errors.New("nil configuration provided")
+ }
+
+ if conf.Proxier == nil || conf.Logger == nil {
+ return nil, fmt.Errorf("missing configuration required params: %v", conf)
+ }
+
+ db, err := cachememdb.New()
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a base context for the lease cache layer
+ baseCtx, baseCancelFunc := context.WithCancel(conf.BaseContext)
+ baseCtxInfo := &ContextInfo{
+ Ctx: baseCtx,
+ CancelFunc: baseCancelFunc,
+ }
+
+ return &LeaseCache{
+ proxier: conf.Proxier,
+ logger: conf.Logger,
+ db: db,
+ baseCtxInfo: baseCtxInfo,
+ }, nil
+}
+
+// Send performs a cache lookup on the incoming request. If it's a cache hit,
+// it will return the cached response, otherwise it will delegate to the
+// underlying Proxier and cache the received response. Responses carrying a
+// renewable lease or token additionally get a renewal goroutine whose
+// lifetime is tied to the cached entry.
+func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
+ // Compute the index ID
+ id, err := computeIndexID(req)
+ if err != nil {
+ c.logger.Error("failed to compute cache key", "error", err)
+ return nil, err
+ }
+
+ // Check if the response for this request is already in the cache
+ index, err := c.db.Get(cachememdb.IndexNameID, id)
+ if err != nil {
+ return nil, err
+ }
+
+ // Cached request is found, deserialize the response and return early
+ if index != nil {
+ c.logger.Debug("returning cached response", "path", req.Request.URL.Path)
+
+ reader := bufio.NewReader(bytes.NewReader(index.Response))
+ resp, err := http.ReadResponse(reader, nil)
+ if err != nil {
+ c.logger.Error("failed to deserialize response", "error", err)
+ return nil, err
+ }
+
+ return &SendResponse{
+ Response: &api.Response{
+ Response: resp,
+ },
+ ResponseBody: index.Response,
+ }, nil
+ }
+
+ c.logger.Debug("forwarding request", "path", req.Request.URL.Path, "method", req.Request.Method)
+
+ // Pass the request down and get a response
+ resp, err := c.proxier.Send(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the namespace from the request header
+ // NOTE(review): this local shadows the imported "namespace" package
+ // (aliased as nshelper above) for the remainder of the function.
+ namespace := req.Request.Header.Get(consts.NamespaceHeaderName)
+ // We need to populate an empty value since go-memdb will skip over indexes
+ // that contain empty values.
+ if namespace == "" {
+ namespace = "root/"
+ }
+
+ // Build the index to cache based on the response received
+ index = &cachememdb.Index{
+ ID: id,
+ Namespace: namespace,
+ RequestPath: req.Request.URL.Path,
+ }
+
+ secret, err := api.ParseSecret(bytes.NewBuffer(resp.ResponseBody))
+ if err != nil {
+ c.logger.Error("failed to parse response as secret", "error", err)
+ return nil, err
+ }
+
+ isRevocation, err := c.handleRevocationRequest(ctx, req, resp)
+ if err != nil {
+ c.logger.Error("failed to process the response", "error", err)
+ return nil, err
+ }
+
+ // If this is a revocation request, do not go through cache logic.
+ if isRevocation {
+ return resp, nil
+ }
+
+ // Fast path for responses with no secrets
+ if secret == nil {
+ c.logger.Debug("pass-through response; no secret in response", "path", req.Request.URL.Path, "method", req.Request.Method)
+ return resp, nil
+ }
+
+ // Short-circuit if the secret is not renewable
+ tokenRenewable, err := secret.TokenIsRenewable()
+ if err != nil {
+ c.logger.Error("failed to parse renewable param", "error", err)
+ return nil, err
+ }
+ if !secret.Renewable && !tokenRenewable {
+ c.logger.Debug("pass-through response; secret not renewable", "path", req.Request.URL.Path, "method", req.Request.Method)
+ return resp, nil
+ }
+
+ // Decide how the renewal context is derived: lease responses hang off
+ // the owning token's context; auth responses get a fresh context,
+ // parented to the creating token's context for non-orphan child tokens.
+ var renewCtxInfo *ContextInfo
+ switch {
+ case secret.LeaseID != "":
+ c.logger.Debug("processing lease response", "path", req.Request.URL.Path, "method", req.Request.Method)
+ entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token)
+ if err != nil {
+ return nil, err
+ }
+ // If the lease belongs to a token that is not managed by the agent,
+ // return the response without caching it.
+ if entry == nil {
+ c.logger.Debug("pass-through lease response; token not managed by agent", "path", req.Request.URL.Path, "method", req.Request.Method)
+ return resp, nil
+ }
+
+ // Derive a context for renewal using the token's context
+ newCtxInfo := new(ContextInfo)
+ newCtxInfo.Ctx, newCtxInfo.CancelFunc = context.WithCancel(entry.RenewCtxInfo.Ctx)
+ newCtxInfo.DoneCh = make(chan struct{})
+ renewCtxInfo = newCtxInfo
+
+ index.Lease = secret.LeaseID
+ index.LeaseToken = req.Token
+
+ case secret.Auth != nil:
+ c.logger.Debug("processing auth response", "path", req.Request.URL.Path, "method", req.Request.Method)
+ isNonOrphanNewToken := strings.HasPrefix(req.Request.URL.Path, vaultPathTokenCreate) && resp.Response.StatusCode == http.StatusOK && !secret.Auth.Orphan
+
+ // If the new token is a result of token creation endpoints (not from
+ // login endpoints), and if its a non-orphan, then the new token's
+ // context should be derived from the context of the parent token.
+ var parentCtx context.Context
+ if isNonOrphanNewToken {
+ entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token)
+ if err != nil {
+ return nil, err
+ }
+ // If parent token is not managed by the agent, child shouldn't be
+ // either.
+ if entry == nil {
+ c.logger.Debug("pass-through auth response; parent token not managed by agent", "path", req.Request.URL.Path, "method", req.Request.Method)
+ return resp, nil
+ }
+
+ c.logger.Debug("setting parent context", "path", req.Request.URL.Path, "method", req.Request.Method)
+ parentCtx = entry.RenewCtxInfo.Ctx
+
+ entry.TokenParent = req.Token
+ }
+
+ renewCtxInfo = c.createCtxInfo(parentCtx, secret.Auth.ClientToken)
+ index.Token = secret.Auth.ClientToken
+ index.TokenAccessor = secret.Auth.Accessor
+
+ default:
+ // We shouldn't be hitting this, but will err on the side of caution and
+ // simply proxy.
+ c.logger.Debug("pass-through response; secret without lease and token", "path", req.Request.URL.Path, "method", req.Request.Method)
+ return resp, nil
+ }
+
+ // Serialize the response to store it in the cached index
+ var respBytes bytes.Buffer
+ err = resp.Response.Write(&respBytes)
+ if err != nil {
+ c.logger.Error("failed to serialize response", "error", err)
+ return nil, err
+ }
+
+ // Reset the response body for upper layers to read
+ if resp.Response.Body != nil {
+ resp.Response.Body.Close()
+ }
+ resp.Response.Body = ioutil.NopCloser(bytes.NewBuffer(resp.ResponseBody))
+
+ // Set the index's Response
+ index.Response = respBytes.Bytes()
+
+ // Store the index ID in the renewer context
+ renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
+
+ // Store the renewer context in the index
+ index.RenewCtxInfo = &cachememdb.ContextInfo{
+ Ctx: renewCtx,
+ CancelFunc: renewCtxInfo.CancelFunc,
+ DoneCh: renewCtxInfo.DoneCh,
+ }
+
+ // Store the index in the cache
+ c.logger.Debug("storing response into the cache", "path", req.Request.URL.Path, "method", req.Request.Method)
+ err = c.db.Set(index)
+ if err != nil {
+ c.logger.Error("failed to cache the proxied response", "error", err)
+ return nil, err
+ }
+
+ // Start renewing the secret in the response
+ go c.startRenewing(renewCtx, index, req, secret)
+
+ return resp, nil
+}
+
+// createCtxInfo derives a new cancellable ContextInfo from ctx, falling back
+// to the cache's base context when ctx is nil.
+func (c *LeaseCache) createCtxInfo(ctx context.Context, token string) *ContextInfo {
+ if ctx == nil {
+ ctx = c.baseCtxInfo.Ctx
+ }
+ ctxInfo := new(ContextInfo)
+ ctxInfo.Ctx, ctxInfo.CancelFunc = context.WithCancel(ctx)
+ ctxInfo.DoneCh = make(chan struct{})
+ return ctxInfo
+}
+
+// startRenewing renews the secret held by index until its context is
+// cancelled, the renewer reports completion or error, or the index's done
+// channel is closed. Whichever way it exits, the cached entry is evicted.
+func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) {
+ // Evict the entry on exit; a cache entry without an active renewer is
+ // stale by definition.
+ defer func() {
+ id := ctx.Value(contextIndexID).(string)
+ c.logger.Debug("evicting index from cache", "id", id, "path", req.Request.URL.Path, "method", req.Request.Method)
+ err := c.db.Evict(cachememdb.IndexNameID, id)
+ if err != nil {
+ c.logger.Error("failed to evict index", "id", id, "error", err)
+ return
+ }
+ }()
+
+ client, err := api.NewClient(api.DefaultConfig())
+ if err != nil {
+ c.logger.Error("failed to create API client in the renewer", "error", err)
+ return
+ }
+ client.SetToken(req.Token)
+ client.SetHeaders(req.Request.Header)
+
+ renewer, err := client.NewRenewer(&api.RenewerInput{
+ Secret: secret,
+ })
+ if err != nil {
+ c.logger.Error("failed to create secret renewer", "error", err)
+ return
+ }
+
+ c.logger.Debug("initiating renewal", "path", req.Request.URL.Path, "method", req.Request.Method)
+ go renewer.Renew()
+ defer renewer.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ // This is the case which captures context cancellations from token
+ // and leases. Since all the contexts are derived from the agent's
+ // context, this will also cover the shutdown scenario.
+ c.logger.Debug("context cancelled; stopping renewer", "path", req.Request.URL.Path)
+ return
+ case err := <-renewer.DoneCh():
+ // This case covers renewal completion and renewal errors
+ if err != nil {
+ c.logger.Error("failed to renew secret", "error", err)
+ return
+ }
+ c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path)
+ return
+ case renewal := <-renewer.RenewCh():
+ // This case captures secret renewals. Renewed secret is updated in
+ // the cached index.
+ c.logger.Debug("renewal received; updating cache", "path", req.Request.URL.Path)
+ err = c.updateResponse(ctx, renewal)
+ if err != nil {
+ c.logger.Error("failed to handle renewal", "error", err)
+ return
+ }
+ case <-index.RenewCtxInfo.DoneCh:
+ // This case signals the renewal process to shut down and evict
+ // the cache entry. It is triggered when a specific secret
+ // renewal needs to be killed without affecting any of the derived
+ // context renewals.
+ c.logger.Debug("done channel closed")
+ return
+ }
+ }
+}
+
+// updateResponse rewrites the cached response body for the index identified
+// by the ID stored in ctx, replacing the stored secret with the renewed one.
+func (c *LeaseCache) updateResponse(ctx context.Context, renewal *api.RenewOutput) error {
+ id := ctx.Value(contextIndexID).(string)
+
+ // Get the cached index using the id in the context
+ index, err := c.db.Get(cachememdb.IndexNameID, id)
+ if err != nil {
+ return err
+ }
+ if index == nil {
+ return fmt.Errorf("missing cache entry for id: %q", id)
+ }
+
+ // Read the response from the index
+ resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(index.Response)), nil)
+ if err != nil {
+ c.logger.Error("failed to deserialize response", "error", err)
+ return err
+ }
+
+ // Update the body in the response by the renewed secret
+ bodyBytes, err := json.Marshal(renewal.Secret)
+ if err != nil {
+ return err
+ }
+ if resp.Body != nil {
+ resp.Body.Close()
+ }
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+ resp.ContentLength = int64(len(bodyBytes))
+
+ // Serialize the response
+ var respBytes bytes.Buffer
+ err = resp.Write(&respBytes)
+ if err != nil {
+ c.logger.Error("failed to serialize updated response", "error", err)
+ return err
+ }
+
+ // Update the response in the index and set it in the cache
+ index.Response = respBytes.Bytes()
+ err = c.db.Set(index)
+ if err != nil {
+ c.logger.Error("failed to cache the proxied response", "error", err)
+ return err
+ }
+
+ return nil
+}
+
+// computeIndexID results in a value that uniquely identifies a request
+// received by the agent. It does so by SHA256 hashing the serialized request
+// object containing the request path, query parameters and body parameters.
+func computeIndexID(req *SendRequest) (string, error) {
+ var b bytes.Buffer
+
+ // Serialize the request
+ if err := req.Request.Write(&b); err != nil {
+ return "", fmt.Errorf("failed to serialize request: %v", err)
+ }
+
+ // Reset the request body after it has been closed by Write
+ if req.Request.Body != nil {
+ req.Request.Body.Close()
+ }
+ req.Request.Body = ioutil.NopCloser(bytes.NewBuffer(req.RequestBody))
+
+ // Append req.Token into the byte slice. This is needed since auto-auth'ed
+ // requests sets the token directly into SendRequest.Token
+ b.Write([]byte(req.Token))
+
+ sum := sha256.Sum256(b.Bytes())
+ return hex.EncodeToString(sum[:]), nil
+}
+
+// HandleCacheClear returns a handlerFunc that can perform cache clearing
+// operations. It decodes a cacheClearRequest from the request body and
+// delegates to handleCacheClear; an invalid clear type yields a 400, any
+// other failure a 500. A successful clear responds with 200 and no body.
+func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ req := new(cacheClearRequest)
+ if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil {
+ if err == io.EOF {
+ err = errors.New("empty JSON provided")
+ }
+ respondError(w, http.StatusBadRequest, errwrap.Wrapf("failed to parse JSON input: {{err}}", err))
+ return
+ }
+
+ c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value)
+
+ if err := c.handleCacheClear(ctx, req.Type, req.Namespace, req.Value); err != nil {
+ // Default to 500 on error, unless the user provided an invalid type,
+ // which would then be a 400.
+ httpStatus := http.StatusInternalServerError
+ if err == errInvalidType {
+ httpStatus = http.StatusBadRequest
+ }
+ respondError(w, httpStatus, errwrap.Wrapf("failed to clear cache: {{err}}", err))
+ return
+ }
+
+ return
+ })
+}
+
+// handleCacheClear clears cached entries matching the given clear type and
+// value(s). Supported types are "request_path" (namespace + path prefix),
+// "token", "token_accessor", "lease", and "all"; anything else returns
+// errInvalidType. Clearing works by cancelling the renewal context of each
+// matching entry, which causes its renewer goroutine to evict the entry.
+func (c *LeaseCache) handleCacheClear(ctx context.Context, clearType string, clearValues ...interface{}) error {
+ if len(clearValues) == 0 {
+ return errors.New("no value(s) provided to clear corresponding cache entries")
+ }
+
+ // The value that we want to clear, for most cases, is the last one provided.
+ clearValue, ok := clearValues[len(clearValues)-1].(string)
+ if !ok {
+ // Bug fix: report the offending raw value; previously this printed
+ // clearValue, which a failed type assertion leaves as "".
+ return fmt.Errorf("unable to convert %v to type string", clearValues[len(clearValues)-1])
+ }
+
+ switch clearType {
+ case "request_path":
+ // For this particular case, we need to ensure that there are 2 provided
+ // indexers for the proper lookup.
+ if len(clearValues) != 2 {
+ return fmt.Errorf("clearing cache by request path requires 2 indexers, got %d", len(clearValues))
+ }
+
+ // The first value provided for this case will be the namespace, but if it's
+ // an empty value we need to overwrite it with "root/" to ensure proper
+ // cache lookup. Use a checked assertion so a non-string value returns
+ // an error instead of panicking.
+ ns, ok := clearValues[0].(string)
+ if !ok {
+ return fmt.Errorf("unable to convert %v to type string", clearValues[0])
+ }
+ if ns == "" {
+ clearValues[0] = "root/"
+ }
+
+ // Find all the cached entries which has the given request path and
+ // cancel the contexts of all the respective renewers
+ indexes, err := c.db.GetByPrefix(clearType, clearValues...)
+ if err != nil {
+ return err
+ }
+ for _, index := range indexes {
+ index.RenewCtxInfo.CancelFunc()
+ }
+
+ case "token":
+ if clearValue == "" {
+ return nil
+ }
+
+ // Get the context for the given token and cancel its context
+ index, err := c.db.Get(cachememdb.IndexNameToken, clearValue)
+ if err != nil {
+ return err
+ }
+ if index == nil {
+ return nil
+ }
+
+ c.logger.Debug("cancelling context of index attached to token")
+
+ index.RenewCtxInfo.CancelFunc()
+
+ case "token_accessor", "lease":
+ // Get the cached index and cancel the corresponding renewer context
+ index, err := c.db.Get(clearType, clearValue)
+ if err != nil {
+ return err
+ }
+ if index == nil {
+ return nil
+ }
+
+ c.logger.Debug("cancelling context of index attached to accessor")
+
+ index.RenewCtxInfo.CancelFunc()
+
+ case "all":
+ // Cancel the base context which triggers all the goroutines to
+ // stop and evict entries from cache.
+ c.logger.Debug("cancelling base context")
+ c.baseCtxInfo.CancelFunc()
+
+ // Reset the base context
+ baseCtx, baseCancel := context.WithCancel(ctx)
+ c.baseCtxInfo = &ContextInfo{
+ Ctx: baseCtx,
+ CancelFunc: baseCancel,
+ }
+
+ // Reset the memdb instance
+ if err := c.db.Flush(); err != nil {
+ return err
+ }
+
+ default:
+ return errInvalidType
+ }
+
+ c.logger.Debug("successfully cleared matching cache entries")
+
+ return nil
+}
+
+// handleRevocationRequest checks whether the originating request is a
+// revocation request, and if so perform applicable cache cleanups.
+// Returns true is this is a revocation request.
+func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) {
+ // Lease and token revocations return 204's on success. Fast-path if that's
+ // not the case.
+ if resp.Response.StatusCode != http.StatusNoContent {
+ return false, nil
+ }
+
+ _, path := deriveNamespaceAndRevocationPath(req)
+
+ switch {
+ case path == vaultPathTokenRevoke:
+ // Get the token from the request body
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return false, err
+ }
+ tokenRaw, ok := jsonBody["token"]
+ if !ok {
+ return false, fmt.Errorf("failed to get token from request body")
+ }
+ token, ok := tokenRaw.(string)
+ if !ok {
+ return false, fmt.Errorf("expected token in the request body to be string")
+ }
+
+ // Clear the cache entry associated with the token and all the other
+ // entries belonging to the leases derived from this token.
+ if err := c.handleCacheClear(ctx, "token", token); err != nil {
+ return false, err
+ }
+
+ case path == vaultPathTokenRevokeSelf:
+ // Clear the cache entry associated with the token and all the other
+ // entries belonging to the leases derived from this token.
+ if err := c.handleCacheClear(ctx, "token", req.Token); err != nil {
+ return false, err
+ }
+
+ case path == vaultPathTokenRevokeAccessor:
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return false, err
+ }
+ accessorRaw, ok := jsonBody["accessor"]
+ if !ok {
+ return false, fmt.Errorf("failed to get accessor from request body")
+ }
+ accessor, ok := accessorRaw.(string)
+ if !ok {
+ return false, fmt.Errorf("expected accessor in the request body to be string")
+ }
+
+ if err := c.handleCacheClear(ctx, "token_accessor", accessor); err != nil {
+ return false, err
+ }
+
+ case path == vaultPathTokenRevokeOrphan:
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return false, err
+ }
+ tokenRaw, ok := jsonBody["token"]
+ if !ok {
+ return false, fmt.Errorf("failed to get token from request body")
+ }
+ token, ok := tokenRaw.(string)
+ if !ok {
+ return false, fmt.Errorf("expected token in the request body to be string")
+ }
+
+ // Kill the renewers of all the leases attached to the revoked token
+ indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token)
+ if err != nil {
+ return false, err
+ }
+ for _, index := range indexes {
+ index.RenewCtxInfo.CancelFunc()
+ }
+
+ // Kill the renewer of the revoked token
+ index, err := c.db.Get(cachememdb.IndexNameToken, token)
+ if err != nil {
+ return false, err
+ }
+ if index == nil {
+ return true, nil
+ }
+
+ // Indicate the renewer goroutine for this index to return. This will
+ // not affect the child tokens because the context is not getting
+ // cancelled.
+ close(index.RenewCtxInfo.DoneCh)
+
+ // Clear the parent references of the revoked token in the entries
+ // belonging to the child tokens of the revoked token.
+ indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token)
+ if err != nil {
+ return false, err
+ }
+ for _, index := range indexes {
+ index.TokenParent = ""
+ err = c.db.Set(index)
+ if err != nil {
+ c.logger.Error("failed to persist index", "error", err)
+ return false, err
+ }
+ }
+
+ case path == vaultPathLeaseRevoke:
+ // TODO: Should lease present in the URL itself be considered here?
+ // Get the lease from the request body
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return false, err
+ }
+ leaseIDRaw, ok := jsonBody["lease_id"]
+ if !ok {
+ return false, fmt.Errorf("failed to get lease_id from request body")
+ }
+ leaseID, ok := leaseIDRaw.(string)
+ if !ok {
+ return false, fmt.Errorf("expected lease_id the request body to be string")
+ }
+ if err := c.handleCacheClear(ctx, "lease", leaseID); err != nil {
+ return false, err
+ }
+
+ case strings.HasPrefix(path, vaultPathLeaseRevokeForce):
+ // Trim the URL path to get the request path prefix
+ prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce)
+ // Get all the cache indexes that use the request path containing the
+ // prefix and cancel the renewer context of each.
+ indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
+ if err != nil {
+ return false, err
+ }
+
+ _, tokenNSID := namespace.SplitIDFromString(req.Token)
+ for _, index := range indexes {
+ _, leaseNSID := namespace.SplitIDFromString(index.Lease)
+ // Only evict leases that match the token's namespace
+ if tokenNSID == leaseNSID {
+ index.RenewCtxInfo.CancelFunc()
+ }
+ }
+
+ case strings.HasPrefix(path, vaultPathLeaseRevokePrefix):
+ // Trim the URL path to get the request path prefix
+ prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix)
+ // Get all the cache indexes that use the request path containing the
+ // prefix and cancel the renewer context of each.
+ indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
+ if err != nil {
+ return false, err
+ }
+
+ _, tokenNSID := namespace.SplitIDFromString(req.Token)
+ for _, index := range indexes {
+ _, leaseNSID := namespace.SplitIDFromString(index.Lease)
+ // Only evict leases that match the token's namespace
+ if tokenNSID == leaseNSID {
+ index.RenewCtxInfo.CancelFunc()
+ }
+ }
+
+ default:
+ return false, nil
+ }
+
+ c.logger.Debug("triggered caching eviction from revocation request")
+
+ return true, nil
+}
+
+// deriveNamespaceAndRevocationPath returns the namespace and relative path for
+// revocation paths.
+//
+// If the path contains a namespace, but it's not a revocation path, it will be
+// returned as-is, since there's no way to tell where the namespace ends and
+// where the request path begins purely based off a string.
+//
+// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke
+// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke
+// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar
+// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar
+func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) {
+ namespace := "root/"
+ nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName)
+ if nsHeader != "" {
+ namespace = nsHeader
+ }
+
+ fullPath := req.Request.URL.Path
+ nonVersionedPath := strings.TrimPrefix(fullPath, "/v1")
+
+ for _, pathToCheck := range revocationPaths {
+ // We use strings.Contains here for paths that can contain
+ // vars in the path, e.g. /v1/lease/revoke-prefix/:prefix
+ i := strings.Index(nonVersionedPath, pathToCheck)
+ // If there's no match, move on to the next check
+ if i == -1 {
+ continue
+ }
+
+ // If the index is 0, this is a relative path with no namespace preppended,
+ // so we can break early
+ if i == 0 {
+ break
+ }
+
+ // We need to turn /ns1 into ns1/, this makes it easy
+ namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i])
+
+ // If it's root, we replace, otherwise we join
+ if namespace == "root/" {
+ namespace = namespaceInPath
+ } else {
+ namespace = namespace + namespaceInPath
+ }
+
+ return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:])
+ }
+
+ return namespace, fmt.Sprintf("/v1%s", nonVersionedPath)
+}
diff --git a/command/agent/cache/lease_cache_test.go b/command/agent/cache/lease_cache_test.go
new file mode 100644
index 000000000000..a455944da738
--- /dev/null
+++ b/command/agent/cache/lease_cache_test.go
@@ -0,0 +1,507 @@
+package cache
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/go-test/deep"
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/logging"
+)
+
+func testNewLeaseCache(t *testing.T, responses []*SendResponse) *LeaseCache {
+ t.Helper()
+
+ lc, err := NewLeaseCache(&LeaseCacheConfig{
+ BaseContext: context.Background(),
+ Proxier: newMockProxier(responses),
+ Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"),
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ return lc
+}
+
+func TestCache_ComputeIndexID(t *testing.T) {
+ type args struct {
+ req *http.Request
+ }
+ tests := []struct {
+ name string
+ req *SendRequest
+ want string
+ wantErr bool
+ }{
+ {
+ "basic",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "test",
+ },
+ },
+ },
+ "2edc7e965c3e1bdce3b1d5f79a52927842569c0734a86544d222753f11ae4847",
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := computeIndexID(tt.req)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("actual_error: %v, expected_error: %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, string(tt.want)) {
+ t.Errorf("bad: index id; actual: %q, expected: %q", got, string(tt.want))
+ }
+ })
+ }
+}
+
+func TestCache_LeaseCache_EmptyToken(t *testing.T) {
+ responses := []*SendResponse{
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusCreated,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "invalid", "auth": {"client_token": "testtoken"}}`)),
+ },
+ },
+ ResponseBody: []byte(`{"value": "invalid", "auth": {"client_token": "testtoken"}}`),
+ },
+ }
+ lc := testNewLeaseCache(t, responses)
+
+ // Even if the send request doesn't have a token on it, a successful
+ // cacheable response should result in the index properly getting populated
+ // with a token and memdb shouldn't complain while inserting the index.
+ urlPath := "http://example.com/v1/sample/api"
+ sendReq := &SendRequest{
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err := lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatalf("expected a non empty response")
+ }
+}
+
+func TestCache_LeaseCache_SendCacheable(t *testing.T) {
+ // Emulate 2 responses from the api proxy. One returns a new token and the
+ // other returns a lease.
+ responses := []*SendResponse{
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusCreated,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "invalid", "auth": {"client_token": "testtoken", "renewable": true}}`)),
+ },
+ },
+ ResponseBody: []byte(`{"value": "invalid", "auth": {"client_token": "testtoken", "renewable": true}}`),
+ },
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "output", "lease_id": "foo", "renewable": true}`)),
+ },
+ },
+ ResponseBody: []byte(`{"value": "output", "lease_id": "foo", "renewable": true}`),
+ },
+ }
+ lc := testNewLeaseCache(t, responses)
+
+ // Make a request. A response with a new token is returned to the lease
+ // cache and that will be cached.
+ urlPath := "http://example.com/v1/sample/api"
+ sendReq := &SendRequest{
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err := lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+
+ // Send the same request again to get the cached response
+ sendReq = &SendRequest{
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err = lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+
+ // Modify the request a little bit to ensure the second response is
+ // returned to the lease cache. But make sure that the token in the request
+ // is valid.
+ sendReq = &SendRequest{
+ Token: "testtoken",
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)),
+ }
+ resp, err = lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+
+ // Make the same request again and ensure that the same reponse is returned
+ // again.
+ sendReq = &SendRequest{
+ Token: "testtoken",
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)),
+ }
+ resp, err = lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+}
+
+func TestCache_LeaseCache_SendNonCacheable(t *testing.T) {
+ responses := []*SendResponse{
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "output"}`)),
+ },
+ },
+ },
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusNotFound,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "invalid"}`)),
+ },
+ },
+ },
+ }
+ lc := testNewLeaseCache(t, responses)
+
+ // Send a request through the lease cache which is not cacheable (there is
+ // no lease information or auth information in the response)
+ sendReq := &SendRequest{
+ Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err := lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+
+ // Since the response is non-cacheable, the second response will be
+ // returned.
+ sendReq = &SendRequest{
+ Token: "foo",
+ Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err = lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+}
+
+func TestCache_LeaseCache_SendNonCacheableNonTokenLease(t *testing.T) {
+ // Create the cache
+ responses := []*SendResponse{
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "output", "lease_id": "foo"}`)),
+ },
+ },
+ ResponseBody: []byte(`{"value": "output", "lease_id": "foo"}`),
+ },
+ &SendResponse{
+ Response: &api.Response{
+ Response: &http.Response{
+ StatusCode: http.StatusCreated,
+ Body: ioutil.NopCloser(strings.NewReader(`{"value": "invalid", "auth": {"client_token": "testtoken"}}`)),
+ },
+ },
+ ResponseBody: []byte(`{"value": "invalid", "auth": {"client_token": "testtoken"}}`),
+ },
+ }
+ lc := testNewLeaseCache(t, responses)
+
+ // Send a request through lease cache which returns a response containing
+ // lease_id. Response will not be cached because it doesn't belong to a
+ // token that is managed by the lease cache.
+ urlPath := "http://example.com/v1/sample/api"
+ sendReq := &SendRequest{
+ Token: "foo",
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err := lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+
+ // Verify that the response is not cached by sending the same request and
+ // by expecting a different response.
+ sendReq = &SendRequest{
+ Token: "foo",
+ Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+ }
+ resp, err = lc.Send(context.Background(), sendReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff == nil {
+ t.Fatalf("expected getting proxied response: got %v", diff)
+ }
+}
+
+func TestCache_LeaseCache_HandleCacheClear(t *testing.T) {
+ lc := testNewLeaseCache(t, nil)
+
+ handler := lc.HandleCacheClear(context.Background())
+ ts := httptest.NewServer(handler)
+ defer ts.Close()
+
+ // Test missing body, should return 400
+ resp, err := http.Post(ts.URL, "application/json", nil)
+ if err != nil {
+ t.Fatal()
+ }
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("status code mismatch: expected = %v, got = %v", http.StatusBadRequest, resp.StatusCode)
+ }
+
+ testCases := []struct {
+ name string
+ reqType string
+ reqValue string
+ expectedStatusCode int
+ }{
+ {
+ "invalid_type",
+ "foo",
+ "",
+ http.StatusBadRequest,
+ },
+ {
+ "invalid_value",
+ "",
+ "bar",
+ http.StatusBadRequest,
+ },
+ {
+ "all",
+ "all",
+ "",
+ http.StatusOK,
+ },
+ {
+ "by_request_path",
+ "request_path",
+ "foo",
+ http.StatusOK,
+ },
+ {
+ "by_token",
+ "token",
+ "foo",
+ http.StatusOK,
+ },
+ {
+ "by_lease",
+ "lease",
+ "foo",
+ http.StatusOK,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ reqBody := fmt.Sprintf("{\"type\": \"%s\", \"value\": \"%s\"}", tc.reqType, tc.reqValue)
+ resp, err := http.Post(ts.URL, "application/json", strings.NewReader(reqBody))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tc.expectedStatusCode != resp.StatusCode {
+ t.Fatalf("status code mismatch: expected = %v, got = %v", tc.expectedStatusCode, resp.StatusCode)
+ }
+ })
+ }
+}
+
+func TestCache_DeriveNamespaceAndRevocationPath(t *testing.T) {
+ tests := []struct {
+ name string
+ req *SendRequest
+ wantNamespace string
+ wantRelativePath string
+ }{
+ {
+ "non_revocation_full_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/ns1/sys/mounts",
+ },
+ },
+ },
+ "root/",
+ "/v1/ns1/sys/mounts",
+ },
+ {
+ "non_revocation_relative_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/sys/mounts",
+ },
+ Header: http.Header{
+ consts.NamespaceHeaderName: []string{"ns1/"},
+ },
+ },
+ },
+ "ns1/",
+ "/v1/sys/mounts",
+ },
+ {
+ "non_revocation_relative_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/ns2/sys/mounts",
+ },
+ Header: http.Header{
+ consts.NamespaceHeaderName: []string{"ns1/"},
+ },
+ },
+ },
+ "ns1/",
+ "/v1/ns2/sys/mounts",
+ },
+ {
+ "revocation_full_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/ns1/sys/leases/revoke",
+ },
+ },
+ },
+ "ns1/",
+ "/v1/sys/leases/revoke",
+ },
+ {
+ "revocation_relative_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/sys/leases/revoke",
+ },
+ Header: http.Header{
+ consts.NamespaceHeaderName: []string{"ns1/"},
+ },
+ },
+ },
+ "ns1/",
+ "/v1/sys/leases/revoke",
+ },
+ {
+ "revocation_relative_partial_ns",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/ns2/sys/leases/revoke",
+ },
+ Header: http.Header{
+ consts.NamespaceHeaderName: []string{"ns1/"},
+ },
+ },
+ },
+ "ns1/ns2/",
+ "/v1/sys/leases/revoke",
+ },
+ {
+ "revocation_prefix_full_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/ns1/sys/leases/revoke-prefix/foo",
+ },
+ },
+ },
+ "ns1/",
+ "/v1/sys/leases/revoke-prefix/foo",
+ },
+ {
+ "revocation_prefix_relative_path",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/sys/leases/revoke-prefix/foo",
+ },
+ Header: http.Header{
+ consts.NamespaceHeaderName: []string{"ns1/"},
+ },
+ },
+ },
+ "ns1/",
+ "/v1/sys/leases/revoke-prefix/foo",
+ },
+ {
+ "revocation_prefix_partial_ns",
+ &SendRequest{
+ Request: &http.Request{
+ URL: &url.URL{
+ Path: "/v1/ns2/sys/leases/revoke-prefix/foo",
+ },
+ Header: http.Header{
+ consts.NamespaceHeaderName: []string{"ns1/"},
+ },
+ },
+ },
+ "ns1/ns2/",
+ "/v1/sys/leases/revoke-prefix/foo",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotNamespace, gotRelativePath := deriveNamespaceAndRevocationPath(tt.req)
+ if gotNamespace != tt.wantNamespace {
+ t.Errorf("deriveNamespaceAndRevocationPath() gotNamespace = %v, want %v", gotNamespace, tt.wantNamespace)
+ }
+ if gotRelativePath != tt.wantRelativePath {
+ t.Errorf("deriveNamespaceAndRevocationPath() gotRelativePath = %v, want %v", gotRelativePath, tt.wantRelativePath)
+ }
+ })
+ }
+}
diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go
new file mode 100644
index 000000000000..1adca7a8dc4b
--- /dev/null
+++ b/command/agent/cache/listener.go
@@ -0,0 +1,105 @@
+package cache
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/vault/command/agent/config"
+ "github.com/hashicorp/vault/command/server"
+ "github.com/hashicorp/vault/helper/reload"
+ "github.com/mitchellh/cli"
+)
+
+func ServerListener(lnConfig *config.Listener, logger io.Writer, ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) {
+ switch lnConfig.Type {
+ case "unix":
+ return unixSocketListener(lnConfig.Config, logger, ui)
+ case "tcp":
+ return tcpListener(lnConfig.Config, logger, ui)
+ default:
+ return nil, nil, nil, fmt.Errorf("unsupported listener type: %q", lnConfig.Type)
+ }
+}
+
+func unixSocketListener(config map[string]interface{}, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) {
+ addr, ok := config["address"].(string)
+ if !ok {
+ return nil, nil, nil, fmt.Errorf("invalid address: %v", config["address"])
+ }
+
+ if addr == "" {
+ return nil, nil, nil, fmt.Errorf("address field should point to socket file path")
+ }
+
+ // Remove the socket file as it shouldn't exist for the domain socket to
+ // work
+ err := os.Remove(addr)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, nil, nil, fmt.Errorf("failed to remove the socket file: %v", err)
+ }
+
+ listener, err := net.Listen("unix", addr)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Wrap the listener in rmListener so that the Unix domain socket file is
+ // removed on close.
+ listener = &rmListener{
+ Listener: listener,
+ Path: addr,
+ }
+
+ props := map[string]string{"addr": addr, "tls": "disabled"}
+
+ return listener, props, nil, nil
+}
+
+func tcpListener(config map[string]interface{}, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) {
+ bindProto := "tcp"
+ var addr string
+ addrRaw, ok := config["address"]
+ if !ok {
+ addr = "127.0.0.1:8300"
+ } else {
+ addr = addrRaw.(string)
+ }
+
+ // If they've passed 0.0.0.0, we only want to bind on IPv4
+ // rather than golang's dual stack default
+ if strings.HasPrefix(addr, "0.0.0.0:") {
+ bindProto = "tcp4"
+ }
+
+ ln, err := net.Listen(bindProto, addr)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ ln = server.TCPKeepAliveListener{ln.(*net.TCPListener)}
+
+ props := map[string]string{"addr": addr}
+
+ return server.ListenerWrapTLS(ln, props, config, ui)
+}
+
+// rmListener is an implementation of net.Listener that forwards most
+// calls to the listener but also removes a file as part of the close. We
+// use this to cleanup the unix domain socket on close.
+type rmListener struct {
+ net.Listener
+ Path string
+}
+
+func (l *rmListener) Close() error {
+ // Close the listener itself
+ if err := l.Listener.Close(); err != nil {
+ return err
+ }
+
+ // Remove the file
+ return os.Remove(l.Path)
+}
diff --git a/command/agent/cache/proxy.go b/command/agent/cache/proxy.go
new file mode 100644
index 000000000000..4637590917e9
--- /dev/null
+++ b/command/agent/cache/proxy.go
@@ -0,0 +1,28 @@
+package cache
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/hashicorp/vault/api"
+)
+
// SendRequest is the input for Proxier.Send.
type SendRequest struct {
	// Token is the Vault token attached to the request.
	Token string
	// Request is the HTTP request to be forwarded.
	Request *http.Request
	// RequestBody holds the raw request body bytes; the lease cache reads it
	// (e.g. to extract tokens/lease IDs from revocation request bodies)
	// without consuming Request.Body.
	RequestBody []byte
}

// SendResponse is the output from Proxier.Send.
type SendResponse struct {
	Response *api.Response
	// ResponseBody holds the raw response body bytes alongside Response.
	// NOTE(review): presumably retained so cached responses can be replayed
	// after Response.Body is drained — confirm with LeaseCache.Send.
	ResponseBody []byte
}

// Proxier is the interface implemented by different components that are
// responsible for performing specific tasks, such as caching and proxying. All
// these tasks combined together would serve the request received by the agent.
type Proxier interface {
	Send(ctx context.Context, req *SendRequest) (*SendResponse, error)
}
diff --git a/command/agent/cache/testing.go b/command/agent/cache/testing.go
new file mode 100644
index 000000000000..d9de1caadc7d
--- /dev/null
+++ b/command/agent/cache/testing.go
@@ -0,0 +1,36 @@
+package cache
+
+import (
+ "context"
+ "fmt"
+)
+
+// mockProxier is a mock implementation of the Proxier interface, used for testing purposes.
+// The mock will return the provided responses every time it reaches its Send method, up to
+// the last provided response. This lets tests control what the next/underlying Proxier layer
+// might expect to return.
+type mockProxier struct {
+ proxiedResponses []*SendResponse
+ responseIndex int
+}
+
+func newMockProxier(responses []*SendResponse) *mockProxier {
+ return &mockProxier{
+ proxiedResponses: responses,
+ }
+}
+
+func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
+ if p.responseIndex >= len(p.proxiedResponses) {
+ return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses))
+ }
+ resp := p.proxiedResponses[p.responseIndex]
+
+ p.responseIndex++
+
+ return resp, nil
+}
+
+func (p *mockProxier) ResponseIndex() int {
+ return p.responseIndex
+}
diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go
new file mode 100644
index 000000000000..88f1c36409f9
--- /dev/null
+++ b/command/agent/cache_end_to_end_test.go
@@ -0,0 +1,280 @@
+package agent
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "testing"
+ "time"
+
+ hclog "github.com/hashicorp/go-hclog"
+ log "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
+ "github.com/hashicorp/vault/command/agent/auth"
+ agentapprole "github.com/hashicorp/vault/command/agent/auth/approle"
+ "github.com/hashicorp/vault/command/agent/cache"
+ "github.com/hashicorp/vault/command/agent/sink"
+ "github.com/hashicorp/vault/command/agent/sink/file"
+ "github.com/hashicorp/vault/helper/logging"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
// TestCache_UsingAutoAuthToken is an end-to-end test: it stands up a Vault
// test cluster with the approle auth method, runs the agent's auto-auth
// handler plus a file sink to obtain a token, then starts the caching proxy
// server and verifies that a client with NO token set can still perform a
// token lookup-self through the cache (i.e. the auto-auth token is used).
func TestCache_UsingAutoAuthToken(t *testing.T) {
	var err error
	logger := logging.NewVaultLogger(log.Trace)
	coreConfig := &vault.CoreConfig{
		DisableMlock: true,
		DisableCache: true,
		Logger:       log.NewNullLogger(),
		CredentialBackends: map[string]logical.Factory{
			"approle": credAppRole.Factory,
		},
	}

	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
	})

	cluster.Start()
	defer cluster.Cleanup()

	cores := cluster.Cores

	vault.TestWaitActive(t, cores[0].Core)

	client := cores[0].Client

	// Point the environment at the test cluster, restoring the previous
	// values when the test finishes.
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Setenv(api.EnvVaultAddress, client.Address())

	defer os.Setenv(api.EnvVaultCACert, os.Getenv(api.EnvVaultCACert))
	os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir))

	// Enable approle and create a role with short TTLs so renewal paths get
	// exercised quickly.
	err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{
		Type: "approle",
	})
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{
		"bind_secret_id": "true",
		"token_ttl":      "3s",
		"token_max_ttl":  "10s",
	})
	if err != nil {
		t.Fatal(err)
	}

	resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil)
	if err != nil {
		t.Fatal(err)
	}
	secretID1 := resp.Data["secret_id"].(string)

	resp, err = client.Logical().Read("auth/approle/role/test1/role-id")
	if err != nil {
		t.Fatal(err)
	}
	roleID1 := resp.Data["role_id"].(string)

	// Temp files holding the role-id and secret-id for the file-based
	// approle auto-auth method.
	rolef, err := ioutil.TempFile("", "auth.role-id.test.")
	if err != nil {
		t.Fatal(err)
	}
	role := rolef.Name()
	rolef.Close() // WriteFile doesn't need it open
	defer os.Remove(role)
	t.Logf("input role_id_file_path: %s", role)

	secretf, err := ioutil.TempFile("", "auth.secret-id.test.")
	if err != nil {
		t.Fatal(err)
	}
	secret := secretf.Name()
	secretf.Close()
	defer os.Remove(secret)
	t.Logf("input secret_id_file_path: %s", secret)

	// We close these right away because we're just basically testing
	// permissions and finding a usable file name
	ouf, err := ioutil.TempFile("", "auth.tokensink.test.")
	if err != nil {
		t.Fatal(err)
	}
	out := ouf.Name()
	ouf.Close()
	os.Remove(out)
	t.Logf("output: %s", out)

	// Hard 30-second ceiling on the whole auto-auth flow.
	ctx, cancelFunc := context.WithCancel(context.Background())
	timer := time.AfterFunc(30*time.Second, func() {
		cancelFunc()
	})
	defer timer.Stop()

	conf := map[string]interface{}{
		"role_id_file_path":                   role,
		"secret_id_file_path":                 secret,
		"remove_secret_id_file_after_reading": true,
	}

	// Start the auto-auth handler with the approle method.
	am, err := agentapprole.NewApproleAuthMethod(&auth.AuthConfig{
		Logger:    logger.Named("auth.approle"),
		MountPath: "auth/approle",
		Config:    conf,
	})
	if err != nil {
		t.Fatal(err)
	}
	ahConfig := &auth.AuthHandlerConfig{
		Logger: logger.Named("auth.handler"),
		Client: client,
	}
	ah := auth.NewAuthHandler(ahConfig)
	go ah.Run(ctx, am)
	defer func() {
		<-ah.DoneCh
	}()

	// File sink that receives the token produced by auto-auth.
	config := &sink.SinkConfig{
		Logger: logger.Named("sink.file"),
		Config: map[string]interface{}{
			"path": out,
		},
	}
	fs, err := file.NewFileSink(config)
	if err != nil {
		t.Fatal(err)
	}
	config.Sink = fs

	ss := sink.NewSinkServer(&sink.SinkServerConfig{
		Logger: logger.Named("sink.server"),
		Client: client,
	})
	go ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config})
	defer func() {
		<-ss.DoneCh
	}()

	// This has to be after the other defers so it happens first
	defer cancelFunc()

	// Check that no sink file exists
	_, err = os.Lstat(out)
	if err == nil {
		t.Fatal("expected err")
	}
	if !os.IsNotExist(err) {
		t.Fatal("expected notexist err")
	}

	// Drop the credentials in place; auto-auth should pick them up.
	if err := ioutil.WriteFile(role, []byte(roleID1), 0600); err != nil {
		t.Fatal(err)
	} else {
		logger.Trace("wrote test role 1", "path", role)
	}

	if err := ioutil.WriteFile(secret, []byte(secretID1), 0600); err != nil {
		t.Fatal(err)
	} else {
		logger.Trace("wrote test secret 1", "path", secret)
	}

	// getToken polls the sink file until a token appears (or times out),
	// verifies the secret-id file was consumed and the token is valid.
	getToken := func() string {
		timeout := time.Now().Add(10 * time.Second)
		for {
			if time.Now().After(timeout) {
				t.Fatal("did not find a written token after timeout")
			}
			val, err := ioutil.ReadFile(out)
			if err == nil {
				os.Remove(out)
				if len(val) == 0 {
					t.Fatal("written token was empty")
				}

				// remove_secret_id_file_after_reading was set, so the
				// secret-id file must be gone by now.
				_, err = os.Stat(secret)
				if err == nil {
					t.Fatal("secret file exists but was supposed to be removed")
				}

				client.SetToken(string(val))
				_, err := client.Auth().Token().LookupSelf()
				if err != nil {
					t.Fatal(err)
				}
				return string(val)
			}
			time.Sleep(250 * time.Millisecond)
		}
	}

	t.Logf("auto-auth token: %q", getToken())

	// Stand up the caching proxy server on an ephemeral port.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}

	defer listener.Close()

	cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache")

	// Create the API proxier
	apiProxy := cache.NewAPIProxy(&cache.APIProxyConfig{
		Logger: cacheLogger.Named("apiproxy"),
	})

	// Create the lease cache proxier and set its underlying proxier to
	// the API proxier.
	leaseCache, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{
		BaseContext: ctx,
		Proxier:     apiProxy,
		Logger:      cacheLogger.Named("leasecache"),
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create a muxer and add paths relevant for the lease cache layer
	mux := http.NewServeMux()
	mux.Handle("/v1/agent/cache-clear", leaseCache.HandleCacheClear(ctx))

	// The 'true' argument enables use of the auto-auth token for proxied
	// requests, which is the behavior under test.
	mux.Handle("/", cache.Handler(ctx, cacheLogger, leaseCache, true, client))
	server := &http.Server{
		Handler:           mux,
		ReadHeaderTimeout: 10 * time.Second,
		ReadTimeout:       30 * time.Second,
		IdleTimeout:       5 * time.Minute,
		ErrorLog:          cacheLogger.StandardLogger(nil),
	}
	go server.Serve(listener)

	// This client deliberately has no token set; the cache layer should
	// substitute the auto-auth token.
	testClient, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		t.Fatal(err)
	}

	if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil {
		t.Fatal(err)
	}

	// Wait for listeners to come up
	time.Sleep(2 * time.Second)

	resp, err = testClient.Logical().Read("auth/token/lookup-self")
	if err != nil {
		t.Fatal(err)
	}
	if resp == nil {
		t.Fatalf("failed to use the auto-auth token to perform lookup-self")
	}
}
diff --git a/command/agent/config/config.go b/command/agent/config/config.go
index 3a18b946efac..9c9a80aaf9b7 100644
--- a/command/agent/config/config.go
+++ b/command/agent/config/config.go
@@ -22,6 +22,17 @@ type Config struct {
AutoAuth *AutoAuth `hcl:"auto_auth"`
ExitAfterAuth bool `hcl:"exit_after_auth"`
PidFile string `hcl:"pid_file"`
+ Cache *Cache `hcl:"cache"`
+}
+
+type Cache struct {
+ UseAutoAuthToken bool `hcl:"use_auto_auth_token"`
+ Listeners []*Listener `hcl:"listeners"`
+}
+
+type Listener struct {
+ Type string
+ Config map[string]interface{}
}
type AutoAuth struct {
@@ -91,9 +102,102 @@ func LoadConfig(path string, logger log.Logger) (*Config, error) {
return nil, errwrap.Wrapf("error parsing 'auto_auth': {{err}}", err)
}
+ err = parseCache(&result, list)
+ if err != nil {
+ return nil, errwrap.Wrapf("error parsing 'cache':{{err}}", err)
+ }
+
return &result, nil
}
+func parseCache(result *Config, list *ast.ObjectList) error {
+ name := "cache"
+
+ cacheList := list.Filter(name)
+ if len(cacheList.Items) == 0 {
+ return nil
+ }
+
+ if len(cacheList.Items) > 1 {
+ return fmt.Errorf("one and only one %q block is required", name)
+ }
+
+ item := cacheList.Items[0]
+
+ var c Cache
+ err := hcl.DecodeObject(&c, item.Val)
+ if err != nil {
+ return err
+ }
+
+ result.Cache = &c
+
+ subs, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ return fmt.Errorf("could not parse %q as an object", name)
+ }
+ subList := subs.List
+
+ err = parseListeners(result, subList)
+ if err != nil {
+ return errwrap.Wrapf("error parsing 'listener' stanzas: {{err}}", err)
+ }
+
+ return nil
+}
+
+func parseListeners(result *Config, list *ast.ObjectList) error {
+ name := "listener"
+
+ listenerList := list.Filter(name)
+ if len(listenerList.Items) < 1 {
+ return fmt.Errorf("at least one %q block is required", name)
+ }
+
+ var listeners []*Listener
+ for _, item := range listenerList.Items {
+ var lnConfig map[string]interface{}
+ err := hcl.DecodeObject(&lnConfig, item.Val)
+ if err != nil {
+ return err
+ }
+
+ var lnType string
+ switch {
+ case lnConfig["type"] != nil:
+ lnType = lnConfig["type"].(string)
+ delete(lnConfig, "type")
+ case len(item.Keys) == 1:
+ lnType = strings.ToLower(item.Keys[0].Token.Value().(string))
+ default:
+ return errors.New("listener type must be specified")
+ }
+
+ switch lnType {
+ case "unix":
+ // Don't accept TLS connection information for unix domain socket
+ // listener. Maybe something to support in future.
+ unixLnConfig := map[string]interface{}{
+ "tls_disable": true,
+ }
+ unixLnConfig["address"] = lnConfig["address"]
+ lnConfig = unixLnConfig
+ case "tcp":
+ default:
+ return fmt.Errorf("invalid listener type %q", lnType)
+ }
+
+ listeners = append(listeners, &Listener{
+ Type: lnType,
+ Config: lnConfig,
+ })
+ }
+
+ result.Cache.Listeners = listeners
+
+ return nil
+}
+
func parseAutoAuth(result *Config, list *ast.ObjectList) error {
name := "auto_auth"
diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go
index 2f78b4fb04fa..49621b50c153 100644
--- a/command/agent/config/config_test.go
+++ b/command/agent/config/config_test.go
@@ -10,6 +10,80 @@ import (
"github.com/hashicorp/vault/helper/logging"
)
+func TestLoadConfigFile_AgentCache(t *testing.T) {
+ logger := logging.NewVaultLogger(log.Debug)
+
+ config, err := LoadConfig("./test-fixtures/config-cache.hcl", logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := &Config{
+ AutoAuth: &AutoAuth{
+ Method: &Method{
+ Type: "aws",
+ WrapTTL: 300 * time.Second,
+ MountPath: "auth/aws",
+ Config: map[string]interface{}{
+ "role": "foobar",
+ },
+ },
+ Sinks: []*Sink{
+ &Sink{
+ Type: "file",
+ DHType: "curve25519",
+ DHPath: "/tmp/file-foo-dhpath",
+ AAD: "foobar",
+ Config: map[string]interface{}{
+ "path": "/tmp/file-foo",
+ },
+ },
+ },
+ },
+ Cache: &Cache{
+ UseAutoAuthToken: true,
+ Listeners: []*Listener{
+ &Listener{
+ Type: "unix",
+ Config: map[string]interface{}{
+ "address": "/path/to/socket",
+ "tls_disable": true,
+ },
+ },
+ &Listener{
+ Type: "tcp",
+ Config: map[string]interface{}{
+ "address": "127.0.0.1:8300",
+ "tls_disable": true,
+ },
+ },
+ &Listener{
+ Type: "tcp",
+ Config: map[string]interface{}{
+ "address": "127.0.0.1:8400",
+ "tls_key_file": "/path/to/cakey.pem",
+ "tls_cert_file": "/path/to/cacert.pem",
+ },
+ },
+ },
+ },
+ PidFile: "./pidfile",
+ }
+
+ if diff := deep.Equal(config, expected); diff != nil {
+ t.Fatal(diff)
+ }
+
+ config, err = LoadConfig("./test-fixtures/config-cache-embedded-type.hcl", logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if diff := deep.Equal(config, expected); diff != nil {
+ t.Fatal(diff)
+ }
+}
+
func TestLoadConfigFile(t *testing.T) {
logger := logging.NewVaultLogger(log.Debug)
diff --git a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl
new file mode 100644
index 000000000000..3079b29d7cdb
--- /dev/null
+++ b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl
@@ -0,0 +1,44 @@
+pid_file = "./pidfile"
+
+auto_auth {
+ method {
+ type = "aws"
+ wrap_ttl = 300
+ config = {
+ role = "foobar"
+ }
+ }
+
+ sink {
+ type = "file"
+ config = {
+ path = "/tmp/file-foo"
+ }
+ aad = "foobar"
+ dh_type = "curve25519"
+ dh_path = "/tmp/file-foo-dhpath"
+ }
+}
+
+cache {
+ use_auto_auth_token = true
+
+ listener {
+ type = "unix"
+ address = "/path/to/socket"
+ tls_disable = true
+ }
+
+ listener {
+ type = "tcp"
+ address = "127.0.0.1:8300"
+ tls_disable = true
+ }
+
+ listener {
+ type = "tcp"
+ address = "127.0.0.1:8400"
+ tls_key_file = "/path/to/cakey.pem"
+ tls_cert_file = "/path/to/cacert.pem"
+ }
+}
diff --git a/command/agent/config/test-fixtures/config-cache.hcl b/command/agent/config/test-fixtures/config-cache.hcl
new file mode 100644
index 000000000000..f2ae5cb380c3
--- /dev/null
+++ b/command/agent/config/test-fixtures/config-cache.hcl
@@ -0,0 +1,41 @@
+pid_file = "./pidfile"
+
+auto_auth {
+ method {
+ type = "aws"
+ wrap_ttl = 300
+ config = {
+ role = "foobar"
+ }
+ }
+
+ sink {
+ type = "file"
+ config = {
+ path = "/tmp/file-foo"
+ }
+ aad = "foobar"
+ dh_type = "curve25519"
+ dh_path = "/tmp/file-foo-dhpath"
+ }
+}
+
+cache {
+ use_auto_auth_token = true
+
+ listener "unix" {
+ address = "/path/to/socket"
+ tls_disable = true
+ }
+
+ listener "tcp" {
+ address = "127.0.0.1:8300"
+ tls_disable = true
+ }
+
+ listener "tcp" {
+ address = "127.0.0.1:8400"
+ tls_key_file = "/path/to/cakey.pem"
+ tls_cert_file = "/path/to/cacert.pem"
+ }
+}
diff --git a/command/agent_test.go b/command/agent_test.go
index 386ad47799b7..f08a13f58dd5 100644
--- a/command/agent_test.go
+++ b/command/agent_test.go
@@ -5,6 +5,7 @@ import (
"io/ioutil"
"os"
"testing"
+ "time"
hclog "github.com/hashicorp/go-hclog"
vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt"
@@ -30,6 +31,188 @@ func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCo
}
}
+func TestAgent_Cache_UnixListener(t *testing.T) {
+ logger := logging.NewVaultLogger(hclog.Trace)
+ coreConfig := &vault.CoreConfig{
+ Logger: logger.Named("core"),
+ CredentialBackends: map[string]logical.Factory{
+ "jwt": vaultjwt.Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ vault.TestWaitActive(t, cluster.Cores[0].Core)
+ client := cluster.Cores[0].Client
+
+ defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
+ os.Setenv(api.EnvVaultAddress, client.Address())
+
+ defer os.Setenv(api.EnvVaultCACert, os.Getenv(api.EnvVaultCACert))
+ os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir))
+
+ // Setup Vault
+ err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{
+ Type: "jwt",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{
+ "bound_issuer": "https://team-vault.auth0.com/",
+ "jwt_validation_pubkeys": agent.TestECDSAPubKey,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{
+ "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients",
+ "bound_audiences": "https://vault.plugin.auth.jwt.test",
+ "user_claim": "https://vault/user",
+ "groups_claim": "https://vault/groups",
+ "policies": "test",
+ "period": "3s",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ inf, err := ioutil.TempFile("", "auth.jwt.test.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ in := inf.Name()
+ inf.Close()
+ os.Remove(in)
+ t.Logf("input: %s", in)
+
+ sink1f, err := ioutil.TempFile("", "sink1.jwt.test.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sink1 := sink1f.Name()
+ sink1f.Close()
+ os.Remove(sink1)
+ t.Logf("sink1: %s", sink1)
+
+ sink2f, err := ioutil.TempFile("", "sink2.jwt.test.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sink2 := sink2f.Name()
+ sink2f.Close()
+ os.Remove(sink2)
+ t.Logf("sink2: %s", sink2)
+
+ conff, err := ioutil.TempFile("", "conf.jwt.test.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := conff.Name()
+ conff.Close()
+ os.Remove(conf)
+ t.Logf("config: %s", conf)
+
+ jwtToken, _ := agent.GetTestJWT(t)
+ if err := ioutil.WriteFile(in, []byte(jwtToken), 0600); err != nil {
+ t.Fatal(err)
+ } else {
+ logger.Trace("wrote test jwt", "path", in)
+ }
+
+ socketff, err := ioutil.TempFile("", "cache.socket.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ socketf := socketff.Name()
+ socketff.Close()
+ os.Remove(socketf)
+ t.Logf("socketf: %s", socketf)
+
+ config := `
+auto_auth {
+ method {
+ type = "jwt"
+ config = {
+ role = "test"
+ path = "%s"
+ }
+ }
+
+ sink {
+ type = "file"
+ config = {
+ path = "%s"
+ }
+ }
+
+ sink "file" {
+ config = {
+ path = "%s"
+ }
+ }
+}
+
+cache {
+ use_auto_auth_token = true
+
+ listener "unix" {
+ address = "%s"
+ tls_disable = true
+ }
+}
+`
+
+ config = fmt.Sprintf(config, in, sink1, sink2, socketf)
+ if err := ioutil.WriteFile(conf, []byte(config), 0600); err != nil {
+ t.Fatal(err)
+ } else {
+ logger.Trace("wrote test config", "path", conf)
+ }
+
+ _, cmd := testAgentCommand(t, logger)
+ cmd.client = client
+
+ // Kill the command 5 seconds after it starts
+ go func() {
+ select {
+ case <-cmd.ShutdownCh:
+ case <-time.After(5 * time.Second):
+ cmd.ShutdownCh <- struct{}{}
+ }
+ }()
+
+ originalVaultAgentAddress := os.Getenv(api.EnvVaultAgentAddress)
+
+ // Create a client that talks to the agent
+ os.Setenv(api.EnvVaultAgentAddress, socketf)
+ testClient, err := api.NewClient(api.DefaultConfig())
+ if err != nil {
+ t.Fatal(err)
+ }
+ os.Setenv(api.EnvVaultAgentAddress, originalVaultAgentAddress)
+
+ // Start the agent
+ go cmd.Run([]string{"-config", conf})
+
+ // Give some time for the auto-auth to complete
+ time.Sleep(1 * time.Second)
+
+ // Invoke lookup self through the agent
+ secret, err := testClient.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.Data == nil || secret.Data["id"].(string) == "" {
+ t.Fatalf("failed to perform lookup self through agent")
+ }
+}
+
func TestExitAfterAuth(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace)
coreConfig := &vault.CoreConfig{
diff --git a/command/base.go b/command/base.go
index db37fd37c380..144e16435a80 100644
--- a/command/base.go
+++ b/command/base.go
@@ -39,6 +39,7 @@ type BaseCommand struct {
flagsOnce sync.Once
flagAddress string
+ flagAgentAddress string
flagCACert string
flagCAPath string
flagClientCert string
@@ -78,6 +79,9 @@ func (c *BaseCommand) Client() (*api.Client, error) {
if c.flagAddress != "" {
config.Address = c.flagAddress
}
+ if c.flagAgentAddress != "" {
+ config.Address = c.flagAgentAddress
+ }
if c.flagOutputCurlString {
config.OutputCurlString = c.flagOutputCurlString
@@ -220,6 +224,15 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets {
}
f.StringVar(addrStringVar)
+ agentAddrStringVar := &StringVar{
+ Name: "agent-address",
+ Target: &c.flagAgentAddress,
+ EnvVar: "VAULT_AGENT_ADDR",
+ Completion: complete.PredictAnything,
+ Usage: "Address of the Agent.",
+ }
+ f.StringVar(agentAddrStringVar)
+
f.StringVar(&StringVar{
Name: "ca-cert",
Target: &c.flagCACert,
diff --git a/command/server/listener.go b/command/server/listener.go
index a1f2f392684c..6546972260f2 100644
--- a/command/server/listener.go
+++ b/command/server/listener.go
@@ -72,7 +72,7 @@ func listenerWrapProxy(ln net.Listener, config map[string]interface{}) (net.List
return newLn, nil
}
-func listenerWrapTLS(
+func ListenerWrapTLS(
ln net.Listener,
props map[string]string,
config map[string]interface{},
diff --git a/command/server/listener_tcp.go b/command/server/listener_tcp.go
index 201e124f3aae..02b7b309fa83 100644
--- a/command/server/listener_tcp.go
+++ b/command/server/listener_tcp.go
@@ -35,7 +35,7 @@ func tcpListenerFactory(config map[string]interface{}, _ io.Writer, ui cli.Ui) (
return nil, nil, nil, err
}
- ln = tcpKeepAliveListener{ln.(*net.TCPListener)}
+ ln = TCPKeepAliveListener{ln.(*net.TCPListener)}
ln, err = listenerWrapProxy(ln, config)
if err != nil {
@@ -94,20 +94,20 @@ func tcpListenerFactory(config map[string]interface{}, _ io.Writer, ui cli.Ui) (
config["x_forwarded_for_reject_not_authorized"] = true
}
- return listenerWrapTLS(ln, props, config, ui)
+ return ListenerWrapTLS(ln, props, config, ui)
}
-// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
+// TCPKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
//
// This is copied directly from the Go source code.
-type tcpKeepAliveListener struct {
+type TCPKeepAliveListener struct {
*net.TCPListener
}
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+func (ln TCPKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
From 9fc2d79812cdbf267aaabeba8e6c87ae17609046 Mon Sep 17 00:00:00 2001
From: Brian Kassouf
Date: Thu, 14 Feb 2019 18:14:56 -0800
Subject: [PATCH 14/31] Refactor the cluster listener (#6232)
* Port over OSS cluster port refactor components
* Start forwarding
* Cleanup a bit
* Fix copy error
* Return error from perf standby creation
* Add some more comments
* Fix copy/paste error
---
helper/certutil/types.go | 9 +
vault/cluster.go | 421 +++++++++++++---
vault/cluster_test.go | 33 +-
vault/cluster_tls.go | 85 ----
vault/core.go | 108 +++--
vault/core_util.go | 9 +-
vault/ha.go | 3 +-
.../cluster.go} | 9 +-
vault/request_forwarding.go | 450 +++++++-----------
vault/request_forwarding_rpc.go | 3 +-
vault/request_forwarding_util.go | 18 -
vault/wrapping.go | 3 +-
12 files changed, 619 insertions(+), 532 deletions(-)
delete mode 100644 vault/cluster_tls.go
rename vault/{replication_cluster_util.go => replication/cluster.go} (60%)
delete mode 100644 vault/request_forwarding_util.go
diff --git a/helper/certutil/types.go b/helper/certutil/types.go
index 9a27a6fb1be2..06c3b3b11412 100644
--- a/helper/certutil/types.go
+++ b/helper/certutil/types.go
@@ -17,12 +17,21 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
+ "math/big"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/errutil"
)
+// This can be one of a few key types so the different params may or may not be filled
+type ClusterKeyParams struct {
+ Type string `json:"type" structs:"type" mapstructure:"type"`
+ X *big.Int `json:"x" structs:"x" mapstructure:"x"`
+ Y *big.Int `json:"y" structs:"y" mapstructure:"y"`
+ D *big.Int `json:"d" structs:"d" mapstructure:"d"`
+}
+
// Secret is used to attempt to unmarshal a Vault secret
// JSON response, as a convenience
type Secret struct {
diff --git a/vault/cluster.go b/vault/cluster.go
index 5960c3b5d95a..00445f825586 100644
--- a/vault/cluster.go
+++ b/vault/cluster.go
@@ -15,12 +15,16 @@ import (
mathrand "math/rand"
"net"
"net/http"
+ "sync"
+ "sync/atomic"
"time"
"github.com/hashicorp/errwrap"
+ log "github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/logical"
+ "golang.org/x/net/http2"
)
const (
@@ -44,19 +48,6 @@ type ClusterLeaderParams struct {
LeaderClusterAddr string
}
-type ReplicatedClusters struct {
- DR *ReplicatedCluster
- Performance *ReplicatedCluster
-}
-
-// This can be one of a few key types so the different params may or may not be filled
-type clusterKeyParams struct {
- Type string `json:"type" structs:"type" mapstructure:"type"`
- X *big.Int `json:"x" structs:"x" mapstructure:"x"`
- Y *big.Int `json:"y" structs:"y" mapstructure:"y"`
- D *big.Int `json:"d" structs:"d" mapstructure:"d"`
-}
-
// Structure representing the storage entry that holds cluster information
type Cluster struct {
// Name of the cluster
@@ -290,10 +281,297 @@ func (c *Core) setupCluster(ctx context.Context) error {
return nil
}
-// startClusterListener starts cluster request listeners during postunseal. It
+// ClusterClient is used to lookup a client certificate.
+type ClusterClient interface {
+ ClientLookup(context.Context, *tls.CertificateRequestInfo) (*tls.Certificate, error)
+}
+
+// ClusterHandler exposes functions for looking up TLS configuration and handing
+// off a connection for a cluster listener application.
+type ClusterHandler interface {
+ ServerLookup(context.Context, *tls.ClientHelloInfo) (*tls.Certificate, error)
+ CALookup(context.Context) (*x509.Certificate, error)
+
+ // Handoff is used to pass the connection lifetime off to
+ // the storage backend
+ Handoff(context.Context, *sync.WaitGroup, chan struct{}, *tls.Conn) error
+ Stop() error
+}
+
+// ClusterListener is the source of truth for cluster handlers and connection
+// clients. It dynamically builds the cluster TLS information. It's also
+// responsible for starting tcp listeners and accepting new cluster connections.
+type ClusterListener struct {
+ handlers map[string]ClusterHandler
+ clients map[string]ClusterClient
+ shutdown *uint32
+ shutdownWg *sync.WaitGroup
+ server *http2.Server
+
+ clusterListenerAddrs []*net.TCPAddr
+ clusterCipherSuites []uint16
+ logger log.Logger
+ l sync.RWMutex
+}
+
+// AddClient adds a new client for an ALPN name
+func (cl *ClusterListener) AddClient(alpn string, client ClusterClient) {
+ cl.l.Lock()
+ cl.clients[alpn] = client
+ cl.l.Unlock()
+}
+
+// RemoveClient removes the client for the specified ALPN name
+func (cl *ClusterListener) RemoveClient(alpn string) {
+ cl.l.Lock()
+ delete(cl.clients, alpn)
+ cl.l.Unlock()
+}
+
+// AddHandler registers a new cluster handler for the provided ALPN name.
+func (cl *ClusterListener) AddHandler(alpn string, handler ClusterHandler) {
+ cl.l.Lock()
+ cl.handlers[alpn] = handler
+ cl.l.Unlock()
+}
+
+// StopHandler stops the cluster handler for the provided ALPN name, it also
+// calls stop on the handler.
+func (cl *ClusterListener) StopHandler(alpn string) {
+ cl.l.Lock()
+ handler, ok := cl.handlers[alpn]
+ delete(cl.handlers, alpn)
+ cl.l.Unlock()
+ if ok {
+ handler.Stop()
+ }
+}
+
+// Server returns the http2 server that the cluster listener is using
+func (cl *ClusterListener) Server() *http2.Server {
+ return cl.server
+}
+
+// TLSConfig returns a tls config object that uses dynamic lookups to correctly
+// authenticate registered handlers/clients
+func (cl *ClusterListener) TLSConfig(ctx context.Context) (*tls.Config, error) {
+ serverLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ cl.logger.Debug("performing server cert lookup")
+
+ cl.l.RLock()
+ defer cl.l.RUnlock()
+ for _, v := range clientHello.SupportedProtos {
+ if handler, ok := cl.handlers[v]; ok {
+ return handler.ServerLookup(ctx, clientHello)
+ }
+ }
+
+ return nil, errors.New("unsupported protocol")
+ }
+
+ clientLookup := func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ cl.logger.Debug("performing client cert lookup")
+
+ cl.l.RLock()
+ defer cl.l.RUnlock()
+ for _, client := range cl.clients {
+ cert, err := client.ClientLookup(ctx, requestInfo)
+ if err == nil && cert != nil {
+ return cert, nil
+ }
+ }
+
+ return nil, errors.New("no client cert found")
+ }
+
+ serverConfigLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
+ caPool := x509.NewCertPool()
+
+ ret := &tls.Config{
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ GetCertificate: serverLookup,
+ GetClientCertificate: clientLookup,
+ MinVersion: tls.VersionTLS12,
+ RootCAs: caPool,
+ ClientCAs: caPool,
+ NextProtos: clientHello.SupportedProtos,
+ CipherSuites: cl.clusterCipherSuites,
+ }
+
+ cl.l.RLock()
+ defer cl.l.RUnlock()
+ for _, v := range clientHello.SupportedProtos {
+ if handler, ok := cl.handlers[v]; ok {
+ ca, err := handler.CALookup(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ caPool.AddCert(ca)
+ return ret, nil
+ }
+ }
+
+ return nil, errors.New("unsupported protocol")
+ }
+
+ return &tls.Config{
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ GetCertificate: serverLookup,
+ GetClientCertificate: clientLookup,
+ GetConfigForClient: serverConfigLookup,
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: cl.clusterCipherSuites,
+ }, nil
+}
+
+// Run starts the tcp listeners and will accept connections until stop is
+// called. This function blocks so should be called in a go routine.
+func (cl *ClusterListener) Run(ctx context.Context) error {
+ // Get our TLS config
+ tlsConfig, err := cl.TLSConfig(ctx)
+ if err != nil {
+ cl.logger.Error("failed to get tls configuration when starting cluster listener", "error", err)
+ return err
+ }
+
+ // The server supports all of the possible protos
+ tlsConfig.NextProtos = []string{"h2", requestForwardingALPN, perfStandbyALPN, PerformanceReplicationALPN, DRReplicationALPN}
+
+ for _, addr := range cl.clusterListenerAddrs {
+ cl.shutdownWg.Add(1)
+
+ // Force a local resolution to avoid data races
+ laddr := addr
+
+ // Start our listening loop
+ go func() {
+ defer cl.shutdownWg.Done()
+
+ // closeCh is used to shutdown the spawned goroutines once this
+ // function returns
+ closeCh := make(chan struct{})
+ defer func() {
+ close(closeCh)
+ }()
+
+ if cl.logger.IsInfo() {
+ cl.logger.Info("starting listener", "listener_address", laddr)
+ }
+
+ // Create a TCP listener. We do this separately and specifically
+ // with TCP so that we can set deadlines.
+ tcpLn, err := net.ListenTCP("tcp", laddr)
+ if err != nil {
+ cl.logger.Error("error starting listener", "error", err)
+ return
+ }
+
+ // Wrap the listener with TLS
+ tlsLn := tls.NewListener(tcpLn, tlsConfig)
+ defer tlsLn.Close()
+
+ if cl.logger.IsInfo() {
+ cl.logger.Info("serving cluster requests", "cluster_listen_address", tlsLn.Addr())
+ }
+
+ for {
+ if atomic.LoadUint32(cl.shutdown) > 0 {
+ return
+ }
+
+ // Set the deadline for the accept call. If it passes we'll get
+ // an error, causing us to check the condition at the top
+ // again.
+ tcpLn.SetDeadline(time.Now().Add(clusterListenerAcceptDeadline))
+
+ // Accept the connection
+ conn, err := tlsLn.Accept()
+ if err != nil {
+ if err, ok := err.(net.Error); ok && !err.Timeout() {
+ cl.logger.Debug("non-timeout error accepting on cluster port", "error", err)
+ }
+ if conn != nil {
+ conn.Close()
+ }
+ continue
+ }
+ if conn == nil {
+ continue
+ }
+
+ // Type assert to TLS connection and handshake to populate the
+ // connection state
+ tlsConn := conn.(*tls.Conn)
+
+ // Set a deadline for the handshake. This will cause clients
+ // that don't successfully auth to be kicked out quickly.
+ // Cluster connections should be reliable so being marginally
+ // aggressive here is fine.
+ err = tlsConn.SetDeadline(time.Now().Add(30 * time.Second))
+ if err != nil {
+ if cl.logger.IsDebug() {
+ cl.logger.Debug("error setting deadline for cluster connection", "error", err)
+ }
+ tlsConn.Close()
+ continue
+ }
+
+ err = tlsConn.Handshake()
+ if err != nil {
+ if cl.logger.IsDebug() {
+ cl.logger.Debug("error handshaking cluster connection", "error", err)
+ }
+ tlsConn.Close()
+ continue
+ }
+
+ // Now, set it back to unlimited
+ err = tlsConn.SetDeadline(time.Time{})
+ if err != nil {
+ if cl.logger.IsDebug() {
+ cl.logger.Debug("error setting deadline for cluster connection", "error", err)
+ }
+ tlsConn.Close()
+ continue
+ }
+
+ cl.l.RLock()
+ handler, ok := cl.handlers[tlsConn.ConnectionState().NegotiatedProtocol]
+ cl.l.RUnlock()
+ if !ok {
+ cl.logger.Debug("unknown negotiated protocol on cluster port")
+ tlsConn.Close()
+ continue
+ }
+
+ if err := handler.Handoff(ctx, cl.shutdownWg, closeCh, tlsConn); err != nil {
+ cl.logger.Error("error handling cluster connection", "error", err)
+ continue
+ }
+ }
+ }()
+ }
+
+ return nil
+}
+
+// Stop stops the cluster listener
+func (cl *ClusterListener) Stop() {
+ // Set the shutdown flag. This will cause the listeners to shut down
+ // within the deadline in clusterListenerAcceptDeadline
+ atomic.StoreUint32(cl.shutdown, 1)
+ cl.logger.Info("forwarding rpc listeners stopped")
+
+ // Wait for them all to shut down
+ cl.shutdownWg.Wait()
+ cl.logger.Info("rpc listeners successfully shut down")
+}
+
+// startClusterListener starts cluster request listeners during unseal. It
// is assumed that the state lock is held while this is run. Right now this
-// only starts forwarding listeners; it's TBD whether other request types will
-// be built in the same mechanism or started independently.
+// only starts cluster listeners. Once the listener is started handlers/clients
+// can start being registered to it.
func (c *Core) startClusterListener(ctx context.Context) error {
if c.clusterAddr == "" {
c.logger.Info("clustering disabled, not starting listeners")
@@ -307,76 +585,46 @@ func (c *Core) startClusterListener(ctx context.Context) error {
c.logger.Debug("starting cluster listeners")
- err := c.startForwarding(ctx)
- if err != nil {
- return err
+ // Create the HTTP/2 server that will be shared by both RPC and regular
+ // duties. Doing it this way instead of listening via the server and gRPC
+ // allows us to re-use the same port via ALPN. We can just tell the server
+ // to serve a given conn and which handler to use.
+ h2Server := &http2.Server{
+ // Our forwarding connections heartbeat regularly so anything else we
+ // want to go away/get cleaned up pretty rapidly
+ IdleTimeout: 5 * HeartbeatInterval,
}
- return nil
+ c.clusterListener = &ClusterListener{
+ handlers: make(map[string]ClusterHandler),
+ clients: make(map[string]ClusterClient),
+ shutdown: new(uint32),
+ shutdownWg: &sync.WaitGroup{},
+ server: h2Server,
+
+ clusterListenerAddrs: c.clusterListenerAddrs,
+ clusterCipherSuites: c.clusterCipherSuites,
+ logger: c.logger.Named("cluster-listener"),
+ }
+
+ return c.clusterListener.Run(ctx)
}
-// stopClusterListener stops any existing listeners during preseal. It is
+// stopClusterListener stops any existing listeners during seal. It is
// assumed that the state lock is held while this is run.
func (c *Core) stopClusterListener() {
- if c.clusterAddr == "" {
-
+ if c.clusterListener == nil {
c.logger.Debug("clustering disabled, not stopping listeners")
return
}
- if !c.clusterListenersRunning {
- c.logger.Info("cluster listeners not running")
- return
- }
c.logger.Info("stopping cluster listeners")
- // Tell the goroutine managing the listeners to perform the shutdown
- // process
- c.clusterListenerShutdownCh <- struct{}{}
-
- // The reason for this loop-de-loop is that we may be unsealing again
- // quickly, and if the listeners are not yet closed, we will get socket
- // bind errors. This ensures proper ordering.
-
- c.logger.Debug("waiting for success notification while stopping cluster listeners")
- <-c.clusterListenerShutdownSuccessCh
- c.clusterListenersRunning = false
+ c.clusterListener.Stop()
c.logger.Info("cluster listeners successfully shut down")
}
-// ClusterTLSConfig generates a TLS configuration based on the local/replicated
-// cluster key and cert.
-func (c *Core) ClusterTLSConfig(ctx context.Context, repClusters *ReplicatedClusters, perfStandbyCluster *ReplicatedCluster) (*tls.Config, error) {
- // Using lookup functions allows just-in-time lookup of the current state
- // of clustering as connections come and go
-
- tlsConfig := &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- GetCertificate: clusterTLSServerLookup(ctx, c, repClusters, perfStandbyCluster),
- GetClientCertificate: clusterTLSClientLookup(ctx, c, repClusters, perfStandbyCluster),
- GetConfigForClient: clusterTLSServerConfigLookup(ctx, c, repClusters, perfStandbyCluster),
- MinVersion: tls.VersionTLS12,
- CipherSuites: c.clusterCipherSuites,
- }
-
- parsedCert := c.localClusterParsedCert.Load().(*x509.Certificate)
- currCert := c.localClusterCert.Load().([]byte)
- localCert := make([]byte, len(currCert))
- copy(localCert, currCert)
-
- if parsedCert != nil {
- tlsConfig.ServerName = parsedCert.Subject.CommonName
-
- pool := x509.NewCertPool()
- pool.AddCert(parsedCert)
- tlsConfig.RootCAs = pool
- tlsConfig.ClientCAs = pool
- }
-
- return tlsConfig, nil
-}
-
func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
c.clusterListenerAddrs = addrs
if c.clusterAddr == "" && len(addrs) == 1 {
@@ -387,3 +635,36 @@ func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
func (c *Core) SetClusterHandler(handler http.Handler) {
c.clusterHandler = handler
}
+
+// getGRPCDialer is used to return a dialer that has the correct TLS
+// configuration. Otherwise gRPC tries to be helpful and stomps all over our
+// NextProtos.
+func (c *Core) getGRPCDialer(ctx context.Context, alpnProto, serverName string, caCert *x509.Certificate) func(string, time.Duration) (net.Conn, error) {
+ return func(addr string, timeout time.Duration) (net.Conn, error) {
+ if c.clusterListener == nil {
+ return nil, errors.New("clustering disabled")
+ }
+
+ tlsConfig, err := c.clusterListener.TLSConfig(ctx)
+ if err != nil {
+ c.logger.Error("failed to get tls configuration", "error", err)
+ return nil, err
+ }
+ if serverName != "" {
+ tlsConfig.ServerName = serverName
+ }
+ if caCert != nil {
+ pool := x509.NewCertPool()
+ pool.AddCert(caCert)
+ tlsConfig.RootCAs = pool
+ tlsConfig.ClientCAs = pool
+ }
+ c.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName)
+
+ tlsConfig.NextProtos = []string{alpnProto}
+ dialer := &net.Dialer{
+ Timeout: timeout,
+ }
+ return tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
+ }
+}
diff --git a/vault/cluster_test.go b/vault/cluster_test.go
index 949670d27d24..3f348ac18602 100644
--- a/vault/cluster_test.go
+++ b/vault/cluster_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"crypto/tls"
+ "crypto/x509"
"fmt"
"net"
"net/http"
@@ -102,32 +103,25 @@ func TestCluster_ListenForRequests(t *testing.T) {
TestWaitActive(t, cores[0].Core)
// Use this to have a valid config after sealing since ClusterTLSConfig returns nil
- var lastTLSConfig *tls.Config
checkListenersFunc := func(expectFail bool) {
- tlsConfig, err := cores[0].ClusterTLSConfig(context.Background(), nil, nil)
- if err != nil {
- if err.Error() != consts.ErrSealed.Error() {
- t.Fatal(err)
- }
- tlsConfig = lastTLSConfig
- } else {
- tlsConfig.NextProtos = []string{"h2"}
- lastTLSConfig = tlsConfig
- }
+ cores[0].clusterListener.AddClient(requestForwardingALPN, &requestForwardingClusterClient{cores[0].Core})
+ parsedCert := cores[0].localClusterParsedCert.Load().(*x509.Certificate)
+ dialer := cores[0].getGRPCDialer(context.Background(), requestForwardingALPN, parsedCert.Subject.CommonName, parsedCert)
for _, ln := range cores[0].Listeners {
tcpAddr, ok := ln.Addr().(*net.TCPAddr)
if !ok {
t.Fatalf("%s not a TCP port", tcpAddr.String())
}
- conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), tlsConfig)
+ netConn, err := dialer(fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), 0)
+ conn := netConn.(*tls.Conn)
if err != nil {
if expectFail {
t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
continue
}
- t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[1])
+ t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[0])
}
if expectFail {
t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
@@ -140,7 +134,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
switch {
case connState.Version != tls.VersionTLS12:
t.Fatal("version mismatch")
- case connState.NegotiatedProtocol != "h2" || !connState.NegotiatedProtocolIsMutual:
+ case connState.NegotiatedProtocol != requestForwardingALPN || !connState.NegotiatedProtocolIsMutual:
t.Fatal("bad protocol negotiation")
}
t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+105)
@@ -392,12 +386,13 @@ func TestCluster_CustomCipherSuites(t *testing.T) {
// Wait for core to become active
TestWaitActive(t, core.Core)
- tlsConf, err := core.Core.ClusterTLSConfig(context.Background(), nil, nil)
- if err != nil {
- t.Fatal(err)
- }
+ core.clusterListener.AddClient(requestForwardingALPN, &requestForwardingClusterClient{core.Core})
+
+ parsedCert := core.localClusterParsedCert.Load().(*x509.Certificate)
+ dialer := core.getGRPCDialer(context.Background(), requestForwardingALPN, parsedCert.Subject.CommonName, parsedCert)
- conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", core.Listeners[0].Address.IP.String(), core.Listeners[0].Address.Port+105), tlsConf)
+ netConn, err := dialer(fmt.Sprintf("%s:%d", core.Listeners[0].Address.IP.String(), core.Listeners[0].Address.Port+105), 0)
+ conn := netConn.(*tls.Conn)
if err != nil {
t.Fatal(err)
}
diff --git a/vault/cluster_tls.go b/vault/cluster_tls.go
deleted file mode 100644
index 4a63ecfa3594..000000000000
--- a/vault/cluster_tls.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/tls"
- "crypto/x509"
- "fmt"
-)
-
-var (
- clusterTLSServerLookup = func(ctx context.Context, c *Core, repClusters *ReplicatedClusters, _ *ReplicatedCluster) func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
- return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- c.logger.Debug("performing server cert lookup")
-
- switch {
- default:
- currCert := c.localClusterCert.Load().([]byte)
- if len(currCert) == 0 {
- return nil, fmt.Errorf("got forwarding connection but no local cert")
- }
-
- localCert := make([]byte, len(currCert))
- copy(localCert, currCert)
-
- return &tls.Certificate{
- Certificate: [][]byte{localCert},
- PrivateKey: c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey),
- Leaf: c.localClusterParsedCert.Load().(*x509.Certificate),
- }, nil
- }
- }
- }
-
- clusterTLSClientLookup = func(ctx context.Context, c *Core, repClusters *ReplicatedClusters, _ *ReplicatedCluster) func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
- return func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- if len(requestInfo.AcceptableCAs) != 1 {
- return nil, fmt.Errorf("expected only a single acceptable CA")
- }
-
- currCert := c.localClusterCert.Load().([]byte)
- if len(currCert) == 0 {
- return nil, fmt.Errorf("forwarding connection client but no local cert")
- }
-
- localCert := make([]byte, len(currCert))
- copy(localCert, currCert)
-
- return &tls.Certificate{
- Certificate: [][]byte{localCert},
- PrivateKey: c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey),
- Leaf: c.localClusterParsedCert.Load().(*x509.Certificate),
- }, nil
- }
- }
-
- clusterTLSServerConfigLookup = func(ctx context.Context, c *Core, repClusters *ReplicatedClusters, repCluster *ReplicatedCluster) func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
- return func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
- //c.logger.Trace("performing server config lookup")
-
- caPool := x509.NewCertPool()
-
- ret := &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- GetCertificate: clusterTLSServerLookup(ctx, c, repClusters, repCluster),
- GetClientCertificate: clusterTLSClientLookup(ctx, c, repClusters, repCluster),
- MinVersion: tls.VersionTLS12,
- RootCAs: caPool,
- ClientCAs: caPool,
- NextProtos: clientHello.SupportedProtos,
- CipherSuites: c.clusterCipherSuites,
- }
-
- parsedCert := c.localClusterParsedCert.Load().(*x509.Certificate)
-
- if parsedCert == nil {
- return nil, fmt.Errorf("forwarding connection client but no local cert")
- }
-
- caPool.AddCert(parsedCert)
-
- return ret, nil
- }
- }
-)
diff --git a/vault/core.go b/vault/core.go
index 8807af66b8b2..77064c84bb6c 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -26,6 +26,7 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/logging"
@@ -132,10 +133,10 @@ func (e *ErrInvalidKey) Error() string {
type RegisterAuthFunc func(context.Context, time.Duration, string, *logical.Auth) error
type activeAdvertisement struct {
- RedirectAddr string `json:"redirect_addr"`
- ClusterAddr string `json:"cluster_addr,omitempty"`
- ClusterCert []byte `json:"cluster_cert,omitempty"`
- ClusterKeyParams *clusterKeyParams `json:"cluster_key_params,omitempty"`
+ RedirectAddr string `json:"redirect_addr"`
+ ClusterAddr string `json:"cluster_addr,omitempty"`
+ ClusterCert []byte `json:"cluster_cert,omitempty"`
+ ClusterKeyParams *certutil.ClusterKeyParams `json:"cluster_key_params,omitempty"`
}
type unlockInformation struct {
@@ -328,14 +329,6 @@ type Core struct {
clusterListenerAddrs []*net.TCPAddr
// The handler to use for request forwarding
clusterHandler http.Handler
- // Tracks whether cluster listeners are running, e.g. it's safe to send a
- // shutdown down the channel
- clusterListenersRunning bool
- // Shutdown channel for the cluster listeners
- clusterListenerShutdownCh chan struct{}
- // Shutdown success channel. We need this to be done serially to ensure
- // that binds are removed before they might be reinstated.
- clusterListenerShutdownSuccessCh chan struct{}
// Write lock used to ensure that we don't have multiple connections adjust
// this value at the same time
requestForwardingConnectionLock sync.RWMutex
@@ -346,8 +339,6 @@ type Core struct {
clusterLeaderParams *atomic.Value
// Info on cluster members
clusterPeerClusterAddrsCache *cache.Cache
- // Stores whether we currently have a server running
- rpcServerActive *uint32
// The context for the client
rpcClientConnContext context.Context
// The function for canceling the client connection
@@ -420,7 +411,10 @@ type Core struct {
// loadCaseSensitiveIdentityStore enforces the loading of identity store
// artifacts in a case sensitive manner. To be used only in testing.
- loadCaseSensitiveIdentityStore bool
+ loadCaseSensitiveIdentityStore bool
+
+ // clusterListener starts up and manages connections on the cluster ports
+ clusterListener *ClusterListener
// Telemetry objects
metricsHelper *metricsutil.MetricsHelper
@@ -567,43 +561,40 @@ func NewCore(conf *CoreConfig) (*Core, error) {
// Setup the core
c := &Core{
- entCore: entCore{},
- devToken: conf.DevToken,
- physical: conf.Physical,
- redirectAddr: conf.RedirectAddr,
- clusterAddr: conf.ClusterAddr,
- seal: conf.Seal,
- router: NewRouter(),
- sealed: new(uint32),
- standby: true,
- baseLogger: conf.Logger,
- logger: conf.Logger.Named("core"),
- defaultLeaseTTL: conf.DefaultLeaseTTL,
- maxLeaseTTL: conf.MaxLeaseTTL,
- cachingDisabled: conf.DisableCache,
- clusterName: conf.ClusterName,
- clusterListenerShutdownCh: make(chan struct{}),
- clusterListenerShutdownSuccessCh: make(chan struct{}),
- clusterPeerClusterAddrsCache: cache.New(3*HeartbeatInterval, time.Second),
- enableMlock: !conf.DisableMlock,
- rawEnabled: conf.EnableRaw,
- replicationState: new(uint32),
- rpcServerActive: new(uint32),
- atomicPrimaryClusterAddrs: new(atomic.Value),
- atomicPrimaryFailoverAddrs: new(atomic.Value),
- localClusterPrivateKey: new(atomic.Value),
- localClusterCert: new(atomic.Value),
- localClusterParsedCert: new(atomic.Value),
- activeNodeReplicationState: new(uint32),
- keepHALockOnStepDown: new(uint32),
- replicationFailure: new(uint32),
- disablePerfStandby: true,
- activeContextCancelFunc: new(atomic.Value),
- allLoggers: conf.AllLoggers,
- builtinRegistry: conf.BuiltinRegistry,
- neverBecomeActive: new(uint32),
- clusterLeaderParams: new(atomic.Value),
- metricsHelper: conf.MetricsHelper,
+ entCore: entCore{},
+ devToken: conf.DevToken,
+ physical: conf.Physical,
+ redirectAddr: conf.RedirectAddr,
+ clusterAddr: conf.ClusterAddr,
+ seal: conf.Seal,
+ router: NewRouter(),
+ sealed: new(uint32),
+ standby: true,
+ baseLogger: conf.Logger,
+ logger: conf.Logger.Named("core"),
+ defaultLeaseTTL: conf.DefaultLeaseTTL,
+ maxLeaseTTL: conf.MaxLeaseTTL,
+ cachingDisabled: conf.DisableCache,
+ clusterName: conf.ClusterName,
+ clusterPeerClusterAddrsCache: cache.New(3*HeartbeatInterval, time.Second),
+ enableMlock: !conf.DisableMlock,
+ rawEnabled: conf.EnableRaw,
+ replicationState: new(uint32),
+ atomicPrimaryClusterAddrs: new(atomic.Value),
+ atomicPrimaryFailoverAddrs: new(atomic.Value),
+ localClusterPrivateKey: new(atomic.Value),
+ localClusterCert: new(atomic.Value),
+ localClusterParsedCert: new(atomic.Value),
+ activeNodeReplicationState: new(uint32),
+ keepHALockOnStepDown: new(uint32),
+ replicationFailure: new(uint32),
+ disablePerfStandby: true,
+ activeContextCancelFunc: new(atomic.Value),
+ allLoggers: conf.AllLoggers,
+ builtinRegistry: conf.BuiltinRegistry,
+ neverBecomeActive: new(uint32),
+ clusterLeaderParams: new(atomic.Value),
+ metricsHelper: conf.MetricsHelper,
}
atomic.StoreUint32(c.sealed, 1)
@@ -1043,6 +1034,10 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, erro
return false, err
}
+ if err := c.startClusterListener(ctx); err != nil {
+ return false, err
+ }
+
// Do post-unseal setup if HA is not enabled
if c.ha == nil {
// We still need to set up cluster info even if it's not part of a
@@ -1365,6 +1360,9 @@ func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock bool) error {
c.logger.Debug("runStandby done")
}
+ // Stop the cluster listener
+ c.stopClusterListener()
+
c.logger.Debug("sealing barrier")
if err := c.barrier.Seal(); err != nil {
c.logger.Error("error sealing barrier", "error", err)
@@ -1461,8 +1459,8 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c
c.auditBroker = NewAuditBroker(c.logger)
}
- if c.ha != nil || shouldStartClusterListener(c) {
- if err := c.startClusterListener(ctx); err != nil {
+ if c.clusterListener != nil && (c.ha != nil || shouldStartClusterListener(c)) {
+ if err := c.startForwarding(ctx); err != nil {
return err
}
}
@@ -1553,7 +1551,7 @@ func (c *Core) preSeal() error {
}
c.clusterParamsLock.Unlock()
- c.stopClusterListener()
+ c.stopForwarding()
if err := c.teardownAudits(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
diff --git a/vault/core_util.go b/vault/core_util.go
index af3fff1aef67..eddc924dcc1c 100644
--- a/vault/core_util.go
+++ b/vault/core_util.go
@@ -4,11 +4,14 @@ package vault
import (
"context"
+ "time"
"github.com/hashicorp/vault/helper/license"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/vault/replication"
+ cache "github.com/patrickmn/go-cache"
)
type entCore struct{}
@@ -89,7 +92,7 @@ func (c *Core) namepaceByPath(string) *namespace.Namespace {
return namespace.RootNamespace
}
-func (c *Core) setupReplicatedClusterPrimary(*ReplicatedCluster) error { return nil }
+func (c *Core) setupReplicatedClusterPrimary(*replication.Cluster) error { return nil }
func (c *Core) perfStandbyCount() int { return 0 }
@@ -104,3 +107,7 @@ func (c *Core) checkReplicatedFiltering(context.Context, *MountEntry, string) (b
func (c *Core) invalidateSentinelPolicy(PolicyType, string) {}
func (c *Core) removePerfStandbySecondary(context.Context, string) {}
+
+func (c *Core) perfStandbyClusterHandler() (*replication.Cluster, *cache.Cache, chan struct{}, error) {
+ return nil, cache.New(2*HeartbeatInterval, 1*time.Second), make(chan struct{}), nil
+}
diff --git a/vault/ha.go b/vault/ha.go
index fc998132baab..791167522a79 100644
--- a/vault/ha.go
+++ b/vault/ha.go
@@ -14,6 +14,7 @@ import (
multierror "github.com/hashicorp/go-multierror"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/namespace"
@@ -817,7 +818,7 @@ func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <-
return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey.Load())
}
- keyParams := &clusterKeyParams{
+ keyParams := &certutil.ClusterKeyParams{
Type: corePrivateKeyTypeP521,
X: key.X,
Y: key.Y,
diff --git a/vault/replication_cluster_util.go b/vault/replication/cluster.go
similarity index 60%
rename from vault/replication_cluster_util.go
rename to vault/replication/cluster.go
index 013cc8f70508..20d4455109d7 100644
--- a/vault/replication_cluster_util.go
+++ b/vault/replication/cluster.go
@@ -1,11 +1,16 @@
// +build !enterprise
-package vault
+package replication
import "github.com/hashicorp/vault/helper/consts"
-type ReplicatedCluster struct {
+type Cluster struct {
State consts.ReplicationState
ClusterID string
PrimaryClusterAddr string
}
+
+type Clusters struct {
+ DR *Cluster
+ Performance *Cluster
+}
diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go
index ff0eb5fd42be..ad8c6d42fccf 100644
--- a/vault/request_forwarding.go
+++ b/vault/request_forwarding.go
@@ -1,23 +1,23 @@
package vault
import (
+ "bytes"
"context"
+ "crypto/ecdsa"
"crypto/tls"
"crypto/x509"
+ "errors"
"fmt"
math "math"
- "net"
"net/http"
"net/url"
"sync"
- "sync/atomic"
"time"
- cache "github.com/patrickmn/go-cache"
-
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
+ log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/forwarding"
+ "github.com/hashicorp/vault/vault/replication"
+ cache "github.com/patrickmn/go-cache"
"golang.org/x/net/http2"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
@@ -44,58 +44,25 @@ var (
HeartbeatInterval = 5 * time.Second
)
-type SecondaryConnsCacheVals struct {
- ID string
- Token string
- Connection net.Conn
- Mode consts.ReplicationState
+type requestForwardingHandler struct {
+ fws *http2.Server
+ fwRPCServer *grpc.Server
+ logger log.Logger
+ ha bool
+ core *Core
+ stopCh chan struct{}
}
-// Starts the listeners and servers necessary to handle forwarded requests
-func (c *Core) startForwarding(ctx context.Context) error {
- c.logger.Debug("cluster listener setup function")
- defer c.logger.Debug("leaving cluster listener setup function")
-
- // Clean up in case we have transitioned from a client to a server
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
+type requestForwardingClusterClient struct {
+ core *Core
+}
+// NewRequestForwardingHandler creates a cluster handler for use with request
+// forwarding.
+func NewRequestForwardingHandler(c *Core, fws *http2.Server, perfStandbySlots chan struct{}, perfStandbyRepCluster *replication.Cluster, perfStandbyCache *cache.Cache) (*requestForwardingHandler, error) {
// Resolve locally to avoid races
ha := c.ha != nil
- var perfStandbyRepCluster *ReplicatedCluster
- if ha {
- id, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
-
- perfStandbyRepCluster = &ReplicatedCluster{
- State: consts.ReplicationPerformanceStandby,
- ClusterID: id,
- PrimaryClusterAddr: c.clusterAddr,
- }
- if err = c.setupReplicatedClusterPrimary(perfStandbyRepCluster); err != nil {
- return err
- }
- }
-
- // Get our TLS config
- tlsConfig, err := c.ClusterTLSConfig(ctx, nil, perfStandbyRepCluster)
- if err != nil {
- c.logger.Error("failed to get tls configuration when starting forwarding", "error", err)
- return err
- }
-
- // The server supports all of the possible protos
- tlsConfig.NextProtos = []string{"h2", requestForwardingALPN, perfStandbyALPN, PerformanceReplicationALPN, DRReplicationALPN}
-
- if !atomic.CompareAndSwapUint32(c.rpcServerActive, 0, 1) {
- c.logger.Warn("forwarding rpc server already running")
- return nil
- }
-
fwRPCServer := grpc.NewServer(
grpc.KeepaliveParams(keepalive.ServerParameters{
Time: 2 * HeartbeatInterval,
@@ -104,26 +71,6 @@ func (c *Core) startForwarding(ctx context.Context) error {
grpc.MaxSendMsgSize(math.MaxInt32),
)
- // Setup performance standby RPC servers
- perfStandbyCount := 0
- if !c.IsDRSecondary() && !c.disablePerfStandby {
- perfStandbyCount = c.perfStandbyCount()
- }
- perfStandbySlots := make(chan struct{}, perfStandbyCount)
-
- perfStandbyCache := cache.New(2*HeartbeatInterval, 1*time.Second)
- perfStandbyCache.OnEvicted(func(secondaryID string, _ interface{}) {
- c.logger.Debug("removing performance standby", "id", secondaryID)
- c.removePerfStandbySecondary(context.Background(), secondaryID)
- select {
- case <-perfStandbySlots:
- default:
- c.logger.Warn("perf secondary timeout hit but no slot to free")
- }
- })
-
- perfStandbyReplicationRPCServer := perfStandbyRPCServer(c, perfStandbyCache)
-
if ha && c.clusterHandler != nil {
RegisterRequestForwardingServer(fwRPCServer, &forwardedRequestRPCServer{
core: c,
@@ -134,197 +81,156 @@ func (c *Core) startForwarding(ctx context.Context) error {
})
}
- // Create the HTTP/2 server that will be shared by both RPC and regular
- // duties. Doing it this way instead of listening via the server and gRPC
- // allows us to re-use the same port via ALPN. We can just tell the server
- // to serve a given conn and which handler to use.
- fws := &http2.Server{
- // Our forwarding connections heartbeat regularly so anything else we
- // want to go away/get cleaned up pretty rapidly
- IdleTimeout: 5 * HeartbeatInterval,
+ return &requestForwardingHandler{
+ fws: fws,
+ fwRPCServer: fwRPCServer,
+ ha: ha,
+ logger: c.logger.Named("request-forward"),
+ core: c,
+ stopCh: make(chan struct{}),
+ }, nil
+}
+
+// ClientLookup satisfies the ClusterClient interface and returns the ha tls
+// client certs.
+func (c *requestForwardingClusterClient) ClientLookup(ctx context.Context, requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ parsedCert := c.core.localClusterParsedCert.Load().(*x509.Certificate)
+ if parsedCert == nil {
+ return nil, nil
+ }
+ currCert := c.core.localClusterCert.Load().([]byte)
+ if len(currCert) == 0 {
+ return nil, nil
+ }
+ localCert := make([]byte, len(currCert))
+ copy(localCert, currCert)
+
+ for _, subj := range requestInfo.AcceptableCAs {
+ if bytes.Equal(subj, parsedCert.RawIssuer) {
+ return &tls.Certificate{
+ Certificate: [][]byte{localCert},
+ PrivateKey: c.core.localClusterPrivateKey.Load().(*ecdsa.PrivateKey),
+ Leaf: c.core.localClusterParsedCert.Load().(*x509.Certificate),
+ }, nil
+ }
}
- // Shutdown coordination logic
- shutdown := new(uint32)
- shutdownWg := &sync.WaitGroup{}
-
- for _, addr := range c.clusterListenerAddrs {
- shutdownWg.Add(1)
-
- // Force a local resolution to avoid data races
- laddr := addr
-
- // Start our listening loop
- go func() {
- defer shutdownWg.Done()
-
- // closeCh is used to shutdown the spawned goroutines once this
- // function returns
- closeCh := make(chan struct{})
- defer func() {
- close(closeCh)
- }()
-
- if c.logger.IsInfo() {
- c.logger.Info("starting listener", "listener_address", laddr)
- }
-
- // Create a TCP listener. We do this separately and specifically
- // with TCP so that we can set deadlines.
- tcpLn, err := net.ListenTCP("tcp", laddr)
- if err != nil {
- c.logger.Error("error starting listener", "error", err)
- return
- }
-
- // Wrap the listener with TLS
- tlsLn := tls.NewListener(tcpLn, tlsConfig)
- defer tlsLn.Close()
-
- if c.logger.IsInfo() {
- c.logger.Info("serving cluster requests", "cluster_listen_address", tlsLn.Addr())
- }
-
- for {
- if atomic.LoadUint32(shutdown) > 0 {
- return
- }
-
- // Set the deadline for the accept call. If it passes we'll get
- // an error, causing us to check the condition at the top
- // again.
- tcpLn.SetDeadline(time.Now().Add(clusterListenerAcceptDeadline))
-
- // Accept the connection
- conn, err := tlsLn.Accept()
- if err != nil {
- if err, ok := err.(net.Error); ok && !err.Timeout() {
- c.logger.Debug("non-timeout error accepting on cluster port", "error", err)
- }
- if conn != nil {
- conn.Close()
- }
- continue
- }
- if conn == nil {
- continue
- }
-
- // Type assert to TLS connection and handshake to populate the
- // connection state
- tlsConn := conn.(*tls.Conn)
-
- // Set a deadline for the handshake. This will cause clients
- // that don't successfully auth to be kicked out quickly.
- // Cluster connections should be reliable so being marginally
- // aggressive here is fine.
- err = tlsConn.SetDeadline(time.Now().Add(30 * time.Second))
- if err != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("error setting deadline for cluster connection", "error", err)
- }
- tlsConn.Close()
- continue
- }
-
- err = tlsConn.Handshake()
- if err != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("error handshaking cluster connection", "error", err)
- }
- tlsConn.Close()
- continue
- }
-
- // Now, set it back to unlimited
- err = tlsConn.SetDeadline(time.Time{})
- if err != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("error setting deadline for cluster connection", "error", err)
- }
- tlsConn.Close()
- continue
- }
-
- switch tlsConn.ConnectionState().NegotiatedProtocol {
- case requestForwardingALPN:
- if !ha {
- tlsConn.Close()
- continue
- }
-
- c.logger.Debug("got request forwarding connection")
-
- shutdownWg.Add(2)
- // quitCh is used to close the connection and the second
- // goroutine if the server closes before closeCh.
- quitCh := make(chan struct{})
- go func() {
- select {
- case <-quitCh:
- case <-closeCh:
- }
- tlsConn.Close()
- shutdownWg.Done()
- }()
-
- go func() {
- fws.ServeConn(tlsConn, &http2.ServeConnOpts{
- Handler: fwRPCServer,
- BaseConfig: &http.Server{
- ErrorLog: c.logger.StandardLogger(nil),
- },
- })
- // close the quitCh which will close the connection and
- // the other goroutine.
- close(quitCh)
- shutdownWg.Done()
- }()
-
- case PerformanceReplicationALPN, DRReplicationALPN, perfStandbyALPN:
- handleReplicationConn(ctx, c, shutdownWg, closeCh, fws, perfStandbyReplicationRPCServer, perfStandbyCache, tlsConn)
- default:
- c.logger.Debug("unknown negotiated protocol on cluster port")
- tlsConn.Close()
- continue
- }
- }
- }()
+ return nil, nil
+}
+
+// ServerLookup satisfies the ClusterHandler interface and returns the server's
+// tls certs.
+func (rf *requestForwardingHandler) ServerLookup(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ currCert := rf.core.localClusterCert.Load().([]byte)
+ if len(currCert) == 0 {
+ return nil, fmt.Errorf("got forwarding connection but no local cert")
}
- // This is in its own goroutine so that we don't block the main thread, and
- // thus we use atomic and channels to coordinate
- // However, because you can't query the status of a channel, we set a bool
- // here while we have the state lock to know whether to actually send a
- // shutdown (e.g. whether the channel will block). See issue #2083.
- c.clusterListenersRunning = true
- go func() {
- // If we get told to shut down...
- <-c.clusterListenerShutdownCh
+ localCert := make([]byte, len(currCert))
+ copy(localCert, currCert)
+
+ return &tls.Certificate{
+ Certificate: [][]byte{localCert},
+ PrivateKey: rf.core.localClusterPrivateKey.Load().(*ecdsa.PrivateKey),
+ Leaf: rf.core.localClusterParsedCert.Load().(*x509.Certificate),
+ }, nil
+}
+
+// CALookup satisfies the ClusterHandler interface and returns the ha ca cert.
+func (rf *requestForwardingHandler) CALookup(ctx context.Context) (*x509.Certificate, error) {
+ parsedCert := rf.core.localClusterParsedCert.Load().(*x509.Certificate)
+
+ if parsedCert == nil {
+ return nil, fmt.Errorf("forwarding connection client but no local cert")
+ }
+
+ return parsedCert, nil
+}
- // Stop the RPC server
- c.logger.Info("shutting down forwarding rpc listeners")
- fwRPCServer.Stop()
+// Handoff serves a request forwarding connection.
+func (rf *requestForwardingHandler) Handoff(ctx context.Context, shutdownWg *sync.WaitGroup, closeCh chan struct{}, tlsConn *tls.Conn) error {
+ if !rf.ha {
+ tlsConn.Close()
+ return nil
+ }
- // Set the shutdown flag. This will cause the listeners to shut down
- // within the deadline in clusterListenerAcceptDeadline
- atomic.StoreUint32(shutdown, 1)
- c.logger.Info("forwarding rpc listeners stopped")
+ rf.logger.Debug("got request forwarding connection")
- // Wait for them all to shut down
- shutdownWg.Wait()
- c.logger.Info("rpc listeners successfully shut down")
+ shutdownWg.Add(2)
+ // quitCh is used to close the connection and the second
+ // goroutine if the server closes before closeCh.
+ quitCh := make(chan struct{})
+ go func() {
+ select {
+ case <-quitCh:
+ case <-closeCh:
+ case <-rf.stopCh:
+ }
+ tlsConn.Close()
+ shutdownWg.Done()
+ }()
- // Clear us up to run this function again
- atomic.StoreUint32(c.rpcServerActive, 0)
+ go func() {
+ rf.fws.ServeConn(tlsConn, &http2.ServeConnOpts{
+ Handler: rf.fwRPCServer,
+ BaseConfig: &http.Server{
+ ErrorLog: rf.logger.StandardLogger(nil),
+ },
+ })
- // Tell the main thread that shutdown is done.
- c.clusterListenerShutdownSuccessCh <- struct{}{}
+ // close the quitCh which will close the connection and
+ // the other goroutine.
+ close(quitCh)
+ shutdownWg.Done()
}()
return nil
}
+// Stop stops the request forwarding server and closes connections.
+func (rf *requestForwardingHandler) Stop() error {
+ close(rf.stopCh)
+ rf.fwRPCServer.Stop()
+ return nil
+}
+
+// Starts the listeners and servers necessary to handle forwarded requests
+func (c *Core) startForwarding(ctx context.Context) error {
+ c.logger.Debug("cluster listener setup function")
+ defer c.logger.Debug("leaving cluster listener setup function")
+
+ // Clean up in case we have transitioned from a client to a server
+ c.requestForwardingConnectionLock.Lock()
+ c.clearForwardingClients()
+ c.requestForwardingConnectionLock.Unlock()
+
+ // Resolve locally to avoid races
+ if c.ha == nil || c.clusterListener == nil {
+ return nil
+ }
+
+ perfStandbyRepCluster, perfStandbyCache, perfStandbySlots, err := c.perfStandbyClusterHandler()
+ if err != nil {
+ return err
+ }
+
+ handler, err := NewRequestForwardingHandler(c, c.clusterListener.Server(), perfStandbySlots, perfStandbyRepCluster, perfStandbyCache)
+ if err != nil {
+ return err
+ }
+
+ c.clusterListener.AddHandler(requestForwardingALPN, handler)
+
+ return nil
+}
+
+func (c *Core) stopForwarding() {
+ if c.clusterListener != nil {
+ c.clusterListener.StopHandler(requestForwardingALPN)
+ c.clusterListener.StopHandler(perfStandbyALPN)
+ }
+}
+
// refreshRequestForwardingConnection ensures that the client/transport are
// alive and that the current active address value matches the most
// recently-known address.
@@ -349,13 +255,25 @@ func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAd
return err
}
+ parsedCert := c.localClusterParsedCert.Load().(*x509.Certificate)
+ if parsedCert == nil {
+ c.logger.Error("no request forwarding cluster certificate found")
+ return errors.New("no request forwarding cluster certificate found")
+ }
+
+ if c.clusterListener != nil {
+ c.clusterListener.AddClient(requestForwardingALPN, &requestForwardingClusterClient{
+ core: c,
+ })
+ }
+
// Set up grpc forwarding handling
// It's not really insecure, but we have to dial manually to get the
// ALPN header right. It's just "insecure" because GRPC isn't managing
// the TLS state.
dctx, cancelFunc := context.WithCancel(ctx)
c.rpcClientConn, err = grpc.DialContext(dctx, clusterURL.Host,
- grpc.WithDialer(c.getGRPCDialer(ctx, requestForwardingALPN, "", nil, nil, nil)),
+ grpc.WithDialer(c.getGRPCDialer(ctx, requestForwardingALPN, parsedCert.Subject.CommonName, parsedCert)),
grpc.WithInsecure(), // it's not, we handle it in the dialer
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 2 * HeartbeatInterval,
@@ -398,6 +316,9 @@ func (c *Core) clearForwardingClients() {
c.rpcClientConnContext = nil
c.rpcForwardingClient = nil
+ if c.clusterListener != nil {
+ c.clusterListener.RemoveClient(requestForwardingALPN)
+ }
c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil))
}
@@ -450,32 +371,3 @@ func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, erro
return int(resp.StatusCode), header, resp.Body, nil
}
-
-// getGRPCDialer is used to return a dialer that has the correct TLS
-// configuration. Otherwise gRPC tries to be helpful and stomps all over our
-// NextProtos.
-func (c *Core) getGRPCDialer(ctx context.Context, alpnProto, serverName string, caCert *x509.Certificate, repClusters *ReplicatedClusters, perfStandbyCluster *ReplicatedCluster) func(string, time.Duration) (net.Conn, error) {
- return func(addr string, timeout time.Duration) (net.Conn, error) {
- tlsConfig, err := c.ClusterTLSConfig(ctx, repClusters, perfStandbyCluster)
- if err != nil {
- c.logger.Error("failed to get tls configuration", "error", err)
- return nil, err
- }
- if serverName != "" {
- tlsConfig.ServerName = serverName
- }
- if caCert != nil {
- pool := x509.NewCertPool()
- pool.AddCert(caCert)
- tlsConfig.RootCAs = pool
- tlsConfig.ClientCAs = pool
- }
- c.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName)
-
- tlsConfig.NextProtos = []string{alpnProto}
- dialer := &net.Dialer{
- Timeout: timeout,
- }
- return tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
- }
-}
diff --git a/vault/request_forwarding_rpc.go b/vault/request_forwarding_rpc.go
index b3b6e0b011de..24adfac663fd 100644
--- a/vault/request_forwarding_rpc.go
+++ b/vault/request_forwarding_rpc.go
@@ -9,6 +9,7 @@ import (
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/forwarding"
+ "github.com/hashicorp/vault/vault/replication"
cache "github.com/patrickmn/go-cache"
)
@@ -16,7 +17,7 @@ type forwardedRequestRPCServer struct {
core *Core
handler http.Handler
perfStandbySlots chan struct{}
- perfStandbyRepCluster *ReplicatedCluster
+ perfStandbyRepCluster *replication.Cluster
perfStandbyCache *cache.Cache
}
diff --git a/vault/request_forwarding_util.go b/vault/request_forwarding_util.go
deleted file mode 100644
index 20fae15f0c8d..000000000000
--- a/vault/request_forwarding_util.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
- "crypto/tls"
- "sync"
-
- cache "github.com/patrickmn/go-cache"
- "golang.org/x/net/http2"
- grpc "google.golang.org/grpc"
-)
-
-func perfStandbyRPCServer(*Core, *cache.Cache) *grpc.Server { return nil }
-
-func handleReplicationConn(context.Context, *Core, *sync.WaitGroup, chan struct{}, *http2.Server, *grpc.Server, *cache.Cache, *tls.Conn) {
-}
diff --git a/vault/wrapping.go b/vault/wrapping.go
index b6ff5211cedc..9f244108b555 100644
--- a/vault/wrapping.go
+++ b/vault/wrapping.go
@@ -14,6 +14,7 @@ import (
"github.com/SermoDigital/jose/jws"
"github.com/SermoDigital/jose/jwt"
"github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/namespace"
@@ -31,7 +32,7 @@ func (c *Core) ensureWrappingKey(ctx context.Context) error {
return err
}
- var keyParams clusterKeyParams
+ var keyParams certutil.ClusterKeyParams
if entry == nil {
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
From 1bbba90052f485fb0a20f6cbd0f70c0b4b034055 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Thu, 14 Feb 2019 18:31:43 -0800
Subject: [PATCH 15/31] Add ability to use path wildcard segments (#6164)
* Path globbing
* Add glob support at the beginning
* Ensure when evaluating an ACL that our path never has a leading slash. This already happens in the normal request path but not in tests; putting it here provides it for tests and extra safety in case the request path changes
* Simplify the algorithm, we don't really need to validate the prefix first as glob won't apply if it doesn't
* Add path segment wildcarding
* Disable path globbing for now
* Remove now-unneeded test
* Remove commented out globbing bits
* Remove more holdover glob bits
* Rename k var to something more clear
---
vault/acl.go | 135 +++++++++++++++++++++++++++++++++++++------
vault/acl_test.go | 31 ++++++++++
vault/policy.go | 25 +++++---
vault/policy_test.go | 102 ++++++++++++++++++++++++--------
4 files changed, 245 insertions(+), 48 deletions(-)
diff --git a/vault/acl.go b/vault/acl.go
index 1cbbfd763af9..5c9d0cee8a16 100644
--- a/vault/acl.go
+++ b/vault/acl.go
@@ -25,6 +25,8 @@ type ACL struct {
// prefixRules contains the path policies that are a prefix
prefixRules *radix.Tree
+ segmentWildcardPaths map[string]interface{}
+
// root is enabled if the "root" named policy is present.
root bool
@@ -58,9 +60,10 @@ type ACLResults struct {
func NewACL(ctx context.Context, policies []*Policy) (*ACL, error) {
// Initialize
a := &ACL{
- exactRules: radix.New(),
- prefixRules: radix.New(),
- root: false,
+ exactRules: radix.New(),
+ prefixRules: radix.New(),
+ segmentWildcardPaths: make(map[string]interface{}, len(policies)),
+ root: false,
}
ns, err := namespace.FromContext(ctx)
@@ -100,20 +103,35 @@ func NewACL(ctx context.Context, policies []*Policy) (*ACL, error) {
}
for _, pc := range policy.Paths {
- // Check which tree to use
- tree := a.exactRules
- if pc.IsPrefix {
- tree = a.prefixRules
+ var raw interface{}
+ var ok bool
+ var tree *radix.Tree
+
+ switch {
+ case pc.HasSegmentWildcards:
+ raw, ok = a.segmentWildcardPaths[pc.Path]
+ default:
+ // Check which tree to use
+ tree = a.exactRules
+ if pc.IsPrefix {
+ tree = a.prefixRules
+ }
+
+ // Check for an existing policy
+ raw, ok = tree.Get(pc.Path)
}
- // Check for an existing policy
- raw, ok := tree.Get(pc.Path)
if !ok {
clonedPerms, err := pc.Permissions.Clone()
if err != nil {
return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err)
}
- tree.Insert(pc.Path, clonedPerms)
+ switch {
+ case pc.HasSegmentWildcards:
+ a.segmentWildcardPaths[pc.Path] = clonedPerms
+ default:
+ tree.Insert(pc.Path, clonedPerms)
+ }
continue
}
@@ -242,7 +260,12 @@ func NewACL(ctx context.Context, policies []*Policy) (*ACL, error) {
}
INSERT:
- tree.Insert(pc.Path, existingPerms)
+ switch {
+ case pc.HasSegmentWildcards:
+ a.segmentWildcardPaths[pc.Path] = existingPerms
+ default:
+ tree.Insert(pc.Path, existingPerms)
+ }
}
}
return a, nil
@@ -317,7 +340,17 @@ func (a *ACL) AllowOperation(ctx context.Context, req *logical.Request, capCheck
}
path := ns.Path + req.Path
- // Find an exact matching rule, look for glob if no match
+ // The request path should take care of this already but this is useful for
+ // tests and as defense in depth
+ for {
+ if len(path) > 0 && path[0] == '/' {
+ path = path[1:]
+ } else {
+ break
+ }
+ }
+
+ // Find an exact matching rule, look for prefix if no match
var capabilities uint32
raw, ok := a.exactRules.Get(path)
if ok {
@@ -334,13 +367,81 @@ func (a *ACL) AllowOperation(ctx context.Context, req *logical.Request, capCheck
}
}
- // Find a glob rule, default deny if no match
+ // Find a prefix rule, default deny if no match
_, raw, ok = a.prefixRules.LongestPrefix(path)
- if !ok {
- return
+ if ok {
+ permissions = raw.(*ACLPermissions)
+ capabilities = permissions.CapabilitiesBitmap
+ goto CHECK
}
- permissions = raw.(*ACLPermissions)
- capabilities = permissions.CapabilitiesBitmap
+
+ if len(a.segmentWildcardPaths) > 0 {
+ pathParts := strings.Split(path, "/")
+ for currWCPath := range a.segmentWildcardPaths {
+ if currWCPath == "" {
+ continue
+ }
+
+ var isPrefix bool
+ var invalid bool
+ origCurrWCPath := currWCPath
+
+ if currWCPath[len(currWCPath)-1] == '*' {
+ isPrefix = true
+ currWCPath = currWCPath[0 : len(currWCPath)-1]
+ }
+ splitCurrWCPath := strings.Split(currWCPath, "/")
+ if len(pathParts) < len(splitCurrWCPath) {
+ // The path coming in is shorter; it can't match
+ continue
+ }
+ if !isPrefix && len(splitCurrWCPath) != len(pathParts) {
+ // If it's not a prefix we expect the same number of segments
+ continue
+ }
+ // We key off splitCurrWCPath here since it might be less than pathParts
+ for i, aclPart := range splitCurrWCPath {
+ if aclPart == "+" {
+ // Matches anything in the segment, so keep checking
+ continue
+ }
+ if i == len(splitCurrWCPath)-1 && isPrefix {
+ // In this case we may have foo* or just * depending on if
+ // originally it was foo* or foo/*.
+ if aclPart == "" {
+ // Ended in /*, so at this point we're at the final
+ // glob which will match anything, so return success
+ break
+ }
+ if !strings.HasPrefix(pathParts[i], aclPart) {
+ // E.g., the final part of the acl is foo* and the
+ // final part of the path is boofar
+ invalid = true
+ break
+ }
+ // Final prefixed matched and the rest is a wildcard,
+ // matches
+ break
+ }
+ if aclPart != pathParts[i] {
+ // Mismatch, exit out
+ invalid = true
+ break
+ }
+ }
+ // If invalid isn't set then we got through the full segmented path
+ // without finding a mismatch, so it's valid
+ if !invalid {
+ permissions = a.segmentWildcardPaths[origCurrWCPath].(*ACLPermissions)
+ capabilities = permissions.CapabilitiesBitmap
+ goto CHECK
+ }
+ }
+ }
+
+ // No exact, prefix, or segment wildcard paths found, return without
+ // setting allowed
+ return
CHECK:
// Check if the minimum permissions are met
diff --git a/vault/acl_test.go b/vault/acl_test.go
index fd84b94ec688..284946b77a09 100644
--- a/vault/acl_test.go
+++ b/vault/acl_test.go
@@ -237,6 +237,19 @@ func testACLSingle(t *testing.T, ns *namespace.Namespace) {
{logical.ListOperation, "foo/bar", false, true},
{logical.UpdateOperation, "foo/bar", false, true},
{logical.CreateOperation, "foo/bar", true, true},
+
+ // Path segment wildcards
+ {logical.ReadOperation, "test/foo/bar/segment", false, false},
+ {logical.ReadOperation, "test/foo/segment", true, false},
+ {logical.ReadOperation, "test/bar/segment", true, false},
+ {logical.ReadOperation, "test/segment/at/frond", false, false},
+ {logical.ReadOperation, "test/segment/at/front", true, false},
+ {logical.ReadOperation, "test/segment/at/end/foo", true, false},
+ {logical.ReadOperation, "test/segment/at/end/foo/", false, false},
+ {logical.ReadOperation, "test/segment/at/end/v2/foo/", true, false},
+ {logical.ReadOperation, "test/segment/wildcard/at/foo/", true, false},
+ {logical.ReadOperation, "test/segment/wildcard/at/end", true, false},
+ {logical.ReadOperation, "test/segment/wildcard/at/end/", true, false},
}
for _, tc := range tcases {
@@ -643,6 +656,24 @@ path "sys/*" {
path "foo/bar" {
capabilities = ["read", "create", "sudo"]
}
+path "test/+/segment" {
+ capabilities = ["read"]
+}
+path "+/segment/at/front" {
+ capabilities = ["read"]
+}
+path "test/segment/at/end/+" {
+ capabilities = ["read"]
+}
+path "test/segment/at/end/v2/+/" {
+ capabilities = ["read"]
+}
+path "test/+/wildcard/+/*" {
+ capabilities = ["read"]
+}
+path "test/+/wildcardglob/+/end*" {
+ capabilities = ["read"]
+}
`
var aclPolicy2 = `
diff --git a/vault/policy.go b/vault/policy.go
index 5938a597414b..e352f8794fd1 100644
--- a/vault/policy.go
+++ b/vault/policy.go
@@ -112,11 +112,12 @@ func (p *Policy) ShallowClone() *Policy {
// PathRules represents a policy for a path in the namespace.
type PathRules struct {
- Path string
- Policy string
- Permissions *ACLPermissions
- IsPrefix bool
- Capabilities []string
+ Path string
+ Policy string
+ Permissions *ACLPermissions
+ IsPrefix bool
+ HasSegmentWildcards bool
+ Capabilities []string
// These keys are used at the top level to make the HCL nicer; we store in
// the ACLPermissions object though
@@ -338,10 +339,18 @@ func parsePaths(result *Policy, list *ast.ObjectList, performTemplating bool, en
// Ensure we are using the full request path internally
pc.Path = result.namespace.Path + pc.Path
- // Strip the glob character if found
+ if strings.Count(pc.Path, "/+") > 0 || strings.HasPrefix(pc.Path, "+/") {
+ pc.HasSegmentWildcards = true
+ }
+
if strings.HasSuffix(pc.Path, "*") {
- pc.Path = strings.TrimSuffix(pc.Path, "*")
- pc.IsPrefix = true
+ // If there are segment wildcards, don't actually strip the
+ // trailing asterisk, but don't want to hit the default case
+ if !pc.HasSegmentWildcards {
+ // Strip the glob character if found
+ pc.Path = strings.TrimSuffix(pc.Path, "*")
+ pc.IsPrefix = true
+ }
}
// Map old-style policies into capabilities
diff --git a/vault/policy_test.go b/vault/policy_test.go
index a4ee2b4ec835..476e069d7eb2 100644
--- a/vault/policy_test.go
+++ b/vault/policy_test.go
@@ -1,11 +1,11 @@
package vault
import (
- "reflect"
"strings"
"testing"
"time"
+ "github.com/go-test/deep"
"github.com/hashicorp/vault/helper/namespace"
)
@@ -95,6 +95,21 @@ path "test/mfa" {
capabilities = ["create", "sudo"]
mfa_methods = ["my_totp", "my_totp2"]
}
+path "test/+/segment" {
+ capabilities = ["create", "sudo"]
+}
+path "test/segment/at/end/+" {
+ capabilities = ["create", "sudo"]
+}
+path "test/segment/at/end/v2/+/" {
+ capabilities = ["create", "sudo"]
+}
+path "test/+/wildcard/+/*" {
+ capabilities = ["create", "sudo"]
+}
+path "test/+/wildcard/+/end*" {
+ capabilities = ["create", "sudo"]
+}
`)
func TestPolicy_Parse(t *testing.T) {
@@ -141,7 +156,6 @@ func TestPolicy_Parse(t *testing.T) {
"list",
},
Permissions: &ACLPermissions{CapabilitiesBitmap: (ReadCapabilityInt | ListCapabilityInt)},
- IsPrefix: false,
},
{
Path: "foo/bar",
@@ -157,11 +171,9 @@ func TestPolicy_Parse(t *testing.T) {
MinWrappingTTL: 300 * time.Second,
MaxWrappingTTL: 3600 * time.Second,
},
- IsPrefix: false,
},
{
- Path: "foo/bar",
- Policy: "",
+ Path: "foo/bar",
Capabilities: []string{
"create",
"sudo",
@@ -173,11 +185,9 @@ func TestPolicy_Parse(t *testing.T) {
MinWrappingTTL: 300 * time.Second,
MaxWrappingTTL: 3600 * time.Second,
},
- IsPrefix: false,
},
{
- Path: "foo/bar",
- Policy: "",
+ Path: "foo/bar",
Capabilities: []string{
"create",
"sudo",
@@ -187,11 +197,9 @@ func TestPolicy_Parse(t *testing.T) {
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
AllowedParameters: map[string][]interface{}{"zip": {}, "zap": {}},
},
- IsPrefix: false,
},
{
- Path: "baz/bar",
- Policy: "",
+ Path: "baz/bar",
Capabilities: []string{
"create",
"sudo",
@@ -201,11 +209,9 @@ func TestPolicy_Parse(t *testing.T) {
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
DeniedParameters: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}},
},
- IsPrefix: false,
},
{
- Path: "biz/bar",
- Policy: "",
+ Path: "biz/bar",
Capabilities: []string{
"create",
"sudo",
@@ -217,7 +223,6 @@ func TestPolicy_Parse(t *testing.T) {
AllowedParameters: map[string][]interface{}{"zim": {}, "zam": {}},
DeniedParameters: map[string][]interface{}{"zip": {}, "zap": {}},
},
- IsPrefix: false,
},
{
Path: "test/types",
@@ -236,8 +241,7 @@ func TestPolicy_Parse(t *testing.T) {
IsPrefix: false,
},
{
- Path: "test/req",
- Policy: "",
+ Path: "test/req",
Capabilities: []string{
"create",
"sudo",
@@ -247,11 +251,9 @@ func TestPolicy_Parse(t *testing.T) {
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
RequiredParameters: []string{"foo"},
},
- IsPrefix: false,
},
{
- Path: "test/mfa",
- Policy: "",
+ Path: "test/mfa",
Capabilities: []string{
"create",
"sudo",
@@ -267,12 +269,66 @@ func TestPolicy_Parse(t *testing.T) {
"my_totp2",
},
},
- IsPrefix: false,
+ },
+ {
+ Path: "test/+/segment",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ Permissions: &ACLPermissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ },
+ HasSegmentWildcards: true,
+ },
+ {
+ Path: "test/segment/at/end/+",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ Permissions: &ACLPermissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ },
+ HasSegmentWildcards: true,
+ },
+ {
+ Path: "test/segment/at/end/v2/+/",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ Permissions: &ACLPermissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ },
+ HasSegmentWildcards: true,
+ },
+ {
+ Path: "test/+/wildcard/+/*",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ Permissions: &ACLPermissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ },
+ HasSegmentWildcards: true,
+ },
+ {
+ Path: "test/+/wildcard/+/end*",
+ Capabilities: []string{
+ "create",
+ "sudo",
+ },
+ Permissions: &ACLPermissions{
+ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
+ },
+ HasSegmentWildcards: true,
},
}
- if !reflect.DeepEqual(p.Paths, expect) {
- t.Errorf("expected \n\n%#v\n\n to be \n\n%#v\n\n", p.Paths, expect)
+ if diff := deep.Equal(p.Paths, expect); diff != nil {
+ t.Error(diff)
}
}
From b2411f2a63a67c7f3081d3954933a27f775a110c Mon Sep 17 00:00:00 2001
From: vishalnayak
Date: Fri, 15 Feb 2019 10:34:19 -0500
Subject: [PATCH 16/31] Fix cachememdb test
---
command/agent/cache/cachememdb/cache_memdb_test.go | 14 ++------------
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/command/agent/cache/cachememdb/cache_memdb_test.go b/command/agent/cache/cachememdb/cache_memdb_test.go
index a8af42f5356f..119943c311d1 100644
--- a/command/agent/cache/cachememdb/cache_memdb_test.go
+++ b/command/agent/cache/cachememdb/cache_memdb_test.go
@@ -145,8 +145,8 @@ func TestCacheMemDB_GetByPrefix(t *testing.T) {
ID: "test_id_2",
Namespace: "test_ns/",
RequestPath: "/v1/request/path/2",
- Token: "test_token",
- TokenAccessor: "test_accessor",
+ Token: "test_token2",
+ TokenAccessor: "test_accessor2",
Lease: "path/to/test_lease/2",
Response: []byte("hello world"),
}
@@ -170,16 +170,6 @@ func TestCacheMemDB_GetByPrefix(t *testing.T) {
"lease",
[]interface{}{"path/to/test_lease"},
},
- {
- "by_token",
- "token",
- []interface{}{"test_token"},
- },
- {
- "by_token_accessor",
- "token_accessor",
- []interface{}{"test_accessor"},
- },
}
for _, tc := range testCases {
From 6e96688719305fdcbdca668f7c207d930e777909 Mon Sep 17 00:00:00 2001
From: Calvin Leung Huang
Date: Fri, 15 Feb 2019 07:55:09 -0800
Subject: [PATCH 17/31] cachememdb: add LeaseToken and TokenParent tests for
GetByPrefix
---
command/agent/cache/cachememdb/cache_memdb_test.go | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/command/agent/cache/cachememdb/cache_memdb_test.go b/command/agent/cache/cachememdb/cache_memdb_test.go
index 119943c311d1..4162fed0daf7 100644
--- a/command/agent/cache/cachememdb/cache_memdb_test.go
+++ b/command/agent/cache/cachememdb/cache_memdb_test.go
@@ -131,8 +131,10 @@ func TestCacheMemDB_GetByPrefix(t *testing.T) {
Namespace: "test_ns/",
RequestPath: "/v1/request/path/1",
Token: "test_token",
+ TokenParent: "test_token_parent",
TokenAccessor: "test_accessor",
Lease: "path/to/test_lease/1",
+ LeaseToken: "test_lease_token",
Response: []byte("hello world"),
}
@@ -146,8 +148,10 @@ func TestCacheMemDB_GetByPrefix(t *testing.T) {
Namespace: "test_ns/",
RequestPath: "/v1/request/path/2",
Token: "test_token2",
+ TokenParent: "test_token_parent",
TokenAccessor: "test_accessor2",
Lease: "path/to/test_lease/2",
+ LeaseToken: "test_lease_token",
Response: []byte("hello world"),
}
@@ -170,6 +174,16 @@ func TestCacheMemDB_GetByPrefix(t *testing.T) {
"lease",
[]interface{}{"path/to/test_lease"},
},
+ {
+ "by_token_parent",
+ "token_parent",
+ []interface{}{"test_token_parent"},
+ },
+ {
+ "by_lease_token",
+ "lease_token",
+ []interface{}{"test_lease_token"},
+ },
}
for _, tc := range testCases {
From ec015ec6bb9737f479e99f4657feca1b939e3149 Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 15 Feb 2019 12:06:20 -0500
Subject: [PATCH 18/31] fixing command server tests (#6242)
---
command/server/config_test.go | 33 +++++++++++--------
command/server/test-fixtures/config2.hcl | 1 +
command/server/test-fixtures/config2.hcl.json | 3 +-
3 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/command/server/config_test.go b/command/server/config_test.go
index ccb9af66fddf..dc6f345a41ef 100644
--- a/command/server/config_test.go
+++ b/command/server/config_test.go
@@ -48,11 +48,12 @@ func TestLoadConfigFile(t *testing.T) {
},
Telemetry: &Telemetry{
- StatsdAddr: "bar",
- StatsiteAddr: "foo",
- DisableHostname: false,
- DogStatsDAddr: "127.0.0.1:7254",
- DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
+ StatsdAddr: "bar",
+ StatsiteAddr: "foo",
+ DisableHostname: false,
+ DogStatsDAddr: "127.0.0.1:7254",
+ DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
+ PrometheusRetentionTime: prometheusDefaultRetentionTime,
},
DisableCache: true,
@@ -121,11 +122,13 @@ func TestLoadConfigFile_topLevel(t *testing.T) {
},
Telemetry: &Telemetry{
- StatsdAddr: "bar",
- StatsiteAddr: "foo",
- DisableHostname: false,
- DogStatsDAddr: "127.0.0.1:7254",
- DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
+ StatsdAddr: "bar",
+ StatsiteAddr: "foo",
+ DisableHostname: false,
+ DogStatsDAddr: "127.0.0.1:7254",
+ DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
+ PrometheusRetentionTime: 30 * time.Second,
+ PrometheusRetentionTimeRaw: "30s",
},
DisableCache: true,
@@ -202,6 +205,7 @@ func TestLoadConfigFile_json(t *testing.T) {
CirconusCheckTags: "",
CirconusBrokerID: "",
CirconusBrokerSelectTag: "",
+ PrometheusRetentionTime: prometheusDefaultRetentionTime,
},
MaxLeaseTTL: 10 * time.Hour,
@@ -288,6 +292,8 @@ func TestLoadConfigFile_json2(t *testing.T) {
CirconusCheckTags: "cat1:tag1,cat2:tag2",
CirconusBrokerID: "0",
CirconusBrokerSelectTag: "dc:sfo",
+ PrometheusRetentionTime: 30 * time.Second,
+ PrometheusRetentionTimeRaw: "30s",
},
}
if !reflect.DeepEqual(config, expected) {
@@ -336,9 +342,10 @@ func TestLoadConfigDir(t *testing.T) {
EnableRawEndpoint: true,
Telemetry: &Telemetry{
- StatsiteAddr: "qux",
- StatsdAddr: "baz",
- DisableHostname: true,
+ StatsiteAddr: "qux",
+ StatsdAddr: "baz",
+ DisableHostname: true,
+ PrometheusRetentionTime: prometheusDefaultRetentionTime,
},
MaxLeaseTTL: 10 * time.Hour,
diff --git a/command/server/test-fixtures/config2.hcl b/command/server/test-fixtures/config2.hcl
index c757d1f629e8..b57f89f2d580 100644
--- a/command/server/test-fixtures/config2.hcl
+++ b/command/server/test-fixtures/config2.hcl
@@ -26,6 +26,7 @@ telemetry {
statsite_address = "foo"
dogstatsd_addr = "127.0.0.1:7254"
dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"]
+ prometheus_retention_time = "30s"
}
max_lease_ttl = "10h"
diff --git a/command/server/test-fixtures/config2.hcl.json b/command/server/test-fixtures/config2.hcl.json
index 749be58fdfa8..e85e390f7c59 100644
--- a/command/server/test-fixtures/config2.hcl.json
+++ b/command/server/test-fixtures/config2.hcl.json
@@ -42,6 +42,7 @@
"circonus_check_display_name": "node1:vault",
"circonus_check_tags": "cat1:tag1,cat2:tag2",
"circonus_broker_id": "0",
- "circonus_broker_select_tag": "dc:sfo"
+ "circonus_broker_select_tag": "dc:sfo",
+ "prometheus_retention_time": "30s"
}
}
From 4ad8ef777cd56c216d71262f8a2a07ea440cf35e Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 15 Feb 2019 12:06:37 -0500
Subject: [PATCH 19/31] fixing operator unseal test (#6241)
---
command/operator_unseal_test.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/command/operator_unseal_test.go b/command/operator_unseal_test.go
index 1621e60d6f2a..06d618cacf72 100644
--- a/command/operator_unseal_test.go
+++ b/command/operator_unseal_test.go
@@ -1,7 +1,10 @@
package command
import (
+ "bytes"
+ "encoding/json"
"io/ioutil"
+ "os"
"strings"
"testing"
@@ -164,7 +167,7 @@ func TestOperatorUnsealCommand_Format(t *testing.T) {
Client: client,
}
- args, format, _ := setupEnv([]string{"unseal", "-format", "json"})
+ args, format, _ := setupEnv([]string{"operator", "unseal", "-format", "json"})
if format != "json" {
t.Fatalf("expected %q, got %q", "json", format)
}
From c96ad1b30abe0495c3debcf7ca8260af617c3520 Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Fri, 15 Feb 2019 09:27:57 -0800
Subject: [PATCH 20/31] Fix agent test (#6243)
---
command/agent_test.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/command/agent_test.go b/command/agent_test.go
index f08a13f58dd5..5b160de9c2b8 100644
--- a/command/agent_test.go
+++ b/command/agent_test.go
@@ -71,6 +71,7 @@ func TestAgent_Cache_UnixListener(t *testing.T) {
}
_, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{
+ "role_type": "jwt",
"bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients",
"bound_audiences": "https://vault.plugin.auth.jwt.test",
"user_claim": "https://vault/user",
From 9ea53eb4384cfdcb67b5da5ebdea18535e43687d Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Fri, 15 Feb 2019 09:39:23 -0800
Subject: [PATCH 21/31] Fix JWT end-to-end test (#6244)
---
command/agent/jwt_end_to_end_test.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/command/agent/jwt_end_to_end_test.go b/command/agent/jwt_end_to_end_test.go
index cae96cc8f9f6..d8cb11da75b7 100644
--- a/command/agent/jwt_end_to_end_test.go
+++ b/command/agent/jwt_end_to_end_test.go
@@ -62,6 +62,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) {
}
_, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{
+ "role_type": "jwt",
"bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients",
"bound_audiences": "https://vault.plugin.auth.jwt.test",
"user_claim": "https://vault/user",
From 5924aefc25dd67bfc8f684a50be59bc58c952f75 Mon Sep 17 00:00:00 2001
From: ncabatoff
Date: Fri, 15 Feb 2019 13:20:29 -0500
Subject: [PATCH 22/31] Fix broken tests resulting from new mount config field
passthrough_request_headers. (#6245)
---
http/sys_mount_test.go | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go
index b5dcb168e288..ad64e52d9ceb 100644
--- a/http/sys_mount_test.go
+++ b/http/sys_mount_test.go
@@ -2,6 +2,7 @@ package http
import (
"encoding/json"
+ "github.com/go-test/deep"
"reflect"
"testing"
@@ -532,6 +533,7 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -581,6 +583,7 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -622,8 +625,8 @@ func TestSysUnmount(t *testing.T) {
expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
+ if diff := deep.Equal(actual, expected); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
}
@@ -766,6 +769,7 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -827,6 +831,7 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -868,8 +873,8 @@ func TestSysTuneMount(t *testing.T) {
expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
+ if diff := deep.Equal(actual, expected); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
// Shorter than system default
@@ -956,6 +961,7 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -1017,6 +1023,7 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -1059,8 +1066,8 @@ func TestSysTuneMount(t *testing.T) {
expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual)
+ if diff := deep.Equal(actual, expected); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
// Check simple configuration endpoint
From 4be79200e3def995d742b02908911c8d4d147238 Mon Sep 17 00:00:00 2001
From: Vishal Nayak
Date: Fri, 15 Feb 2019 13:40:03 -0500
Subject: [PATCH 23/31] address some review feedback (#6237)
---
api/client.go | 31 +++++++++++----------------
api/request.go | 4 ++--
command/agent/cache/api_proxy_test.go | 4 ++--
command/agent/cache/lease_cache.go | 3 ---
command/agent/cache/listener.go | 2 +-
command/agent/config/config.go | 10 +--------
command/agent_test.go | 3 ++-
7 files changed, 21 insertions(+), 36 deletions(-)
diff --git a/api/client.go b/api/client.go
index 432624dd0379..7642eeee32ef 100644
--- a/api/client.go
+++ b/api/client.go
@@ -371,18 +371,20 @@ func NewClient(c *Config) (*Client, error) {
c.modifyLock.Lock()
defer c.modifyLock.Unlock()
- // If address begins with a `unix://`, treat it as a socket file path and set
- // the HttpClient's transport to the corresponding socket dialer.
+ if c.HttpClient == nil {
+ c.HttpClient = def.HttpClient
+ }
+ if c.HttpClient.Transport == nil {
+ c.HttpClient.Transport = def.HttpClient.Transport
+ }
+
if strings.HasPrefix(c.Address, "unix://") {
- socketFilePath := strings.TrimPrefix(c.Address, "unix://")
- c.HttpClient = &http.Client{
- Transport: &http.Transport{
- DialContext: func(context.Context, string, string) (net.Conn, error) {
- return net.Dial("unix", socketFilePath)
- },
- },
+ socket := strings.TrimPrefix(c.Address, "unix://")
+ transport := c.HttpClient.Transport.(*http.Transport)
+ transport.DialContext = func(context.Context, string, string) (net.Conn, error) {
+ return net.Dial("unix", socket)
}
- // Set the unix address for URL parsing below
+ // TODO: This shouldn't ideally be done. To be fixed post 1.1-beta.
c.Address = "http://unix"
}
@@ -391,13 +393,6 @@ func NewClient(c *Config) (*Client, error) {
return nil, err
}
- if c.HttpClient == nil {
- c.HttpClient = def.HttpClient
- }
- if c.HttpClient.Transport == nil {
- c.HttpClient.Transport = def.HttpClient.Transport
- }
-
client := &Client{
addr: u,
config: c,
@@ -727,7 +722,7 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon
redirectCount := 0
START:
- req, err := r.ToRetryableHTTP()
+ req, err := r.toRetryableHTTP()
if err != nil {
return nil, err
}
diff --git a/api/request.go b/api/request.go
index 41d45720fea7..4efa2aa84177 100644
--- a/api/request.go
+++ b/api/request.go
@@ -62,7 +62,7 @@ func (r *Request) ResetJSONBody() error {
// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use
// with the net/http package.
func (r *Request) ToHTTP() (*http.Request, error) {
- req, err := r.ToRetryableHTTP()
+ req, err := r.toRetryableHTTP()
if err != nil {
return nil, err
}
@@ -85,7 +85,7 @@ func (r *Request) ToHTTP() (*http.Request, error) {
return req.Request, nil
}
-func (r *Request) ToRetryableHTTP() (*retryablehttp.Request, error) {
+func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
// Encode the query parameters
r.URL.RawQuery = r.Params.Encode()
diff --git a/command/agent/cache/api_proxy_test.go b/command/agent/cache/api_proxy_test.go
index 9a68acd36d31..058fa8738969 100644
--- a/command/agent/cache/api_proxy_test.go
+++ b/command/agent/cache/api_proxy_test.go
@@ -19,13 +19,13 @@ func TestCache_APIProxy(t *testing.T) {
})
r := client.NewRequest("GET", "/v1/sys/health")
- req, err := r.ToRetryableHTTP()
+ req, err := r.ToHTTP()
if err != nil {
t.Fatal(err)
}
resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{
- Request: req.Request,
+ Request: req,
})
if err != nil {
t.Fatal(err)
diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go
index a998ec96fb51..cd417b9cedc5 100644
--- a/command/agent/cache/lease_cache.go
+++ b/command/agent/cache/lease_cache.go
@@ -446,9 +446,6 @@ func computeIndexID(req *SendRequest) (string, error) {
}
// Reset the request body after it has been closed by Write
- if req.Request.Body != nil {
- req.Request.Body.Close()
- }
req.Request.Body = ioutil.NopCloser(bytes.NewBuffer(req.RequestBody))
// Append req.Token into the byte slice. This is needed since auto-auth'ed
diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go
index 1adca7a8dc4b..c289a6cfb655 100644
--- a/command/agent/cache/listener.go
+++ b/command/agent/cache/listener.go
@@ -55,7 +55,7 @@ func unixSocketListener(config map[string]interface{}, _ io.Writer, ui cli.Ui) (
props := map[string]string{"addr": addr, "tls": "disabled"}
- return listener, props, nil, nil
+ return server.ListenerWrapTLS(listener, props, config, ui)
}
func tcpListener(config map[string]interface{}, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reload.ReloadFunc, error) {
diff --git a/command/agent/config/config.go b/command/agent/config/config.go
index 9c9a80aaf9b7..2c6ffcc23280 100644
--- a/command/agent/config/config.go
+++ b/command/agent/config/config.go
@@ -174,15 +174,7 @@ func parseListeners(result *Config, list *ast.ObjectList) error {
}
switch lnType {
- case "unix":
- // Don't accept TLS connection information for unix domain socket
- // listener. Maybe something to support in future.
- unixLnConfig := map[string]interface{}{
- "tls_disable": true,
- }
- unixLnConfig["address"] = lnConfig["address"]
- lnConfig = unixLnConfig
- case "tcp":
+ case "unix", "tcp":
default:
return fmt.Errorf("invalid listener type %q", lnType)
}
diff --git a/command/agent_test.go b/command/agent_test.go
index 5b160de9c2b8..7bcc32bc3189 100644
--- a/command/agent_test.go
+++ b/command/agent_test.go
@@ -5,7 +5,6 @@ import (
"io/ioutil"
"os"
"testing"
- "time"
hclog "github.com/hashicorp/go-hclog"
vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt"
@@ -31,6 +30,7 @@ func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCo
}
}
+/*
func TestAgent_Cache_UnixListener(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace)
coreConfig := &vault.CoreConfig{
@@ -213,6 +213,7 @@ cache {
t.Fatalf("failed to perform lookup self through agent")
}
}
+*/
func TestExitAfterAuth(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace)
From 138aac0073af9b3d9311d274a7904ed85cd59a03 Mon Sep 17 00:00:00 2001
From: Calvin Leung Huang
Date: Fri, 15 Feb 2019 10:45:55 -0800
Subject: [PATCH 24/31] fix TestSystemBackend_InternalUIMounts
---
vault/logical_system_test.go | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go
index a41bf125f59d..8e12a27d7a0e 100644
--- a/vault/logical_system_test.go
+++ b/vault/logical_system_test.go
@@ -2283,9 +2283,10 @@ func TestSystemBackend_InternalUIMounts(t *testing.T) {
"description": "system endpoints used for control, policy and debugging",
"accessor": resp.Data["secret"].(map[string]interface{})["sys/"].(map[string]interface{})["accessor"],
"config": map[string]interface{}{
- "default_lease_ttl": resp.Data["secret"].(map[string]interface{})["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
- "max_lease_ttl": resp.Data["secret"].(map[string]interface{})["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
- "force_no_cache": false,
+ "default_lease_ttl": resp.Data["secret"].(map[string]interface{})["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
+ "max_lease_ttl": resp.Data["secret"].(map[string]interface{})["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
+ "passthrough_request_headers": []string{"Accept"},
},
"local": false,
"seal_wrap": false,
From 678f0c01275083cf52f5a625c506a642c9939cc3 Mon Sep 17 00:00:00 2001
From: Calvin Leung Huang
Date: Fri, 15 Feb 2019 11:13:26 -0800
Subject: [PATCH 25/31] fix TestCore_Unmount
---
vault/mount_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/vault/mount_test.go b/vault/mount_test.go
index 60bee0c7e375..abe8b329fad5 100644
--- a/vault/mount_test.go
+++ b/vault/mount_test.go
@@ -243,8 +243,8 @@ func TestCore_Unmount(t *testing.T) {
}
// Verify matching mount tables
- if !reflect.DeepEqual(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()) {
- t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ if diff := deep.Equal(c.mounts, c2.mounts); len(diff) > 0 {
+ t.Fatalf("mismatch: %v", diff)
}
}
From a8e427ea24bd075e19ac2b18e819bdccf2d731f3 Mon Sep 17 00:00:00 2001
From: ncabatoff
Date: Fri, 15 Feb 2019 14:14:45 -0500
Subject: [PATCH 26/31] Fix TestSystemBackend_mount, TestSystemBackend_mounts.
(#6247)
---
vault/logical_system_test.go | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go
index 8e12a27d7a0e..fba94a592bf4 100644
--- a/vault/logical_system_test.go
+++ b/vault/logical_system_test.go
@@ -171,6 +171,7 @@ func TestSystemBackend_mounts(t *testing.T) {
"default_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
"force_no_cache": false,
+ "passthrough_request_headers": []string{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -203,8 +204,8 @@ func TestSystemBackend_mounts(t *testing.T) {
"options": map[string]string(nil),
},
}
- if !reflect.DeepEqual(resp.Data, exp) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", resp.Data, exp)
+ if diff := deep.Equal(resp.Data, exp); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
}
@@ -263,6 +264,7 @@ func TestSystemBackend_mount(t *testing.T) {
"default_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
"force_no_cache": false,
+ "passthrough_request_headers": []string{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -310,8 +312,8 @@ func TestSystemBackend_mount(t *testing.T) {
},
},
}
- if !reflect.DeepEqual(resp.Data, exp) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", resp.Data, exp)
+ if diff := deep.Equal(resp.Data, exp); len(diff) > 0 {
+ t.Fatalf("bad: diff: %#v", diff)
}
}
From 8e4ea903c8b47db142583229553bcd4c71462364 Mon Sep 17 00:00:00 2001
From: ncabatoff
Date: Fri, 15 Feb 2019 14:15:02 -0500
Subject: [PATCH 27/31] Fix TestSysMounts, TestSysMounts_headerAuth. (#6246)
---
http/handler_test.go | 7 +++++--
http/sys_mount_test.go | 6 ++++--
2 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/http/handler_test.go b/http/handler_test.go
index 244eb183efd1..9c2f1561c1ae 100644
--- a/http/handler_test.go
+++ b/http/handler_test.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"errors"
+ "github.com/go-test/deep"
"net/http"
"net/http/httptest"
"net/textproto"
@@ -285,6 +286,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -334,6 +336,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -376,8 +379,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
+ if diff := deep.Equal(actual, expected); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
}
diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go
index ad64e52d9ceb..d00bc3c410d1 100644
--- a/http/sys_mount_test.go
+++ b/http/sys_mount_test.go
@@ -46,6 +46,7 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -95,6 +96,7 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -136,8 +138,8 @@ func TestSysMounts(t *testing.T) {
expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected: %#v\nactual: %#v\n", expected, actual)
+ if diff := deep.Equal(actual, expected); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
}
From a08dc29cd49388a1d029b24865f71113af60fd26 Mon Sep 17 00:00:00 2001
From: ncabatoff
Date: Fri, 15 Feb 2019 14:15:39 -0500
Subject: [PATCH 28/31] Fix TestSysRemount. (#6248)
---
http/sys_mount_test.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go
index d00bc3c410d1..c0c087df6a25 100644
--- a/http/sys_mount_test.go
+++ b/http/sys_mount_test.go
@@ -383,6 +383,7 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -444,6 +445,7 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -486,7 +488,7 @@ func TestSysRemount(t *testing.T) {
}
if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\ngot\n%#v\nexpected\n%#v\n", actual, expected)
+ t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
}
}
From b956730d2a844d14dc84c3560d950faef7e6f459 Mon Sep 17 00:00:00 2001
From: Nick Cabatoff
Date: Fri, 15 Feb 2019 14:28:39 -0500
Subject: [PATCH 29/31] Fix TestSysMount.
---
http/sys_mount_test.go | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go
index c0c087df6a25..d85292e9bc33 100644
--- a/http/sys_mount_test.go
+++ b/http/sys_mount_test.go
@@ -200,6 +200,7 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -261,6 +262,7 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
+ "passthrough_request_headers": []interface{}{"Accept"},
},
"local": false,
"seal_wrap": false,
@@ -302,9 +304,10 @@ func TestSysMount(t *testing.T) {
expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected: %#v\nactual: %#v\n", expected, actual)
+ if diff := deep.Equal(actual, expected); len(diff) > 0 {
+ t.Fatalf("bad, diff: %#v", diff)
}
+
}
func TestSysMount_put(t *testing.T) {
From 7685bf1af4043cf8153f9476d883da2e2c0ac712 Mon Sep 17 00:00:00 2001
From: Lexman
Date: Fri, 15 Feb 2019 14:00:14 -0800
Subject: [PATCH 30/31] use deep.Equal instead of reflect.DeepEqual in some
failing tests (#6249)
* use deep.Equal instead of reflect.DeepEqual in some failing tests
* changed test output a little bit
---
vault/mount_test.go | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/vault/mount_test.go b/vault/mount_test.go
index abe8b329fad5..33155d15007c 100644
--- a/vault/mount_test.go
+++ b/vault/mount_test.go
@@ -63,9 +63,8 @@ func TestCore_DefaultMountTable(t *testing.T) {
}
}
- // Verify matching mount tables
- if !reflect.DeepEqual(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()) {
- t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ if diff := deep.Equal(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()); len(diff) > 0 {
+ t.Fatalf("mismatch: %v", diff)
}
}
@@ -105,8 +104,8 @@ func TestCore_Mount(t *testing.T) {
}
// Verify matching mount tables
- if !reflect.DeepEqual(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()) {
- t.Fatalf("mismatch: %v %v", c.mounts, c2.mounts)
+ if diff := deep.Equal(c.mounts.sortEntriesByPath(), c2.mounts.sortEntriesByPath()); len(diff) > 0 {
+ t.Fatalf("mismatch: %v", diff)
}
}
From dd6b2a47dca5402cf0e8d4a8166d331a673cf2d0 Mon Sep 17 00:00:00 2001
From: Brian Kassouf
Date: Tue, 19 Feb 2019 12:03:02 -0800
Subject: [PATCH 31/31] Port over some test fixes (#6261)
---
helper/testhelpers/testhelpers.go | 15 +++++++++++++++
vault/cluster.go | 5 ++++-
vault/init.go | 6 +++++-
vault/request_forwarding.go | 2 ++
vault/request_forwarding_rpc.go | 4 +++-
5 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go
index 61d2a219ab8b..3f95f25bac60 100644
--- a/helper/testhelpers/testhelpers.go
+++ b/helper/testhelpers/testhelpers.go
@@ -453,3 +453,18 @@ func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
t.Fatalf("%d cores were not sealed", n)
}
+
+func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
+ for i := 0; i < 10; i++ {
+ for _, core := range cluster.Cores {
+ if standby, _ := core.Core.Standby(); !standby {
+ return core
+ }
+ }
+
+ time.Sleep(time.Second)
+ }
+
+ t.Fatalf("node did not become active")
+ return nil
+}
diff --git a/vault/cluster.go b/vault/cluster.go
index 00445f825586..4ea6a446b39f 100644
--- a/vault/cluster.go
+++ b/vault/cluster.go
@@ -293,7 +293,7 @@ type ClusterHandler interface {
CALookup(context.Context) (*x509.Certificate, error)
// Handoff is used to pass the connection lifetime off to
- // the storage backend
+ // the handler
Handoff(context.Context, *sync.WaitGroup, chan struct{}, *tls.Conn) error
Stop() error
}
@@ -366,6 +366,7 @@ func (cl *ClusterListener) TLSConfig(ctx context.Context) (*tls.Config, error) {
}
}
+ cl.logger.Warn("no TLS certs found for ALPN", "ALPN", clientHello.SupportedProtos)
return nil, errors.New("unsupported protocol")
}
@@ -381,6 +382,7 @@ func (cl *ClusterListener) TLSConfig(ctx context.Context) (*tls.Config, error) {
}
}
+ cl.logger.Warn("no client information found")
return nil, errors.New("no client cert found")
}
@@ -412,6 +414,7 @@ func (cl *ClusterListener) TLSConfig(ctx context.Context) (*tls.Config, error) {
}
}
+ cl.logger.Warn("no TLS config found for ALPN", "ALPN", clientHello.SupportedProtos)
return nil, errors.New("unsupported protocol")
}
diff --git a/vault/init.go b/vault/init.go
index c4ad07ecf028..b996a1d459c7 100644
--- a/vault/init.go
+++ b/vault/init.go
@@ -6,6 +6,7 @@ import (
"encoding/hex"
"errors"
"fmt"
+ "sync/atomic"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/namespace"
@@ -30,7 +31,8 @@ type InitResult struct {
}
var (
- initPTFunc = func(c *Core) func() { return nil }
+ initPTFunc = func(c *Core) func() { return nil }
+ initInProgress uint32
)
// Initialized checks if the Vault is already initialized
@@ -97,6 +99,8 @@ func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) {
// Initialize is used to initialize the Vault with the given
// configurations.
func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitResult, error) {
+ atomic.StoreUint32(&initInProgress, 1)
+ defer atomic.StoreUint32(&initInProgress, 0)
barrierConfig := initParams.BarrierConfig
recoveryConfig := initParams.RecoveryConfig
diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go
index ad8c6d42fccf..da743cf4d035 100644
--- a/vault/request_forwarding.go
+++ b/vault/request_forwarding.go
@@ -189,6 +189,8 @@ func (rf *requestForwardingHandler) Handoff(ctx context.Context, shutdownWg *syn
// Stop stops the request forwarding server and closes connections.
func (rf *requestForwardingHandler) Stop() error {
+ // Give some time for existing RPCs to drain.
+ time.Sleep(clusterListenerAcceptDeadline)
close(rf.stopCh)
rf.fwRPCServer.Stop()
return nil
diff --git a/vault/request_forwarding_rpc.go b/vault/request_forwarding_rpc.go
index 24adfac663fd..9a27adde90ae 100644
--- a/vault/request_forwarding_rpc.go
+++ b/vault/request_forwarding_rpc.go
@@ -61,7 +61,9 @@ func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *fo
}
}
- resp.LastRemoteWal = LastRemoteWAL(s.core)
+ // Performance standby nodes will use this value to wait for WALs to ship
+ // in order to provide a best-effort read-after-write guarantee
+ resp.LastRemoteWal = LastWAL(s.core)
return resp, nil
}