From c3ca83d9af890b88d899291cb684b76a2542b195 Mon Sep 17 00:00:00 2001 From: Mike Jarmy Date: Thu, 24 Oct 2019 10:19:01 -0400 Subject: [PATCH 01/90] changelog++ --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b5066c8fbaf..331979ab9c95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ IMPROVEMENTS: [GH-7541] * sys: Add a new `sys/internal/counters/entities` endpoint, that counts the total number of active identity entities. [GH-7541] + * replication (enterprise): added more replication metrics BUG FIXES: From 87207df0df82da0d2ff41bf0a453bb8506da3ba4 Mon Sep 17 00:00:00 2001 From: ncabatoff Date: Thu, 24 Oct 2019 10:23:31 -0400 Subject: [PATCH 02/90] Fix a regression introduced in #7698 that breaks root token generation. (#7727) --- vault/generate_root.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vault/generate_root.go b/vault/generate_root.go index c0521d30ece9..ca8ab54e6669 100644 --- a/vault/generate_root.go +++ b/vault/generate_root.go @@ -39,11 +39,11 @@ type GenerateRootStrategy interface { type generateStandardRootToken struct{} func (g generateStandardRootToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error { - _, err := c.unsealKeyToMasterKey(ctx, combinedKey) + masterKey, err := c.unsealKeyToMasterKey(ctx, combinedKey) if err != nil { return errwrap.Wrapf("unable to authenticate: {{err}}", err) } - if err := c.barrier.VerifyMaster(combinedKey); err != nil { + if err := c.barrier.VerifyMaster(masterKey); err != nil { return errwrap.Wrapf("master key verification failed: {{err}}", err) } From 1fc0bee2acf9b9500f1e7a66890f1c25076032f1 Mon Sep 17 00:00:00 2001 From: ncabatoff Date: Thu, 24 Oct 2019 13:37:13 -0400 Subject: [PATCH 03/90] Don't try to use req if we got a nonzero status, it'll be nil. (#7728) --- http/logical.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/http/logical.go b/http/logical.go index 9a6653383ad0..445c842b65df 100644 --- a/http/logical.go +++ b/http/logical.go @@ -128,7 +128,7 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) - if err != nil { + if err != nil || status != 0 { return nil, nil, status, err } From 2a3f18e88f0c10f0ba816bdf9fc6f1e0710bb1d0 Mon Sep 17 00:00:00 2001 From: Noelle Daley Date: Thu, 24 Oct 2019 11:35:25 -0700 Subject: [PATCH 04/90] indicate that secret version is deleted even when it is the current version (#7714) --- ui/app/templates/components/secret-edit.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/app/templates/components/secret-edit.hbs b/ui/app/templates/components/secret-edit.hbs index 32ecdc901ade..06bf6bdf3b8b 100644 --- a/ui/app/templates/components/secret-edit.hbs +++ b/ui/app/templates/components/secret-edit.hbs @@ -78,7 +78,7 @@
  • Version {{secretVersion.version}} - {{#if (eq secretVersion.version this.model.currentVersion)}} + {{#if (and (eq secretVersion.version this.model.currentVersion) (not secretVersion.deleted))}} {{else if secretVersion.deleted}} From b2085a63d0ae92a104a16b3ca2aa27b0cf093af5 Mon Sep 17 00:00:00 2001 From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com> Date: Thu, 24 Oct 2019 14:54:09 -0400 Subject: [PATCH 05/90] changelog++ --- CHANGELOG.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 331979ab9c95..9a0a38b1e9af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ IMPROVEMENTS: * replication (enterprise): Write-Ahead-Log entries will not duplicate the data belonging to the encompassing physical entries of the transaction, thereby improving the performance and storage capacity. + * replication (enterprise): added more replication metrics * secrets/aws: The root config can now be read [GH-7245] * storage/azure: Add config parameter to Azure storage backend to allow specifying the ARM endpoint [GH-7567] @@ -66,6 +67,7 @@ IMPROVEMENTS: the host [GH-7330] * sys: Add a new set of endpoints under `sys/pprof/` that allows profiling information to be extracted [GH-7473] + * sys: Add endpoint that counts the total number of active identity entities [GH-7541] * sys/config: Add a new endpoint under `sys/config/state/sanitized` that returns the configuration state of the server. It excludes config values from `storage`, `ha_storage`, and `seal` stanzas and some values @@ -76,9 +78,6 @@ IMPROVEMENTS: * sys: Add a new `sys/internal/counters/tokens` endpoint, that counts the total number of active service token accessors in the shared token storage. [GH-7541] - * sys: Add a new `sys/internal/counters/entities` endpoint, that counts the - total number of active identity entities. [GH-7541] - * replication (enterprise): added more replication metrics BUG FIXES: From fae0f927f444d69c3fa5acdee0d830058f3ff05b Mon Sep 17 00:00:00 2001 From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com> Date: Thu, 24 Oct 2019 14:58:40 -0400 Subject: [PATCH 06/90] changelog++ --- CHANGELOG.md | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a0a38b1e9af..f7d8d6002b5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,33 +2,34 @@ FEATURES: - * **Recovery Mode**: Vault server can be brought up in recovery mode to resolve - outages caused due to data store being in bad state. This is a privileged mode - that allows `sys/raw` API calls to perform surgical corrections to the data - store. Bad storage state can be caused by bugs. However, this is usually - observed when known (and fixed) bugs are hit by older versions of Vault. - * **Stackdriver Metrics Sink**: Vault can now send metrics to - [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration - documentation](https://www.vaultproject.io/docs/config/index.html) for - details. [GH-6957] - * **Transit**: Signing and verification is now supported with the P-384 - (secp384r1) and P-521 (secp521r1) ECDSA curves [GH-7551] - * **Transit**: Encryption and decryption is now supported via AES128-GCM96 - [GH-7555] * **Vault Debug**: A new top-level subcommand, `debug`, is added that allows operators to retrieve debugging information related to a particular Vault node. 
Operators can use this simple workflow to capture triaging information, which can then be consumed programmatically or by support and engineering teams. It has the ability to probe for config, host, metrics, pprof, server status, and replication status. + * **Recovery Mode**: Vault server can be brought up in recovery mode to resolve + outages caused by the data store being in a bad state. This is a privileged mode + that allows `sys/raw` API calls to perform surgical corrections to the data + store. Bad storage state can be caused by bugs. However, this is usually + observed when known (and fixed) bugs are hit by older versions of Vault. * **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets engine, users or applications can check out a service account for use, and its password will be rotated when it's checked back in. - * **New UI Features** The UI now supports managing users and groups for the Userpass, Cert, Okta, and Radius auth methods. - * **Vault Agent Template** Vault Agent now supports rendering templates containing Vault secrets to disk, similar to Consul Template [GH-7652] + * **Vault Agent Template** Vault Agent now supports rendering templates containing + Vault secrets to disk, similar to Consul Template [GH-7652] + * **Transit Key Type Support**: Signing and verification are now supported with the P-384 + (secp384r1) and P-521 (secp521r1) ECDSA curves [GH-7551], and encryption and + decryption are now supported via AES128-GCM96 [GH-7555] + * **New UI Features** The UI now supports managing users and groups for the + Userpass, Cert, Okta, and Radius auth methods. * **Shamir with Stored Master Key** The on disk format for Shamir seals has changed, allowing for a secondary cluster using Shamir downstream from a primary cluster - using AutoUnseal. [GH-7694] + using Auto Unseal. [GH-7694] + * **Stackdriver Metrics Sink**: Vault can now send metrics to + [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration + documentation](https://www.vaultproject.io/docs/config/index.html) for + details. [GH-6957] CHANGES: From d9291997721c0efa437034c74d24d2559a7ba3f3 Mon Sep 17 00:00:00 2001 From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com> Date: Thu, 24 Oct 2019 15:14:45 -0400 Subject: [PATCH 07/90] changelog++ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7d8d6002b5e..a8bd68cc3c6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,8 @@ FEATURES: that allows `sys/raw` API calls to perform surgical corrections to the data store. Bad storage state can be caused by bugs. However, this is usually observed when known (and fixed) bugs are hit by older versions of Vault. + * **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from + an external source for critical security parameters. * **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets engine, users or applications can check out a service account for use, and its password will be rotated when it's checked back in.
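The Transit changelog entries consolidated in PATCH 06 above add P-384/P-521 ECDSA signing [GH-7551] and AES128-GCM96 encryption [GH-7555]. A minimal sketch of exercising the new curve support with Vault's Go client follows; it assumes a running server with the transit engine mounted at `transit/`, a hypothetical key named `demo-key`, and `VAULT_ADDR`/`VAULT_TOKEN` set in the environment. None of these names come from the patches themselves.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Client config is read from VAULT_ADDR / VAULT_TOKEN in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a P-384 signing key. The "transit/" mount path and the key name
	// "demo-key" are illustrative assumptions, not taken from the patches.
	if _, err := client.Logical().Write("transit/keys/demo-key", map[string]interface{}{
		"type": "ecdsa-p384",
	}); err != nil {
		log.Fatal(err)
	}

	// Sign some data; transit expects base64-encoded input.
	secret, err := client.Logical().Write("transit/sign/demo-key", map[string]interface{}{
		"input": "aGVsbG8gd29ybGQ=", // "hello world"
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["signature"]) // e.g. vault:v1:...
}
```

Verifying the returned signature works the same way via a write to `transit/verify/demo-key`.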
From 9c33f74dab5a8c2a3b7e5b081cf9d4291f49d030 Mon Sep 17 00:00:00 2001 From: Jeff Escalante Date: Thu, 24 Oct 2019 17:41:40 -0400 Subject: [PATCH 08/90] Update ruby dependencies (#7720) * update ruby dependencies * add specific version bundler dep * remove ruby-version * remove extra gemfile dep --- website/.ruby-version | 1 - website/Gemfile.lock | 117 ++++++++++++++++++------------------------ 2 files changed, 51 insertions(+), 67 deletions(-) delete mode 100644 website/.ruby-version diff --git a/website/.ruby-version b/website/.ruby-version deleted file mode 100644 index 35cee72dcbf4..000000000000 --- a/website/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -2.4.3 diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 781f2373bed1..63fec203539f 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -1,6 +1,6 @@ GIT remote: https://github.com/carrot/middleman-hashicorp - revision: 2ae888ea440b9cc78d445d71b88b89103c0d621f + revision: a1e3615e8f7d1c3c798d1ec7d7040ff1ba565c0f specs: middleman-hashicorp (0.3.28) activesupport (~> 5.0) @@ -14,92 +14,81 @@ GIT GEM remote: https://rubygems.org/ specs: - activesupport (5.0.7.1) + activesupport (5.0.7.2) concurrent-ruby (~> 1.0, >= 1.0.2) i18n (>= 0.7, < 2) minitest (~> 5.1) tzinfo (~> 1.1) - addressable (2.6.0) - public_suffix (>= 2.0.2, < 4.0) - backports (3.11.3) + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + backports (3.15.0) builder (3.2.3) cacert (0.5.0) coffee-script (2.4.1) coffee-script-source execjs coffee-script-source (1.12.2) - compass-import-once (1.0.5) - sass (>= 3.2, < 3.5) - concurrent-ruby (1.1.4) + concurrent-ruby (1.1.5) contracts (0.13.0) - dato (0.6.18) + dato (0.7.7) activesupport (>= 4.2.7) addressable cacert + dato_json_schema dotenv - downloadr faraday (>= 0.9.0) faraday_middleware (>= 0.9.0) - fastimage imgix (>= 0.3.1) - json_schema listen + mime-types pusher-client thor toml - domain_name (0.5.20180417) - unf (>= 0.0.5, < 1.0.0) + dato_json_schema (0.20.8) dotenv (2.1.0) - downloadr (0.0.41) - addressable (~> 2.3) - rest-client (~> 1.7) em-websocket (0.5.1) eventmachine (>= 0.12.9) http_parser.rb (~> 0.6.0) erubis (2.7.0) eventmachine (1.2.7) execjs (2.7.0) - faraday (0.15.4) + faraday (0.17.0) multipart-post (>= 1.2, < 3) - faraday_middleware (0.13.0) + faraday_middleware (0.13.1) faraday (>= 0.7.4, < 1.0) fast_blank (1.0.0) - fastimage (2.1.5) - ffi (1.10.0) - haml (5.0.4) + fastimage (2.1.7) + ffi (1.11.1) + haml (5.1.2) temple (>= 0.8.0) tilt hamster (3.0.0) concurrent-ruby (~> 1.0) hashie (3.6.0) - http-cookie (1.0.3) - domain_name (~> 0.5) http_parser.rb (0.6.0) - i18n (0.7.0) - imgix (1.2.2) + i18n (0.9.5) + concurrent-ruby (~> 1.0) + imgix (3.1.1) addressable - json (2.1.0) - json_schema (0.20.1) + json (2.2.0) kramdown (1.17.0) listen (3.0.8) rb-fsevent (~> 0.9, >= 0.9.4) rb-inotify (~> 0.9, >= 0.9.7) memoist (0.16.0) - middleman (4.2.1) + middleman (4.3.5) coffee-script (~> 2.2) - compass-import-once (= 1.0.5) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-cli (= 4.2.1) - middleman-core (= 4.2.1) - sass (>= 3.4.0, < 4.0) - middleman-cli (4.2.1) + middleman-cli (= 4.3.5) + middleman-core (= 4.3.5) + middleman-cli (4.3.5) thor (>= 0.17.0, < 2.0) - middleman-core (4.2.1) + middleman-core (4.3.5) activesupport (>= 4.2, < 5.1) addressable (~> 2.3) backports (~> 3.6) - bundler (~> 1.1) + bundler contracts (~> 0.13.0) dotenv erubis @@ -108,74 +97,69 @@ GEM fastimage (~> 2.0) hamster (~> 3.0) hashie (~> 3.4) - i18n (~> 0.7.0) + i18n (~> 0.9.0) listen (~> 3.0.0) memoist (~> 0.14) 
padrino-helpers (~> 0.13.0) parallel rack (>= 1.4.5, < 3) - sass (>= 3.4) + sassc (~> 2.0) servolux - tilt (~> 2.0) + tilt (~> 2.0.9) uglifier (~> 3.0) - middleman-dato (0.8.2) + middleman-dato (0.9.0) activesupport - dato (>= 0.3.2) + dato (>= 0.7.0) dotenv (<= 2.1) middleman-core (>= 4.1.10) middleman-livereload (3.4.6) em-websocket (~> 0.5.1) middleman-core (>= 3.3) rack-livereload (~> 0.3.15) - middleman-syntax (3.0.0) + middleman-syntax (3.2.0) middleman-core (>= 3.2) - rouge (~> 2.0) - mime-types (2.99.3) - mini_portile2 (2.3.0) - minitest (5.11.3) - multipart-post (2.0.0) - netrc (0.11.0) - nokogiri (1.8.5) - mini_portile2 (~> 2.3.0) + rouge (~> 3.2) + mime-types (3.3) + mime-types-data (~> 3.2015) + mime-types-data (3.2019.1009) + mini_portile2 (2.4.0) + minitest (5.12.2) + multipart-post (2.1.1) + nokogiri (1.10.4) + mini_portile2 (~> 2.4.0) padrino-helpers (0.13.3.4) i18n (~> 0.6, >= 0.6.7) padrino-support (= 0.13.3.4) tilt (>= 1.4.1, < 3) padrino-support (0.13.3.4) activesupport (>= 3.1) - parallel (1.12.1) + parallel (1.18.0) parslet (1.8.2) - public_suffix (3.0.3) + public_suffix (4.0.1) pusher-client (0.6.2) json websocket (~> 1.0) - rack (2.0.6) + rack (2.0.7) rack-livereload (0.3.17) rack rb-fsevent (0.10.3) rb-inotify (0.10.0) ffi (~> 1.0) - redcarpet (3.4.0) - rest-client (1.8.0) - http-cookie (>= 1.0.2, < 2.0) - mime-types (>= 1.16, < 3.0) - netrc (~> 0.7) - rouge (2.2.1) - sass (3.4.25) + redcarpet (3.5.0) + rouge (3.12.0) + sassc (2.2.1) + ffi (~> 1.9) servolux (0.13.0) - temple (0.8.0) + temple (0.8.2) thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.10) toml (0.2.0) parslet (~> 1.8.0) tzinfo (1.2.5) thread_safe (~> 0.1) uglifier (3.2.0) execjs (>= 0.3.0, < 3) - unf (0.1.4) - unf_ext - unf_ext (0.0.7.5) websocket (1.2.8) PLATFORMS @@ -183,6 +167,7 @@ PLATFORMS DEPENDENCIES builder + bundler (= 2.0.1) middleman (~> 4.2) middleman-dato middleman-hashicorp! 
@@ -190,4 +175,4 @@ DEPENDENCIES wdm (~> 0.1) BUNDLED WITH - 1.16.5 + 2.0.1 From b01234d2bbc7d8d596910d6851912ae0c8738a5e Mon Sep 17 00:00:00 2001 From: Matthew Irish Date: Thu, 24 Oct 2019 17:08:23 -0500 Subject: [PATCH 09/90] update yarn to 1.19.1 (#7731) --- scripts/cross/Dockerfile | 2 +- ui/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index 84fb826919ed..83e7a3acdce4 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -16,7 +16,7 @@ RUN curl -sL https://deb.nodesource.com/setup_10.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list -RUN apt-get update -y && apt-get install -y -q nodejs yarn=1.17.3 +RUN apt-get update -y && apt-get install -y -q nodejs yarn=1.19.1 RUN rm -rf /var/lib/apt/lists/* diff --git a/ui/package.json b/ui/package.json index 630c6bf21b70..35919e4d8ce0 100644 --- a/ui/package.json +++ b/ui/package.json @@ -156,7 +156,7 @@ }, "engines": { "node": " >= 10.* <11", - "yarn": "1.17.3" + "yarn": "1.19.1" }, "private": true, "ember-addon": { From 9d5334f17232f8f621eaa8129da953b2ea8bb848 Mon Sep 17 00:00:00 2001 From: Sam Salisbury Date: Fri, 25 Oct 2019 13:35:22 +0100 Subject: [PATCH 10/90] run go mod vendor (#7736) --- go.mod | 4 - go.sum | 1 + vendor/github.com/BurntSushi/toml/.gitignore | 5 + vendor/github.com/BurntSushi/toml/.travis.yml | 15 + vendor/github.com/BurntSushi/toml/COMPATIBLE | 3 + vendor/github.com/BurntSushi/toml/COPYING | 21 + vendor/github.com/BurntSushi/toml/Makefile | 19 + vendor/github.com/BurntSushi/toml/README.md | 218 +++ vendor/github.com/BurntSushi/toml/decode.go | 509 +++++++ .../github.com/BurntSushi/toml/decode_meta.go | 121 ++ vendor/github.com/BurntSushi/toml/doc.go | 27 + vendor/github.com/BurntSushi/toml/encode.go | 568 +++++++ .../BurntSushi/toml/encoding_types.go | 19 + .../BurntSushi/toml/encoding_types_1.1.go | 18 + vendor/github.com/BurntSushi/toml/lex.go | 953 ++++++++++++ vendor/github.com/BurntSushi/toml/parse.go | 592 ++++++++ vendor/github.com/BurntSushi/toml/session.vim | 1 + .../github.com/BurntSushi/toml/type_check.go | 91 ++ .../github.com/BurntSushi/toml/type_fields.go | 242 +++ .../go-cfclient/gen_error.go | 115 -- .../compress/bzip2/internal/sais/sais_gen.go | 703 --------- .../google/go-github/github/gen-accessors.go | 332 ----- .../hashicorp/consul-template/LICENSE | 353 +++++ .../hashicorp/consul-template/child/child.go | 428 ++++++ .../hashicorp/consul-template/config/auth.go | 142 ++ .../consul-template/config/config.go | 606 ++++++++ .../consul-template/config/consul.go | 172 +++ .../consul-template/config/convert.go | 197 +++ .../hashicorp/consul-template/config/dedup.go | 132 ++ .../hashicorp/consul-template/config/env.go | 209 +++ .../hashicorp/consul-template/config/exec.go | 216 +++ .../consul-template/config/mapstructure.go | 75 + .../hashicorp/consul-template/config/retry.go | 170 +++ .../hashicorp/consul-template/config/ssl.go | 153 ++ .../consul-template/config/syslog.go | 87 ++ .../consul-template/config/template.go | 458 ++++++ .../consul-template/config/transport.go | 188 +++ .../hashicorp/consul-template/config/vault.go | 327 ++++ .../hashicorp/consul-template/config/wait.go | 191 +++ .../dependency/catalog_datacenters.go | 112 ++ .../dependency/catalog_node.go | 181 +++ .../dependency/catalog_nodes.go | 150 ++ .../dependency/catalog_service.go | 154 ++ 
.../dependency/catalog_services.go | 129 ++ .../consul-template/dependency/client_set.go | 338 +++++ .../consul-template/dependency/dependency.go | 189 +++ .../consul-template/dependency/errors.go | 13 + .../consul-template/dependency/file.go | 129 ++ .../dependency/health_service.go | 248 ++++ .../consul-template/dependency/kv_get.go | 112 ++ .../consul-template/dependency/kv_keys.go | 104 ++ .../consul-template/dependency/kv_list.go | 133 ++ .../consul-template/dependency/set.go | 72 + .../dependency/vault_agent_token.go | 121 ++ .../dependency/vault_common.go | 348 +++++ .../consul-template/dependency/vault_list.go | 126 ++ .../consul-template/dependency/vault_read.go | 175 +++ .../consul-template/dependency/vault_token.go | 95 ++ .../consul-template/dependency/vault_write.go | 177 +++ .../consul-template/manager/dedup.go | 512 +++++++ .../consul-template/manager/errors.go | 31 + .../consul-template/manager/runner.go | 1305 ++++++++++++++++ .../consul-template/renderer/file_perms.go | 22 + .../renderer/file_perms_windows.go | 9 + .../consul-template/renderer/renderer.go | 182 +++ .../consul-template/signals/mapstructure.go | 32 + .../hashicorp/consul-template/signals/nil.go | 7 + .../consul-template/signals/signals.go | 35 + .../consul-template/signals/signals_unix.go | 40 + .../signals/signals_windows.go | 24 + .../consul-template/template/brain.go | 74 + .../consul-template/template/funcs.go | 1322 +++++++++++++++++ .../consul-template/template/scratch.go | 125 ++ .../consul-template/template/template.go | 303 ++++ .../consul-template/version/version.go | 12 + .../hashicorp/consul-template/watch/view.go | 308 ++++ .../consul-template/watch/watcher.go | 253 ++++ vendor/github.com/hashicorp/consul/api/acl.go | 588 +++++++- .../github.com/hashicorp/consul/api/agent.go | 38 +- vendor/github.com/hashicorp/consul/api/api.go | 75 +- .../hashicorp/consul/api/config_entry.go | 255 ++++ vendor/github.com/hashicorp/consul/api/go.mod | 2 +- .../go-sockaddr/template/GNUmakefile | 2 + .../hashicorp/go-sockaddr/template/README.md | 6 + .../hashicorp/go-sockaddr/template/doc.go | 311 ++++ .../go-sockaddr/template/template.go | 155 ++ .../vault/sdk/helper/pointerutil/pointer.go | 28 + vendor/github.com/lib/pq/oid/gen.go | 93 -- .../mattn/go-shellwords/.travis.yml | 8 + vendor/github.com/mattn/go-shellwords/LICENSE | 21 + .../github.com/mattn/go-shellwords/README.md | 47 + vendor/github.com/mattn/go-shellwords/go.mod | 1 + .../mattn/go-shellwords/shellwords.go | 195 +++ .../mattn/go-shellwords/util_go15.go | 24 + .../mattn/go-shellwords/util_posix.go | 22 + .../mattn/go-shellwords/util_windows.go | 22 + .../mitchellh/hashstructure/LICENSE | 21 + .../mitchellh/hashstructure/README.md | 65 + .../github.com/mitchellh/hashstructure/go.mod | 1 + .../mitchellh/hashstructure/hashstructure.go | 358 +++++ .../mitchellh/hashstructure/include.go | 15 + .../docker/pkg/archive/example_changes.go | 97 -- .../shirou/gopsutil/disk/types_freebsd.go | 88 -- .../shirou/gopsutil/disk/types_openbsd.go | 70 - .../shirou/gopsutil/host/types_darwin.go | 17 - .../shirou/gopsutil/host/types_freebsd.go | 44 - .../shirou/gopsutil/host/types_linux.go | 42 - .../shirou/gopsutil/host/types_openbsd.go | 43 - .../shirou/gopsutil/mem/types_openbsd.go | 34 - .../shirou/gopsutil/process/types_darwin.go | 160 -- .../shirou/gopsutil/process/types_freebsd.go | 95 -- .../shirou/gopsutil/process/types_openbsd.go | 103 -- vendor/github.com/ugorji/go/codec/xml.go | 508 ------- vendor/github.com/ulikunitz/xz/example.go | 40 - 
vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - vendor/golang.org/x/sys/unix/mkpost.go | 122 -- vendor/golang.org/x/sys/unix/mksyscall.go | 407 ----- .../x/sys/unix/mksyscall_aix_ppc.go | 415 ------ .../x/sys/unix/mksyscall_aix_ppc64.go | 614 -------- .../x/sys/unix/mksyscall_solaris.go | 335 ----- .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 ----- vendor/golang.org/x/sys/unix/mksysnum.go | 190 --- vendor/golang.org/x/sys/unix/types_aix.go | 237 --- vendor/golang.org/x/sys/unix/types_darwin.go | 283 ---- .../golang.org/x/sys/unix/types_dragonfly.go | 263 ---- vendor/golang.org/x/sys/unix/types_freebsd.go | 400 ----- vendor/golang.org/x/sys/unix/types_netbsd.go | 290 ---- vendor/golang.org/x/sys/unix/types_openbsd.go | 283 ---- vendor/golang.org/x/sys/unix/types_solaris.go | 266 ---- .../text/encoding/internal/identifier/gen.go | 142 -- vendor/golang.org/x/text/unicode/bidi/gen.go | 133 -- .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - .../x/text/unicode/norm/maketables.go | 986 ------------ .../golang.org/x/text/unicode/norm/triegen.go | 117 -- vendor/modules.txt | 476 +++--- 136 files changed, 17689 insertions(+), 8884 deletions(-) create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 vendor/github.com/BurntSushi/toml/.travis.yml create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/Makefile create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go create mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/session.vim create mode 100644 vendor/github.com/BurntSushi/toml/type_check.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go delete mode 100644 vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go delete mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go delete mode 100644 vendor/github.com/google/go-github/github/gen-accessors.go create mode 100644 vendor/github.com/hashicorp/consul-template/LICENSE create mode 100644 vendor/github.com/hashicorp/consul-template/child/child.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/auth.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/config.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/consul.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/convert.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/dedup.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/env.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/exec.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/mapstructure.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/retry.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/ssl.go create mode 100644 
vendor/github.com/hashicorp/consul-template/config/syslog.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/template.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/transport.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/vault.go create mode 100644 vendor/github.com/hashicorp/consul-template/config/wait.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/client_set.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/dependency.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/errors.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/file.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/health_service.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/kv_get.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/kv_list.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/set.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_common.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_list.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_read.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_token.go create mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_write.go create mode 100644 vendor/github.com/hashicorp/consul-template/manager/dedup.go create mode 100644 vendor/github.com/hashicorp/consul-template/manager/errors.go create mode 100644 vendor/github.com/hashicorp/consul-template/manager/runner.go create mode 100644 vendor/github.com/hashicorp/consul-template/renderer/file_perms.go create mode 100644 vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go create mode 100644 vendor/github.com/hashicorp/consul-template/renderer/renderer.go create mode 100644 vendor/github.com/hashicorp/consul-template/signals/mapstructure.go create mode 100644 vendor/github.com/hashicorp/consul-template/signals/nil.go create mode 100644 vendor/github.com/hashicorp/consul-template/signals/signals.go create mode 100644 vendor/github.com/hashicorp/consul-template/signals/signals_unix.go create mode 100644 vendor/github.com/hashicorp/consul-template/signals/signals_windows.go create mode 100644 vendor/github.com/hashicorp/consul-template/template/brain.go create mode 100644 vendor/github.com/hashicorp/consul-template/template/funcs.go create mode 100644 vendor/github.com/hashicorp/consul-template/template/scratch.go create mode 100644 vendor/github.com/hashicorp/consul-template/template/template.go create mode 100644 vendor/github.com/hashicorp/consul-template/version/version.go create mode 100644 
vendor/github.com/hashicorp/consul-template/watch/view.go create mode 100644 vendor/github.com/hashicorp/consul-template/watch/watcher.go create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile create mode 100644 vendor/github.com/hashicorp/go-sockaddr/template/README.md create mode 100644 vendor/github.com/hashicorp/go-sockaddr/template/doc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/template/template.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go delete mode 100644 vendor/github.com/lib/pq/oid/gen.go create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.mod create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_go15.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/mitchellh/hashstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/hashstructure/README.md create mode 100644 vendor/github.com/mitchellh/hashstructure/go.mod create mode 100644 vendor/github.com/mitchellh/hashstructure/hashstructure.go create mode 100644 vendor/github.com/mitchellh/hashstructure/include.go delete mode 100644 vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go delete mode 100644 vendor/github.com/shirou/gopsutil/disk/types_freebsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/disk/types_openbsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/host/types_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/host/types_freebsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/host/types_linux.go delete mode 100644 vendor/github.com/shirou/gopsutil/host/types_openbsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/mem/types_openbsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/process/types_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/process/types_freebsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/process/types_openbsd.go delete mode 100644 vendor/github.com/ugorji/go/codec/xml.go delete mode 100644 vendor/github.com/ulikunitz/xz/example.go delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go delete mode 100644 
vendor/golang.org/x/text/encoding/internal/identifier/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go diff --git a/go.mod b/go.mod index d4b28e3a2c76..82dfdcede3b1 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,6 @@ require ( github.com/cockroachdb/apd v1.1.0 // indirect github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/coreos/go-semver v0.2.0 - github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a github.com/dnaeon/go-vcr v1.0.1 // indirect github.com/dsnet/compress v0.0.1 // indirect @@ -48,7 +47,6 @@ require ( github.com/golang/protobuf v1.3.2 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a - github.com/google/go-querystring v1.0.0 // indirect github.com/hashicorp/consul-template v0.22.0 github.com/hashicorp/consul/api v1.1.0 github.com/hashicorp/errwrap v1.0.0 @@ -93,7 +91,6 @@ require ( github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f github.com/kr/pretty v0.1.0 - github.com/kr/pty v1.1.3 // indirect github.com/kr/text v0.1.0 github.com/lib/pq v1.2.0 github.com/mattn/go-colorable v0.1.2 @@ -108,7 +105,6 @@ require ( github.com/ncw/swift v1.0.47 github.com/nwaples/rardecode v1.0.0 // indirect github.com/oklog/run v1.0.0 - github.com/onsi/ginkgo v1.7.0 // indirect github.com/oracle/oci-go-sdk v7.0.0+incompatible github.com/ory/dockertest v3.3.4+incompatible github.com/patrickmn/go-cache v2.1.0+incompatible diff --git a/go.sum b/go.sum index 8bdced0f10b8..c401c5a01b22 100644 --- a/go.sum +++ b/go.sum @@ -236,6 +236,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000000..0cd3800377d4 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 000000000000..8b8afc4f0e00 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... 
+ - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 000000000000..6efcfd0ce55e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000000..01b5743200b8 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 000000000000..3600848d331a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000000..7c1b37ecc7a0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) 
+ +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. + +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000000..b0fd51d5b6ea --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) 
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. 
+ context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 000000000000..b9914a6798cf --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000000..b371f396edca --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000000..d905c21a2466 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
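A short, hedged sketch of the Encoder API documented above (the struct and values are invented for illustration):

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type listener struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	enc := toml.NewEncoder(os.Stdout)
	enc.Indent = "    " // override the default two-space indentation of nested tables
	if err := enc.Encode(listener{Host: "127.0.0.1", Port: 8200}); err != nil {
		panic(err)
	}
	// Prints:
	//   host = "127.0.0.1"
	//   port = 8200
}
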
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 000000000000..d36e1dd6002b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 000000000000..e8d503d04690 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000000..e0a742a8870f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) 
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
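The stack-of-state-functions design used throughout this lexer (each stateFn returns the next state; returning nil stops the machine) can be seen in miniature below. This is an illustrative, self-contained sketch, far simpler than the real lexer, with all names invented:

package main

import "fmt"

// stateFn mirrors the pattern above: each state returns the next state,
// and returning nil terminates the machine.
type stateFn func(*machine) stateFn

type machine struct {
	input string
	pos   int
	out   []string
}

func run(input string) []string {
	m := &machine{input: input}
	for state := lexWord; state != nil; {
		state = state(m)
	}
	return m.out
}

// lexWord consumes characters until a space or end of input, then emits
// the word it collected.
func lexWord(m *machine) stateFn {
	start := m.pos
	for m.pos < len(m.input) && m.input[m.pos] != ' ' {
		m.pos++
	}
	m.out = append(m.out, m.input[start:m.pos])
	if m.pos == len(m.input) {
		return nil // EOF: stop the machine
	}
	return lexSpace
}

// lexSpace skips a single space and hands control back to lexWord.
func lexSpace(m *machine) stateFn {
	m.pos++
	return lexWord
}

func main() {
	fmt.Println(run("a bare key")) // [a bare key]
}
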
+func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. 
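As the NOTE in lexArrayValue above points out, a trailing comma before ']' is tolerated even though the spec is unclear. A quick hedged check (input invented):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ A []int }
	// The array states above accept a trailing comma before ']'.
	if _, err := toml.Decode("a = [1, 2, 3,]", &v); err != nil {
		panic(err)
	}
	fmt.Println(v.A) // [1 2 3]
}
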
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
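The practical effect at decode time is that escape sequences are processed in basic strings but preserved verbatim in raw strings. A hedged, self-contained sketch (input invented):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ Basic, Raw string }
	input := "basic = \"a\\tb\"\n" + // escape is processed into a tab
		"raw = 'a\\tb'\n" // raw string: the backslash is kept literally
	if _, err := toml.Decode(input, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n%q\n", v.Basic, v.Raw) // "a\tb" then "a\\tb"
}
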
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
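A small illustrative helper built on the same character class as the isBareKeyChar predicate defined just below; the validBareKey wrapper is invented for this sketch:

package main

import "fmt"

// isBareKeyChar reproduces the predicate below: TOML bare keys may
// contain ASCII letters, digits, '_' and '-'.
func isBareKeyChar(r rune) bool {
	return (r >= 'A' && r <= 'Z') ||
		(r >= 'a' && r <= 'z') ||
		(r >= '0' && r <= '9') ||
		r == '_' || r == '-'
}

// validBareKey reports whether s could be written unquoted in TOML.
func validBareKey(s string) bool {
	if s == "" {
		return false
	}
	for _, r := range s {
		if !isBareKeyChar(r) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validBareKey("server-1")) // true
	fmt.Println(validBareKey("héllo"))    // false: must be quoted
}
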
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000000..50869ef9266e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
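Two of the value forms this function handles, underscore-separated integers and datetimes, in a hedged end-to-end sketch (field names and input invented):

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Big  int64
		When time.Time
	}
	// Underscores must be surrounded by digits, and the datetime must
	// match one of the accepted formats listed in the function below.
	input := "big = 1_000_000\nwhen = 2019-10-24T10:19:01Z\n"
	if _, err := toml.Decode(input, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Big, v.When.UTC()) // 1000000 2019-10-24 10:19:01 +0000 UTC
}
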
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
+ if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). 
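The implicit/explicit bookkeeping described in setValue above means a table created implicitly by [a.b] may still be defined explicitly later, exactly once. A minimal hedged sketch (input invented):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// [a.b] implicitly creates table 'a'; defining [a] afterwards is
	// legal once, after which any redefinition provokes an error.
	input := "[a.b]\nx = 1\n[a]\ny = 2\n"
	if _, err := toml.Decode(input, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["a"]) // map[b:map[x:1] y:2]
}
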
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 000000000000..562164be0603 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 000000000000..c73f8afc1a6d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
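+	// With no element types to compare, an empty array is trivially
+	// homogeneous.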
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000000..608997c22f68 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
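+				// A field is recorded here when it is tagged, non-anonymous,
+				// or an anonymous non-struct; untagged anonymous structs
+				// fall through below and are queued for the next
+				// breadth-first level instead.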
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
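+	// typeFields is deterministic for a given type, so goroutines that race
+	// here can only ever store identical results in the cache.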
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go b/vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go deleted file mode 100644 index 7405c6f3e798..000000000000 --- a/vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "go/format" - "io/ioutil" - "log" - "net/http" - "sort" - "strings" - "text/template" - "time" - - "gopkg.in/yaml.v2" -) - -type CFCode int -type HTTPCode int - -type Definition struct { - CFCode `yaml:"-"` - Name string `yaml:"name"` - HTTPCode `yaml:"http_code"` - Message string `yaml:"message"` -} - -func main() { - const url = "https://raw.githubusercontent.com/cloudfoundry/cloud_controller_ng/master/vendor/errors/v2.yml" - - resp, err := http.Get(url) - if err != nil { - log.Fatal(err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Fatal(err) - } - - var m map[CFCode]Definition - - if err := yaml.Unmarshal(body, &m); err != nil { - log.Fatal(err) - } - - var definitions []Definition - - for c, d := range m { - d.CFCode = c - definitions = append(definitions, d) - } - - sort.Slice(definitions, func(i, j int) bool { - return definitions[i].CFCode < definitions[j].CFCode - }) - - buf := &bytes.Buffer{} - - if err := packageTemplate.Execute(buf, struct { - Timestamp time.Time - Definitions []Definition - }{ - Timestamp: time.Now(), - Definitions: definitions, - }); err != nil { - log.Fatal(err) - } - - dst, err := format.Source(buf.Bytes()) - if err != nil { - log.Printf("%s", buf.Bytes()) - log.Fatal(err) - } - - if err := ioutil.WriteFile("cf_error.go", dst, 0600); err != nil { - log.Fatal(err) - } -} - -// destutter ensures that s does not end in "Error". -func destutter(s string) string { - return strings.TrimSuffix(s, "Error") -} - -var packageTemplate = template.Must(template.New("").Funcs(template.FuncMap{ - "destutter": destutter, -}).Parse(` -package cfclient - -// Code generated by go generate. DO NOT EDIT. -// This file was generated by robots at -// {{ .Timestamp }} - -import "github.com/pkg/errors" - -{{- range .Definitions }} -{{$method := printf "Is%sError" (.Name | destutter) }} -// {{ $method }} returns a boolean indicating whether -// the error is known to report the Cloud Foundry error: -// - Cloud Foundry code: {{ .CFCode }} -// - HTTP code: {{ .HTTPCode }} -// - message: {{ printf "%q" .Message }} -func Is{{ .Name | destutter }}Error(err error) bool { - cause := errors.Cause(err) - cferr, ok := cause.(CloudFoundryError) - if !ok { - return false - } - return cferr.Code == {{ .CFCode }} -} -{{- end }} -`)) diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go deleted file mode 100644 index 26bf628e142e..000000000000 --- a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go +++ /dev/null @@ -1,703 +0,0 @@ -// Copyright 2017, Joe Tsai. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
- -// +build ignore - -package main - -import ( - "bytes" - "go/format" - "io/ioutil" - "log" - "os" - "text/template" -) - -func main() { - if len(os.Args) != 3 { - log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0]) - } - typ := os.Args[1] - path := os.Args[2] - - b := new(bytes.Buffer) - t := template.Must(template.New("source").Parse(source)) - if err := t.Execute(b, struct { - Type, GeneratedMessage string - }{typ, "// Code generated by sais_gen.go. DO NOT EDIT."}); err != nil { - log.Fatalf("Template.Execute error: %v", err) - } - out, err := format.Source(bytes.TrimSpace(b.Bytes())) - if err != nil { - log.Fatalf("format.Source error: %v", err) - } - if err := ioutil.WriteFile(path, out, 0644); err != nil { - log.Fatalf("ioutil.WriteFile error: %v", err) - } -} - -const source = ` -// Copyright 2015, Joe Tsai. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -{{.GeneratedMessage}} - -// ==================================================== -// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. -// -// Permission is hereby granted, free of charge, to any person -// obtaining a copy of this software and associated documentation -// files (the "Software"), to deal in the Software without -// restriction, including without limitation the rights to use, -// copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. -// ==================================================== - -package sais - -func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) { - var i int - for i = 0; i < k; i++ { - C[i] = 0 - } - for i = 0; i < n; i++ { - C[T[i]]++ - } -} - -func getBuckets_{{.Type}}(C, B []int, k int, end bool) { - var i, sum int - if end { - for i = 0; i < k; i++ { - sum += C[i] - B[i] = sum - } - } else { - for i = 0; i < k; i++ { - sum += C[i] - B[i] = sum - C[i] - } - } -} - -func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) { - var b, i, j int - var c0, c1 int - - // Compute SAl. - if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets - j = n - 1 - c1 = int(T[j]) - b = B[c1] - j-- - if int(T[j]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - for i = 0; i < n; i++ { - if j = SA[i]; j > 0 { - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - if int(T[j]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - SA[i] = 0 - } else if j < 0 { - SA[i] = ^j - } - } - - // Compute SAs. 
- if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - c1 = 0 - b = B[c1] - for i = n - 1; i >= 0; i-- { - if j = SA[i]; j > 0 { - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - b-- - if int(T[j]) > c1 { - SA[b] = ^(j + 1) - } else { - SA[b] = j - } - SA[i] = 0 - } - } -} - -func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int { - var i, j, p, q, plen, qlen, name int - var c0, c1 int - var diff bool - - // Compact all the sorted substrings into the first m items of SA. - // 2*m must be not larger than n (provable). - for i = 0; SA[i] < 0; i++ { - SA[i] = ^SA[i] - } - if i < m { - for j, i = i, i+1; ; i++ { - if p = SA[i]; p < 0 { - SA[j] = ^p - j++ - SA[i] = 0 - if j == m { - break - } - } - } - } - - // Store the length of all substrings. - i = n - 1 - j = n - 1 - c0 = int(T[n-1]) - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - for i >= 0 { - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 > c1 { - break - } - } - if i >= 0 { - SA[m+((i+1)>>1)] = j - i - j = i + 1 - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - } - } - - // Find the lexicographic names of all substrings. - name = 0 - qlen = 0 - for i, q = 0, n; i < m; i++ { - p = SA[i] - plen = SA[m+(p>>1)] - diff = true - if (plen == qlen) && ((q + plen) < n) { - for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { - } - if j == plen { - diff = false - } - } - if diff { - name++ - q = p - qlen = plen - } - SA[m+(p>>1)] = name - } - return name -} - -func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) { - var b, i, j, t, d int - var c0, c1 int - - // Compute SAl. - getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets - j = n - 1 - c1 = int(T[j]) - b = B[c1] - j-- - if int(T[j]) < c1 { - t = 1 - } else { - t = 0 - } - j += n - if t&1 > 0 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - for i, d = 0, 0; i < n; i++ { - if j = SA[i]; j > 0 { - if n <= j { - d += 1 - j -= n - } - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - t = int(c0) << 1 - if int(T[j]) < c1 { - t |= 1 - } - if D[t] != d { - j += n - D[t] = d - } - if t&1 > 0 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - SA[i] = 0 - } else if j < 0 { - SA[i] = ^j - } - } - for i = n - 1; 0 <= i; i-- { - if SA[i] > 0 { - if SA[i] < n { - SA[i] += n - for j = i - 1; SA[j] < n; j-- { - } - SA[j] -= n - i = j - } - } - } - - // Compute SAs. - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - c1 = 0 - b = B[c1] - for i, d = n-1, d+1; i >= 0; i-- { - if j = SA[i]; j > 0 { - if n <= j { - d += 1 - j -= n - } - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - t = int(c0) << 1 - if int(T[j]) > c1 { - t |= 1 - } - if D[t] != d { - j += n - D[t] = d - } - b-- - if t&1 > 0 { - SA[b] = ^(j + 1) - } else { - SA[b] = j - } - SA[i] = 0 - } - } -} - -func postProcLMS2_{{.Type}}(SA []int, n, m int) int { - var i, j, d, name int - - // Compact all the sorted LMS substrings into the first m items of SA. - name = 0 - for i = 0; SA[i] < 0; i++ { - j = ^SA[i] - if n <= j { - name += 1 - } - SA[i] = j - } - if i < m { - for d, i = i, i+1; ; i++ { - if j = SA[i]; j < 0 { - j = ^j - if n <= j { - name += 1 - } - SA[d] = j - d++ - SA[i] = 0 - if d == m { - break - } - } - } - } - if name < m { - // Store the lexicographic names. 
- for i, d = m-1, name+1; 0 <= i; i-- { - if j = SA[i]; n <= j { - j -= n - d-- - } - SA[m+(j>>1)] = d - } - } else { - // Unset flags. - for i = 0; i < m; i++ { - if j = SA[i]; n <= j { - j -= n - SA[i] = j - } - } - } - return name -} - -func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) { - var b, i, j int - var c0, c1 int - - // Compute SAl. - if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets - j = n - 1 - c1 = int(T[j]) - b = B[c1] - if j > 0 && int(T[j-1]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - for i = 0; i < n; i++ { - j = SA[i] - SA[i] = ^j - if j > 0 { - j-- - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - if j > 0 && int(T[j-1]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - } - } - - // Compute SAs. - if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - c1 = 0 - b = B[c1] - for i = n - 1; i >= 0; i-- { - if j = SA[i]; j > 0 { - j-- - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - b-- - if (j == 0) || (int(T[j-1]) > c1) { - SA[b] = ^j - } else { - SA[b] = j - } - } else { - SA[i] = ^j - } - } -} - -func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) { - const ( - minBucketSize = 512 - sortLMS2Limit = 0x3fffffff - ) - - var C, B, D, RA []int - var bo int // Offset of B relative to SA - var b, i, j, m, p, q, name, newfs int - var c0, c1 int - var flags uint - - if k <= minBucketSize { - C = make([]int, k) - if k <= fs { - bo = n + fs - k - B = SA[bo:] - flags = 1 - } else { - B = make([]int, k) - flags = 3 - } - } else if k <= fs { - C = SA[n+fs-k:] - if k <= fs-k { - bo = n + fs - 2*k - B = SA[bo:] - flags = 0 - } else if k <= 4*minBucketSize { - B = make([]int, k) - flags = 2 - } else { - B = C - flags = 8 - } - } else { - C = make([]int, k) - B = C - flags = 4 | 8 - } - if n <= sortLMS2Limit && 2 <= (n/k) { - if flags&1 > 0 { - if 2*k <= fs-k { - flags |= 32 - } else { - flags |= 16 - } - } else if flags == 0 && 2*k <= (fs-2*k) { - flags |= 32 - } - } - - // Stage 1: Reduce the problem by at least 1/2. - // Sort all the LMS-substrings. - getCounts_{{.Type}}(T, C, n, k) - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - for i = 0; i < n; i++ { - SA[i] = 0 - } - b = -1 - i = n - 1 - j = n - m = 0 - c0 = int(T[n-1]) - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - for i >= 0 { - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 > c1 { - break - } - } - if i >= 0 { - if b >= 0 { - SA[b] = j - } - B[c1]-- - b = B[c1] - j = i - m++ - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - } - } - - if m > 1 { - if flags&(16|32) > 0 { - if flags&16 > 0 { - D = make([]int, 2*k) - } else { - D = SA[bo-2*k:] - } - B[T[j+1]]++ - for i, j = 0, 0; i < k; i++ { - j += C[i] - if B[i] != j { - SA[B[i]] += n - } - D[i] = 0 - D[i+k] = 0 - } - sortLMS2_{{.Type}}(T, SA, C, B, D, n, k) - name = postProcLMS2_{{.Type}}(SA, n, m) - } else { - sortLMS1_{{.Type}}(T, SA, C, B, n, k) - name = postProcLMS1_{{.Type}}(T, SA, n, m) - } - } else if m == 1 { - SA[b] = j + 1 - name = 1 - } else { - name = 0 - } - - // Stage 2: Solve the reduced problem. - // Recurse if names are not yet unique. 
- if name < m { - newfs = n + fs - 2*m - if flags&(1|4|8) == 0 { - if k+name <= newfs { - newfs -= k - } else { - flags |= 8 - } - } - RA = SA[m+newfs:] - for i, j = m+(n>>1)-1, m-1; m <= i; i-- { - if SA[i] != 0 { - RA[j] = SA[i] - 1 - j-- - } - } - computeSA_int(RA, SA, newfs, m, name) - - i = n - 1 - j = m - 1 - c0 = int(T[n-1]) - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - for i >= 0 { - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 > c1 { - break - } - } - if i >= 0 { - RA[j] = i + 1 - j-- - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - } - } - for i = 0; i < m; i++ { - SA[i] = RA[SA[i]] - } - if flags&4 > 0 { - B = make([]int, k) - C = B - } - if flags&2 > 0 { - B = make([]int, k) - } - } - - // Stage 3: Induce the result for the original problem. - if flags&8 > 0 { - getCounts_{{.Type}}(T, C, n, k) - } - // Put all left-most S characters into their buckets. - if m > 1 { - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - i = m - 1 - j = n - p = SA[m-1] - c1 = int(T[p]) - for { - c0 = c1 - q = B[c0] - for q < j { - j-- - SA[j] = 0 - } - for { - j-- - SA[j] = p - if i--; i < 0 { - break - } - p = SA[i] - if c1 = int(T[p]); c1 != c0 { - break - } - } - if i < 0 { - break - } - } - for j > 0 { - j-- - SA[j] = 0 - } - } - induceSA_{{.Type}}(T, SA, C, B, n, k) -} -` diff --git a/vendor/github.com/google/go-github/github/gen-accessors.go b/vendor/github.com/google/go-github/github/gen-accessors.go deleted file mode 100644 index fe92206fcf86..000000000000 --- a/vendor/github.com/google/go-github/github/gen-accessors.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// gen-accessors generates accessor methods for structs with pointer fields. -// -// It is meant to be used by the go-github authors in conjunction with the -// go generate tool before sending a commit to GitHub. -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "sort" - "strings" - "text/template" -) - -const ( - fileSuffix = "-accessors.go" -) - -var ( - verbose = flag.Bool("v", false, "Print verbose log messages") - - sourceTmpl = template.Must(template.New("source").Parse(source)) - - // blacklistStructMethod lists "struct.method" combos to skip. - blacklistStructMethod = map[string]bool{ - "RepositoryContent.GetContent": true, - "Client.GetBaseURL": true, - "Client.GetUploadURL": true, - "ErrorResponse.GetResponse": true, - "RateLimitError.GetResponse": true, - "AbuseRateLimitError.GetResponse": true, - } - // blacklistStruct lists structs to skip. - blacklistStruct = map[string]bool{ - "Client": true, - } -) - -func logf(fmt string, args ...interface{}) { - if *verbose { - log.Printf(fmt, args...) 
- } -} - -func main() { - flag.Parse() - fset := token.NewFileSet() - - pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0) - if err != nil { - log.Fatal(err) - return - } - - for pkgName, pkg := range pkgs { - t := &templateData{ - filename: pkgName + fileSuffix, - Year: 2017, - Package: pkgName, - Imports: map[string]string{}, - } - for filename, f := range pkg.Files { - logf("Processing %v...", filename) - if err := t.processAST(f); err != nil { - log.Fatal(err) - } - } - if err := t.dump(); err != nil { - log.Fatal(err) - } - } - logf("Done.") -} - -func (t *templateData) processAST(f *ast.File) error { - for _, decl := range f.Decls { - gd, ok := decl.(*ast.GenDecl) - if !ok { - continue - } - for _, spec := range gd.Specs { - ts, ok := spec.(*ast.TypeSpec) - if !ok { - continue - } - // Skip unexported identifiers. - if !ts.Name.IsExported() { - logf("Struct %v is unexported; skipping.", ts.Name) - continue - } - // Check if the struct is blacklisted. - if blacklistStruct[ts.Name.Name] { - logf("Struct %v is blacklisted; skipping.", ts.Name) - continue - } - st, ok := ts.Type.(*ast.StructType) - if !ok { - continue - } - for _, field := range st.Fields.List { - se, ok := field.Type.(*ast.StarExpr) - if len(field.Names) == 0 || !ok { - continue - } - - fieldName := field.Names[0] - // Skip unexported identifiers. - if !fieldName.IsExported() { - logf("Field %v is unexported; skipping.", fieldName) - continue - } - // Check if "struct.method" is blacklisted. - if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklistStructMethod[key] { - logf("Method %v is blacklisted; skipping.", key) - continue - } - - switch x := se.X.(type) { - case *ast.ArrayType: - t.addArrayType(x, ts.Name.String(), fieldName.String()) - case *ast.Ident: - t.addIdent(x, ts.Name.String(), fieldName.String()) - case *ast.MapType: - t.addMapType(x, ts.Name.String(), fieldName.String()) - case *ast.SelectorExpr: - t.addSelectorExpr(x, ts.Name.String(), fieldName.String()) - default: - logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x) - } - } - } - } - return nil -} - -func sourceFilter(fi os.FileInfo) bool { - return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix) -} - -func (t *templateData) dump() error { - if len(t.Getters) == 0 { - logf("No getters for %v; skipping.", t.filename) - return nil - } - - // Sort getters by ReceiverType.FieldName. - sort.Sort(byName(t.Getters)) - - var buf bytes.Buffer - if err := sourceTmpl.Execute(&buf, t); err != nil { - return err - } - clean, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - - logf("Writing %v...", t.filename) - return ioutil.WriteFile(t.filename, clean, 0644) -} - -func newGetter(receiverType, fieldName, fieldType, zeroValue string, namedStruct bool) *getter { - return &getter{ - sortVal: strings.ToLower(receiverType) + "." 
+ strings.ToLower(fieldName), - ReceiverVar: strings.ToLower(receiverType[:1]), - ReceiverType: receiverType, - FieldName: fieldName, - FieldType: fieldType, - ZeroValue: zeroValue, - NamedStruct: namedStruct, - } -} - -func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) { - var eltType string - switch elt := x.Elt.(type) { - case *ast.Ident: - eltType = elt.String() - default: - logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt) - return - } - - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil", false)) -} - -func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) { - var zeroValue string - var namedStruct = false - switch x.String() { - case "int", "int64": - zeroValue = "0" - case "string": - zeroValue = `""` - case "bool": - zeroValue = "false" - case "Timestamp": - zeroValue = "Timestamp{}" - default: - zeroValue = "nil" - namedStruct = true - } - - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue, namedStruct)) -} - -func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) { - var keyType string - switch key := x.Key.(type) { - case *ast.Ident: - keyType = key.String() - default: - logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key) - return - } - - var valueType string - switch value := x.Value.(type) { - case *ast.Ident: - valueType = value.String() - default: - logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value) - return - } - - fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType) - zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType) - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false)) -} - -func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) { - if strings.ToLower(fieldName[:1]) == fieldName[:1] { // Non-exported field. - return - } - - var xX string - if xx, ok := x.X.(*ast.Ident); ok { - xX = xx.String() - } - - switch xX { - case "time", "json": - if xX == "json" { - t.Imports["encoding/json"] = "encoding/json" - } else { - t.Imports[xX] = xX - } - fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name) - zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name) - if xX == "time" && x.Sel.Name == "Duration" { - zeroValue = "0" - } - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false)) - default: - logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x) - } -} - -type templateData struct { - filename string - Year int - Package string - Imports map[string]string - Getters []*getter -} - -type getter struct { - sortVal string // Lower-case version of "ReceiverType.FieldName". - ReceiverVar string // The one-letter variable name to match the ReceiverType. - ReceiverType string - FieldName string - FieldType string - ZeroValue string - NamedStruct bool // Getter for named struct. -} - -type byName []*getter - -func (b byName) Len() int { return len(b) } -func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal } -func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by gen-accessors; DO NOT EDIT. - -package {{.Package}} -{{with .Imports}} -import ( - {{- range . -}} - "{{.}}" - {{end -}} -) -{{end}} -{{range .Getters}} -{{if .NamedStruct}} -// Get{{.FieldName}} returns the {{.FieldName}} field. -func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() *{{.FieldType}} { - if {{.ReceiverVar}} == nil { - return {{.ZeroValue}} - } - return {{.ReceiverVar}}.{{.FieldName}} -} -{{else}} -// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise. -func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} { - if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil { - return {{.ZeroValue}} - } - return *{{.ReceiverVar}}.{{.FieldName}} -} -{{end}} -{{end}} -` diff --git a/vendor/github.com/hashicorp/consul-template/LICENSE b/vendor/github.com/hashicorp/consul-template/LICENSE new file mode 100644 index 000000000000..82b4de97c7e3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/consul-template/child/child.go b/vendor/github.com/hashicorp/consul-template/child/child.go new file mode 100644 index 000000000000..3c94816f596b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/child/child.go @@ -0,0 +1,428 @@ +package child + +import ( + "errors" + "fmt" + "io" + "log" + "math/rand" + "os" + "os/exec" + "strings" + "sync" + "syscall" + "time" +) + +func init() { + // Seed the default rand Source with current time to produce better random + // numbers used with splay + rand.Seed(time.Now().UnixNano()) +} + +var ( + // ErrMissingCommand is the error returned when no command is specified + // to run. 
+ ErrMissingCommand = errors.New("missing command") + + // ExitCodeOK is the default OK exit code. + ExitCodeOK = 0 + + // ExitCodeError is the default error code returned when the child exits with + // an error without a more specific code. + ExitCodeError = 127 +) + +// Child is a wrapper around a child process which can be used to send signals +// and manage the processes' lifecycle. +type Child struct { + sync.RWMutex + + stdin io.Reader + stdout, stderr io.Writer + command string + args []string + env []string + + timeout time.Duration + + reloadSignal os.Signal + + killSignal os.Signal + killTimeout time.Duration + + splay time.Duration + + // cmd is the actual child process under management. + cmd *exec.Cmd + + // exitCh is the channel where the processes exit will be returned. + exitCh chan int + + // stopLock is the mutex to lock when stopping. stopCh is the circuit breaker + // to force-terminate any waiting splays to kill the process now. stopped is + // a boolean that tells us if we have previously been stopped. + stopLock sync.RWMutex + stopCh chan struct{} + stopped bool +} + +// NewInput is input to the NewChild function. +type NewInput struct { + // Stdin is the io.Reader where input will come from. This is sent directly to + // the child process. Stdout and Stderr represent the io.Writer objects where + // the child process will send output and errorput. + Stdin io.Reader + Stdout, Stderr io.Writer + + // Command is the name of the command to execute. Args are the list of + // arguments to pass when starting the command. + Command string + Args []string + + // Timeout is the maximum amount of time to allow the command to execute. If + // set to 0, the command is permitted to run infinitely. + Timeout time.Duration + + // Env represents the condition of the child processes' environment + // variables. Only these environment variables will be given to the child, so + // it is the responsibility of the caller to include the parent processes + // environment, if required. This should be in the key=value format. + Env []string + + // ReloadSignal is the signal to send to reload this process. This value may + // be nil. + ReloadSignal os.Signal + + // KillSignal is the signal to send to gracefully kill this process. This + // value may be nil. + KillSignal os.Signal + + // KillTimeout is the amount of time to wait for the process to gracefully + // terminate before force-killing. + KillTimeout time.Duration + + // Splay is the maximum random amount of time to wait before sending signals. + // This option helps reduce the thundering herd problem by effectively + // sleeping for a random amount of time before sending the signal. This + // prevents multiple processes from all signaling at the same time. This value + // may be zero (which disables the splay entirely). + Splay time.Duration +} + +// New creates a new child process for management with high-level APIs for +// sending signals to the child process, restarting the child process, and +// gracefully terminating the child process. 
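+//
+// Note that New only validates and captures its input; the process itself is
+// not spawned until Start is called.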
+func New(i *NewInput) (*Child, error) { + if i == nil { + i = new(NewInput) + } + + if len(i.Command) == 0 { + return nil, ErrMissingCommand + } + + child := &Child{ + stdin: i.Stdin, + stdout: i.Stdout, + stderr: i.Stderr, + command: i.Command, + args: i.Args, + env: i.Env, + timeout: i.Timeout, + reloadSignal: i.ReloadSignal, + killSignal: i.KillSignal, + killTimeout: i.KillTimeout, + splay: i.Splay, + stopCh: make(chan struct{}, 1), + } + + return child, nil +} + +// ExitCh returns the current exit channel for this child process. This channel +// may change if the process is restarted, so implementers must not cache this +// value. +func (c *Child) ExitCh() <-chan int { + c.RLock() + defer c.RUnlock() + return c.exitCh +} + +// Pid returns the pid of the child process. If no child process exists, 0 is +// returned. +func (c *Child) Pid() int { + c.RLock() + defer c.RUnlock() + return c.pid() +} + +// Command returns the human-formatted command with arguments. +func (c *Child) Command() string { + list := append([]string{c.command}, c.args...) + return strings.Join(list, " ") +} + +// Start starts and begins execution of the child process. A buffered channel +// is returned which is where the command's exit code will be returned upon +// exit. Any errors that occur prior to starting the command will be returned +// as the second error argument, but any errors returned by the command after +// execution will be returned as a non-zero value over the exit code channel. +func (c *Child) Start() error { + log.Printf("[INFO] (child) spawning: %s", c.Command()) + c.Lock() + defer c.Unlock() + return c.start() +} + +// Signal sends the signal to the child process, returning any errors that +// occur. +func (c *Child) Signal(s os.Signal) error { + log.Printf("[INFO] (child) receiving signal %q", s.String()) + c.RLock() + defer c.RUnlock() + return c.signal(s) +} + +// Reload sends the reload signal to the child process and does not wait for a +// response. If no reload signal was provided, the process is restarted and +// replaces the process attached to this Child. +func (c *Child) Reload() error { + if c.reloadSignal == nil { + log.Printf("[INFO] (child) restarting process") + + // Take a full lock because start is going to replace the process. We also + // want to make sure that no other routines attempt to send reload signals + // during this transition. + c.Lock() + defer c.Unlock() + + c.kill(false) + return c.start() + } + + log.Printf("[INFO] (child) reloading process") + + // We only need a read lock here because neither the process nor the exit + // channel are changing. + c.RLock() + defer c.RUnlock() + + return c.reload() +} + +// Kill sends the kill signal to the child process and waits for successful +// termination. If no kill signal is defined, the process is killed with the +// most aggressive kill signal. If the process does not gracefully stop within +// the provided KillTimeout, the process is force-killed. If a splay was +// provided, this function will sleep for a random period of time between 0 and +// the provided splay value to reduce the thundering herd problem. This function +// does not return any errors because it guarantees the process will be dead by +// the return of the function call. 
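+//
+// Kill acquires the child's full lock, so callers must not already hold it
+// when invoking Kill.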
+func (c *Child) Kill() { + log.Printf("[INFO] (child) killing process") + c.Lock() + defer c.Unlock() + c.kill(false) +} + +// Stop behaves almost identical to Kill except it suppresses future processes +// from being started by this child and it prevents the killing of the child +// process from sending its value back up the exit channel. This is useful +// when doing a graceful shutdown of an application. +func (c *Child) Stop() { + c.internalStop(false) +} + +// StopImmediately behaves almost identical to Stop except it does not wait +// for any random splay if configured. This is used for performing a fast +// shutdown of consul-template and its children when a kill signal is received. +func (c *Child) StopImmediately() { + c.internalStop(true) +} + +func (c *Child) internalStop(immediately bool) { + log.Printf("[INFO] (child) stopping process") + + c.Lock() + defer c.Unlock() + + c.stopLock.Lock() + defer c.stopLock.Unlock() + if c.stopped { + log.Printf("[WARN] (child) already stopped") + return + } + c.kill(immediately) + close(c.stopCh) + c.stopped = true +} + +func (c *Child) start() error { + cmd := exec.Command(c.command, c.args...) + cmd.Stdin = c.stdin + cmd.Stdout = c.stdout + cmd.Stderr = c.stderr + cmd.Env = c.env + if err := cmd.Start(); err != nil { + return err + } + c.cmd = cmd + + // Create a new exitCh so that previously invoked commands (if any) don't + // cause us to exit, and start a goroutine to wait for that process to end. + exitCh := make(chan int, 1) + go func() { + var code int + err := cmd.Wait() + if err == nil { + code = ExitCodeOK + } else { + code = ExitCodeError + if exiterr, ok := err.(*exec.ExitError); ok { + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + code = status.ExitStatus() + } + } + } + + // If the child is in the process of killing, do not send a response back + // down the exit channel. + c.stopLock.RLock() + defer c.stopLock.RUnlock() + if c.stopped { + return + } + + select { + case <-c.stopCh: + case exitCh <- code: + } + }() + + c.exitCh = exitCh + + // If a timeout was given, start the timer to wait for the child to exit + if c.timeout != 0 { + select { + case code := <-exitCh: + if code != 0 { + return fmt.Errorf( + "command exited with a non-zero exit status:\n"+ + "\n"+ + " %s\n"+ + "\n"+ + "This is assumed to be a failure. Please ensure the command\n"+ + "exits with a zero exit status.", + c.Command(), + ) + } + case <-time.After(c.timeout): + // Force-kill the process + c.stopLock.Lock() + defer c.stopLock.Unlock() + if c.cmd != nil && c.cmd.Process != nil { + c.cmd.Process.Kill() + } + + return fmt.Errorf( + "command did not exit within %q:\n"+ + "\n"+ + " %s\n"+ + "\n"+ + "Commands must exit in a timely manner in order for processing to\n"+ + "continue. 
Consider using a process supervisor or utilizing the\n"+ + "built-in exec mode instead.", + c.timeout, + c.Command(), + ) + } + } + + return nil +} + +func (c *Child) pid() int { + if !c.running() { + return 0 + } + return c.cmd.Process.Pid +} + +func (c *Child) signal(s os.Signal) error { + if !c.running() { + return nil + } + return c.cmd.Process.Signal(s) +} + +func (c *Child) reload() error { + select { + case <-c.stopCh: + case <-c.randomSplay(): + } + + return c.signal(c.reloadSignal) +} + +func (c *Child) kill(immediately bool) { + if !c.running() { + return + } + + exited := false + process := c.cmd.Process + + if c.cmd.ProcessState != nil { + log.Printf("[DEBUG] (child) Kill() called but process dead; not waiting for splay.") + } else if immediately { + log.Printf("[DEBUG] (child) Kill() called but performing immediate shutdown; not waiting for splay.") + } else { + select { + case <-c.stopCh: + case <-c.randomSplay(): + } + } + + if c.killSignal != nil { + if err := process.Signal(c.killSignal); err == nil { + // Wait a few seconds for it to exit + killCh := make(chan struct{}, 1) + go func() { + defer close(killCh) + process.Wait() + }() + + select { + case <-c.stopCh: + case <-killCh: + exited = true + case <-time.After(c.killTimeout): + } + } + } + + if !exited { + process.Kill() + } + + c.cmd = nil +} + +func (c *Child) running() bool { + return c.cmd != nil && c.cmd.Process != nil +} + +func (c *Child) randomSplay() <-chan time.Time { + if c.splay == 0 { + return time.After(0) + } + + ns := c.splay.Nanoseconds() + offset := rand.Int63n(ns) + t := time.Duration(offset) + + log.Printf("[DEBUG] (child) waiting %.2fs for random splay", t.Seconds()) + + return time.After(t) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/auth.go b/vendor/github.com/hashicorp/consul-template/config/auth.go new file mode 100644 index 000000000000..207c78136db9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/auth.go @@ -0,0 +1,142 @@ +package config + +import ( + "errors" + "fmt" + "strings" +) + +var ( + // ErrAuthStringEmpty is the error returned with authentication is provided, + // but empty. + ErrAuthStringEmpty = errors.New("auth: cannot be empty") +) + +// AuthConfig is the HTTP basic authentication data. +type AuthConfig struct { + Enabled *bool `mapstructure:"enabled"` + Username *string `mapstructure:"username"` + Password *string `mapstructure:"password"` +} + +// DefaultAuthConfig is the default configuration. +func DefaultAuthConfig() *AuthConfig { + return &AuthConfig{} +} + +// ParseAuthConfig parses the auth into username:password. +func ParseAuthConfig(s string) (*AuthConfig, error) { + if s == "" { + return nil, ErrAuthStringEmpty + } + + var a AuthConfig + + if strings.Contains(s, ":") { + split := strings.SplitN(s, ":", 2) + a.Username = String(split[0]) + a.Password = String(split[1]) + } else { + a.Username = String(s) + } + + return &a, nil +} + +// Copy returns a deep copy of this configuration. +func (c *AuthConfig) Copy() *AuthConfig { + if c == nil { + return nil + } + + var o AuthConfig + o.Enabled = c.Enabled + o.Username = c.Username + o.Password = c.Password + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. 
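
A short sketch of how ParseAuthConfig composes with the Merge and Finalize methods that follow; the credentials are placeholders, and the import path is the one implied by the vendor directory:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        // "user:pass" splits on the first colon only.
        base, err := config.ParseAuthConfig("deploy:hunter2")
        if err != nil {
            log.Fatal(err)
        }

        // Values from the merged-in config win; the username survives.
        override := &config.AuthConfig{Password: config.String("s3cret")}
        merged := base.Merge(override)

        // Finalize back-fills nil pointers and flips Enabled on because a
        // username/password is present.
        merged.Finalize()

        fmt.Println(config.BoolVal(merged.Enabled)) // true
        fmt.Println(merged.String())                // "deploy:s3cret"
    }
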
+func (c *AuthConfig) Merge(o *AuthConfig) *AuthConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Username != nil { + r.Username = o.Username + } + + if o.Password != nil { + r.Password = o.Password + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *AuthConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + StringPresent(c.Username) || + StringPresent(c.Password)) + } + if c.Username == nil { + c.Username = String("") + } + + if c.Password == nil { + c.Password = String("") + } + + if c.Enabled == nil { + c.Enabled = Bool(*c.Username != "" || *c.Password != "") + } +} + +// GoString defines the printable version of this struct. +func (c *AuthConfig) GoString() string { + if c == nil { + return "(*AuthConfig)(nil)" + } + + return fmt.Sprintf("&AuthConfig{"+ + "Enabled:%s, "+ + "Username:%s, "+ + "Password:%s"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Username), + StringGoString(c.Password), + ) +} + +// String is the string representation of this authentication. If authentication +// is not enabled, this returns the empty string. The username and password will +// be separated by a colon. +func (c *AuthConfig) String() string { + if !BoolVal(c.Enabled) { + return "" + } + + if c.Password != nil { + return fmt.Sprintf("%s:%s", StringVal(c.Username), StringVal(c.Password)) + } + + return StringVal(c.Username) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/config.go b/vendor/github.com/hashicorp/consul-template/config/config.go new file mode 100644 index 000000000000..b027040937b9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/config.go @@ -0,0 +1,606 @@ +package config + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "syscall" + "time" + + "github.com/hashicorp/consul-template/signals" + "github.com/hashicorp/hcl" + homedir "github.com/mitchellh/go-homedir" + "github.com/mitchellh/mapstructure" + + "github.com/pkg/errors" +) + +const ( + // DefaultLogLevel is the default logging level. + DefaultLogLevel = "WARN" + + // DefaultMaxStale is the default staleness permitted. This enables stale + // queries by default for performance reasons. + DefaultMaxStale = 2 * time.Second + + // DefaultReloadSignal is the default signal for reload. + DefaultReloadSignal = syscall.SIGHUP + + // DefaultKillSignal is the default signal for termination. + DefaultKillSignal = syscall.SIGINT +) + +var ( + // homePath is the location to the user's home directory. + homePath, _ = homedir.Dir() +) + +// Config is used to configure Consul Template +type Config struct { + // Consul is the configuration for connecting to a Consul cluster. + Consul *ConsulConfig `mapstructure:"consul"` + + // Dedup is used to configure the dedup settings + Dedup *DedupConfig `mapstructure:"deduplicate"` + + // Exec is the configuration for exec/supervise mode. + Exec *ExecConfig `mapstructure:"exec"` + + // KillSignal is the signal to listen for a graceful terminate event. + KillSignal *os.Signal `mapstructure:"kill_signal"` + + // LogLevel is the level with which to log for this config. + LogLevel *string `mapstructure:"log_level"` + + // MaxStale is the maximum amount of time for staleness from Consul as given + // by LastContact. If supplied, Consul Template will query all servers instead + // of just the leader. 
+ MaxStale *time.Duration `mapstructure:"max_stale"` + + // PidFile is the path on disk where a PID file should be written containing + // this processes PID. + PidFile *string `mapstructure:"pid_file"` + + // ReloadSignal is the signal to listen for a reload event. + ReloadSignal *os.Signal `mapstructure:"reload_signal"` + + // Syslog is the configuration for syslog. + Syslog *SyslogConfig `mapstructure:"syslog"` + + // Templates is the list of templates. + Templates *TemplateConfigs `mapstructure:"template"` + + // Vault is the configuration for connecting to a vault server. + Vault *VaultConfig `mapstructure:"vault"` + + // Wait is the quiescence timers. + Wait *WaitConfig `mapstructure:"wait"` + + // Additional command line options + // Run once, executing each template exactly once, and exit + Once bool +} + +// Copy returns a deep copy of the current configuration. This is useful because +// the nested data structures may be shared. +func (c *Config) Copy() *Config { + if c == nil { + return nil + } + var o Config + + o.Consul = c.Consul + + if c.Consul != nil { + o.Consul = c.Consul.Copy() + } + + if c.Dedup != nil { + o.Dedup = c.Dedup.Copy() + } + + if c.Exec != nil { + o.Exec = c.Exec.Copy() + } + + o.KillSignal = c.KillSignal + + o.LogLevel = c.LogLevel + + o.MaxStale = c.MaxStale + + o.PidFile = c.PidFile + + o.ReloadSignal = c.ReloadSignal + + if c.Syslog != nil { + o.Syslog = c.Syslog.Copy() + } + + if c.Templates != nil { + o.Templates = c.Templates.Copy() + } + + if c.Vault != nil { + o.Vault = c.Vault.Copy() + } + + if c.Wait != nil { + o.Wait = c.Wait.Copy() + } + + o.Once = c.Once + + return &o +} + +// Merge merges the values in config into this config object. Values in the +// config object overwrite the values in c. +func (c *Config) Merge(o *Config) *Config { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Consul != nil { + r.Consul = r.Consul.Merge(o.Consul) + } + + if o.Dedup != nil { + r.Dedup = r.Dedup.Merge(o.Dedup) + } + + if o.Exec != nil { + r.Exec = r.Exec.Merge(o.Exec) + } + + if o.KillSignal != nil { + r.KillSignal = o.KillSignal + } + + if o.LogLevel != nil { + r.LogLevel = o.LogLevel + } + + if o.MaxStale != nil { + r.MaxStale = o.MaxStale + } + + if o.PidFile != nil { + r.PidFile = o.PidFile + } + + if o.ReloadSignal != nil { + r.ReloadSignal = o.ReloadSignal + } + + if o.Syslog != nil { + r.Syslog = r.Syslog.Merge(o.Syslog) + } + + if o.Templates != nil { + r.Templates = r.Templates.Merge(o.Templates) + } + + if o.Vault != nil { + r.Vault = r.Vault.Merge(o.Vault) + } + + if o.Wait != nil { + r.Wait = r.Wait.Merge(o.Wait) + } + + r.Once = o.Once + + return r +} + +// Parse parses the given string contents as a config +func Parse(s string) (*Config, error) { + var shadow interface{} + if err := hcl.Decode(&shadow, s); err != nil { + return nil, errors.Wrap(err, "error decoding config") + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return nil, errors.New("error converting config") + } + + flattenKeys(parsed, []string{ + "auth", + "consul", + "consul.auth", + "consul.retry", + "consul.ssl", + "consul.transport", + "deduplicate", + "env", + "exec", + "exec.env", + "ssl", + "syslog", + "vault", + "vault.retry", + "vault.ssl", + "vault.transport", + "wait", + }) + + // FlattenFlatten keys belonging to the templates. We cannot do this above + // because it is an array of templates. 
+ if templates, ok := parsed["template"].([]map[string]interface{}); ok { + for _, template := range templates { + flattenKeys(template, []string{ + "env", + "exec", + "exec.env", + "wait", + }) + } + } + + // Create a new, empty config + var c Config + + // Use mapstructure to populate the basic config fields + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ConsulStringToStructFunc(), + StringToFileModeFunc(), + signals.StringToSignalFunc(), + StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &c, + }) + if err != nil { + return nil, errors.Wrap(err, "mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return nil, errors.Wrap(err, "mapstructure decode failed") + } + + return &c, nil +} + +// Must returns a config object that must compile. If there are any errors, this +// function will panic. This is most useful in testing or constants. +func Must(s string) *Config { + c, err := Parse(s) + if err != nil { + log.Fatal(err) + } + return c +} + +// TestConfig returns a default, finalized config, with the provided +// configuration taking precedence. +func TestConfig(c *Config) *Config { + d := DefaultConfig().Merge(c) + d.Finalize() + return d +} + +// FromFile reads the configuration file at the given path and returns a new +// Config struct with the data populated. +func FromFile(path string) (*Config, error) { + c, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "from file: "+path) + } + + config, err := Parse(string(c)) + if err != nil { + return nil, errors.Wrap(err, "from file: "+path) + } + return config, nil +} + +// FromPath iterates and merges all configuration files in a given +// directory, returning the resulting config. +func FromPath(path string) (*Config, error) { + // Ensure the given filepath exists + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, errors.Wrap(err, "missing file/folder: "+path) + } + + // Check if a file was given or a path to a directory + stat, err := os.Stat(path) + if err != nil { + return nil, errors.Wrap(err, "failed stating file: "+path) + } + + // Recursively parse directories, single load files + if stat.Mode().IsDir() { + // Ensure the given filepath has at least one config file + _, err := ioutil.ReadDir(path) + if err != nil { + return nil, errors.Wrap(err, "failed listing dir: "+path) + } + + // Create a blank config to merge off of + var c *Config + + // Potential bug: Walk does not follow symlinks! + err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + // If WalkFunc had an error, just return it + if err != nil { + return err + } + + // Do nothing for directories + if info.IsDir() { + return nil + } + + // Parse and merge the config + newConfig, err := FromFile(path) + if err != nil { + return err + } + c = c.Merge(newConfig) + + return nil + }) + + if err != nil { + return nil, errors.Wrap(err, "walk error") + } + + return c, nil + } else if stat.Mode().IsRegular() { + return FromFile(path) + } + + return nil, fmt.Errorf("unknown filetype: %q", stat.Mode().String()) +} + +// GoString defines the printable version of this struct. 
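
A sketch of the parsing path above, assuming the config import path implied by the vendor directory. flattenKeys (defined later in this file) collapses the single-element lists HCL produces for the known stanzas so that mapstructure can decode them into the typed structs:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        c, err := config.Parse(`
    consul {
      address = "127.0.0.1:8500"
    }

    template {
      source      = "in.ctmpl"
      destination = "out.txt"
    }
    `)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(config.StringVal(c.Consul.Address)) // 127.0.0.1:8500
    }
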
+func (c *Config) GoString() string { + if c == nil { + return "(*Config)(nil)" + } + + return fmt.Sprintf("&Config{"+ + "Consul:%#v, "+ + "Dedup:%#v, "+ + "Exec:%#v, "+ + "KillSignal:%s, "+ + "LogLevel:%s, "+ + "MaxStale:%s, "+ + "PidFile:%s, "+ + "ReloadSignal:%s, "+ + "Syslog:%#v, "+ + "Templates:%#v, "+ + "Vault:%#v, "+ + "Wait:%#v,"+ + "Once:%#v"+ + "}", + c.Consul, + c.Dedup, + c.Exec, + SignalGoString(c.KillSignal), + StringGoString(c.LogLevel), + TimeDurationGoString(c.MaxStale), + StringGoString(c.PidFile), + SignalGoString(c.ReloadSignal), + c.Syslog, + c.Templates, + c.Vault, + c.Wait, + c.Once, + ) +} + +// Show diff between 2 Configs, useful in tests +func (expected *Config) Diff(actual *Config) string { + var b strings.Builder + fmt.Fprintf(&b, "\n") + ve := reflect.ValueOf(*expected) + va := reflect.ValueOf(*actual) + ct := ve.Type() + + for i := 0; i < ve.NumField(); i++ { + fc := ve.Field(i) + fo := va.Field(i) + if !reflect.DeepEqual(fc.Interface(), fo.Interface()) { + fmt.Fprintf(&b, "%s:\n", ct.Field(i).Name) + fmt.Fprintf(&b, "\texp: %#v\n", fc.Interface()) + fmt.Fprintf(&b, "\tact: %#v\n", fo.Interface()) + } + } + + return b.String() +} + +// DefaultConfig returns the default configuration struct. Certain environment +// variables may be set which control the values for the default configuration. +func DefaultConfig() *Config { + return &Config{ + Consul: DefaultConsulConfig(), + Dedup: DefaultDedupConfig(), + Exec: DefaultExecConfig(), + Syslog: DefaultSyslogConfig(), + Templates: DefaultTemplateConfigs(), + Vault: DefaultVaultConfig(), + Wait: DefaultWaitConfig(), + } +} + +// Finalize ensures all configuration options have the default values, so it +// is safe to dereference the pointers later down the line. It also +// intelligently tries to activate stanzas that should be "enabled" because +// data was given, but the user did not explicitly add "Enabled: true" to the +// configuration. 
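
The TestConfig helper above hints at the intended composition: defaults first, then file config layered on top (later files win under FromPath's walk), then Finalize, shown below, so every pointer is safe to dereference. A sketch with a placeholder path:

    package main

    import (
        "log"

        "github.com/hashicorp/consul-template/config"
    )

    func loadConfig(path string) (*config.Config, error) {
        fileConfig, err := config.FromPath(path)
        if err != nil {
            return nil, err
        }
        merged := config.DefaultConfig().Merge(fileConfig)
        merged.Finalize()
        return merged, nil
    }

    func main() {
        c, err := loadConfig("/etc/consul-template.d")
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("log level: %s", config.StringVal(c.LogLevel))
    }
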
+func (c *Config) Finalize() { + if c == nil { + return + } + if c.Consul == nil { + c.Consul = DefaultConsulConfig() + } + c.Consul.Finalize() + + if c.Dedup == nil { + c.Dedup = DefaultDedupConfig() + } + c.Dedup.Finalize() + + if c.Exec == nil { + c.Exec = DefaultExecConfig() + } + c.Exec.Finalize() + + if c.KillSignal == nil { + c.KillSignal = Signal(DefaultKillSignal) + } + + if c.LogLevel == nil { + c.LogLevel = stringFromEnv([]string{ + "CT_LOG", + "CONSUL_TEMPLATE_LOG", + }, DefaultLogLevel) + } + + if c.MaxStale == nil { + c.MaxStale = TimeDuration(DefaultMaxStale) + } + + if c.PidFile == nil { + c.PidFile = String("") + } + + if c.ReloadSignal == nil { + c.ReloadSignal = Signal(DefaultReloadSignal) + } + + if c.Syslog == nil { + c.Syslog = DefaultSyslogConfig() + } + c.Syslog.Finalize() + + if c.Templates == nil { + c.Templates = DefaultTemplateConfigs() + } + c.Templates.Finalize() + + if c.Vault == nil { + c.Vault = DefaultVaultConfig() + } + c.Vault.Finalize() + + if c.Wait == nil { + c.Wait = DefaultWaitConfig() + } + c.Wait.Finalize() + + // disable Wait if -once was specified + if c.Once { + c.Wait = &WaitConfig{Enabled: Bool(false)} + } +} + +func stringFromEnv(list []string, def string) *string { + for _, s := range list { + if v := os.Getenv(s); v != "" { + return String(strings.TrimSpace(v)) + } + } + return String(def) +} + +func stringFromFile(list []string, def string) *string { + for _, s := range list { + c, err := ioutil.ReadFile(s) + if err == nil { + return String(strings.TrimSpace(string(c))) + } + } + return String(def) +} + +func antiboolFromEnv(list []string, def bool) *bool { + for _, s := range list { + if v := os.Getenv(s); v != "" { + b, err := strconv.ParseBool(v) + if err == nil { + return Bool(!b) + } + } + } + return Bool(def) +} + +func boolFromEnv(list []string, def bool) *bool { + for _, s := range list { + if v := os.Getenv(s); v != "" { + b, err := strconv.ParseBool(v) + if err == nil { + return Bool(b) + } + } + } + return Bool(def) +} + +// flattenKeys is a function that takes a map[string]interface{} and recursively +// flattens any keys that are a []map[string]interface{} where the key is in the +// given list of keys. +func flattenKeys(m map[string]interface{}, keys []string) { + keyMap := make(map[string]struct{}) + for _, key := range keys { + keyMap[key] = struct{}{} + } + + var flatten func(map[string]interface{}, string) + flatten = func(m map[string]interface{}, parent string) { + for k, v := range m { + // Calculate the map key, since it could include a parent. + mapKey := k + if parent != "" { + mapKey = parent + "." + k + } + + if _, ok := keyMap[mapKey]; !ok { + continue + } + + switch typed := v.(type) { + case []map[string]interface{}: + if len(typed) > 0 { + last := typed[len(typed)-1] + flatten(last, mapKey) + m[k] = last + } else { + m[k] = nil + } + case map[string]interface{}: + flatten(typed, mapKey) + m[k] = typed + default: + m[k] = v + } + } + } + + flatten(m, "") +} diff --git a/vendor/github.com/hashicorp/consul-template/config/consul.go b/vendor/github.com/hashicorp/consul-template/config/consul.go new file mode 100644 index 000000000000..ca79ba8b66aa --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/consul.go @@ -0,0 +1,172 @@ +package config + +import "fmt" + +// ConsulConfig contains the configurations options for connecting to a +// Consul cluster. +type ConsulConfig struct { + // Address is the address of the Consul server. It may be an IP or FQDN. 
+ Address *string + + // Auth is the HTTP basic authentication for communicating with Consul. + Auth *AuthConfig `mapstructure:"auth"` + + // Retry is the configuration for specifying how to behave on failure. + Retry *RetryConfig `mapstructure:"retry"` + + // SSL indicates we should use a secure connection while talking to + // Consul. This requires Consul to be configured to serve HTTPS. + SSL *SSLConfig `mapstructure:"ssl"` + + // Token is the token to communicate with Consul securely. + Token *string + + // Transport configures the low-level network connection details. + Transport *TransportConfig `mapstructure:"transport"` +} + +// DefaultConsulConfig returns a configuration that is populated with the +// default values. +func DefaultConsulConfig() *ConsulConfig { + return &ConsulConfig{ + Auth: DefaultAuthConfig(), + Retry: DefaultRetryConfig(), + SSL: DefaultSSLConfig(), + Transport: DefaultTransportConfig(), + } +} + +// Copy returns a deep copy of this configuration. +func (c *ConsulConfig) Copy() *ConsulConfig { + if c == nil { + return nil + } + + var o ConsulConfig + + o.Address = c.Address + + if c.Auth != nil { + o.Auth = c.Auth.Copy() + } + + if c.Retry != nil { + o.Retry = c.Retry.Copy() + } + + if c.SSL != nil { + o.SSL = c.SSL.Copy() + } + + o.Token = c.Token + + if c.Transport != nil { + o.Transport = c.Transport.Copy() + } + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *ConsulConfig) Merge(o *ConsulConfig) *ConsulConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Address != nil { + r.Address = o.Address + } + + if o.Auth != nil { + r.Auth = r.Auth.Merge(o.Auth) + } + + if o.Retry != nil { + r.Retry = r.Retry.Merge(o.Retry) + } + + if o.SSL != nil { + r.SSL = r.SSL.Merge(o.SSL) + } + + if o.Token != nil { + r.Token = o.Token + } + + if o.Transport != nil { + r.Transport = r.Transport.Merge(o.Transport) + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *ConsulConfig) Finalize() { + if c.Address == nil { + c.Address = stringFromEnv([]string{ + "CONSUL_HTTP_ADDR", + }, "") + } + + if c.Auth == nil { + c.Auth = DefaultAuthConfig() + } + c.Auth.Finalize() + + if c.Retry == nil { + c.Retry = DefaultRetryConfig() + } + c.Retry.Finalize() + + if c.SSL == nil { + c.SSL = DefaultSSLConfig() + } + c.SSL.Finalize() + + if c.Token == nil { + c.Token = stringFromEnv([]string{ + "CONSUL_TOKEN", + "CONSUL_HTTP_TOKEN", + }, "") + } + + if c.Transport == nil { + c.Transport = DefaultTransportConfig() + } + c.Transport.Finalize() +} + +// GoString defines the printable version of this struct. 
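
Because Finalize above falls back to environment variables for the address and token, a bare ConsulConfig picks up standard Consul settings without any file configuration. A small sketch:

    package main

    import (
        "fmt"
        "os"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        // With no explicit address or token, Finalize consults
        // CONSUL_HTTP_ADDR and then CONSUL_TOKEN/CONSUL_HTTP_TOKEN,
        // in that order.
        os.Setenv("CONSUL_HTTP_ADDR", "10.0.0.1:8500")
        os.Setenv("CONSUL_HTTP_TOKEN", "abcd-1234")

        c := config.DefaultConsulConfig()
        c.Finalize()

        fmt.Println(config.StringVal(c.Address)) // "10.0.0.1:8500"
        fmt.Println(config.StringVal(c.Token))   // "abcd-1234"
    }
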
+func (c *ConsulConfig) GoString() string { + if c == nil { + return "(*ConsulConfig)(nil)" + } + + return fmt.Sprintf("&ConsulConfig{"+ + "Address:%s, "+ + "Auth:%#v, "+ + "Retry:%#v, "+ + "SSL:%#v, "+ + "Token:%t, "+ + "Transport:%#v"+ + "}", + StringGoString(c.Address), + c.Auth, + c.Retry, + c.SSL, + StringPresent(c.Token), + c.Transport, + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/convert.go b/vendor/github.com/hashicorp/consul-template/config/convert.go new file mode 100644 index 000000000000..0fc45bddcc2f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/convert.go @@ -0,0 +1,197 @@ +package config + +import ( + "fmt" + "os" + "time" + + "github.com/hashicorp/consul-template/signals" +) + +// Bool returns a pointer to the given bool. +func Bool(b bool) *bool { + return &b +} + +// BoolVal returns the value of the boolean at the pointer, or false if the +// pointer is nil. +func BoolVal(b *bool) bool { + if b == nil { + return false + } + return *b +} + +// BoolGoString returns the value of the boolean for printing in a string. +func BoolGoString(b *bool) string { + if b == nil { + return "(*bool)(nil)" + } + return fmt.Sprintf("%t", *b) +} + +// BoolPresent returns a boolean indicating if the pointer is nil, or if the +// pointer is pointing to the zero value.. +func BoolPresent(b *bool) bool { + if b == nil { + return false + } + return true +} + +// FileMode returns a pointer to the given os.FileMode. +func FileMode(o os.FileMode) *os.FileMode { + return &o +} + +// FileModeVal returns the value of the os.FileMode at the pointer, or 0 if the +// pointer is nil. +func FileModeVal(o *os.FileMode) os.FileMode { + if o == nil { + return 0 + } + return *o +} + +// FileModeGoString returns the value of the os.FileMode for printing in a +// string. +func FileModeGoString(o *os.FileMode) string { + if o == nil { + return "(*os.FileMode)(nil)" + } + return fmt.Sprintf("%q", *o) +} + +// FileModePresent returns a boolean indicating if the pointer is nil, or if +// the pointer is pointing to the zero value. +func FileModePresent(o *os.FileMode) bool { + if o == nil { + return false + } + return *o != 0 +} + +// Int returns a pointer to the given int. +func Int(i int) *int { + return &i +} + +// IntVal returns the value of the int at the pointer, or 0 if the pointer is +// nil. +func IntVal(i *int) int { + if i == nil { + return 0 + } + return *i +} + +// IntGoString returns the value of the int for printing in a string. +func IntGoString(i *int) string { + if i == nil { + return "(*int)(nil)" + } + return fmt.Sprintf("%d", *i) +} + +// IntPresent returns a boolean indicating if the pointer is nil, or if the +// pointer is pointing to the zero value. +func IntPresent(i *int) bool { + if i == nil { + return false + } + return *i != 0 +} + +// Signal returns a pointer to the given os.Signal. +func Signal(s os.Signal) *os.Signal { + return &s +} + +// SignalVal returns the value of the os.Signal at the pointer, or 0 if the +// pointer is nil. +func SignalVal(s *os.Signal) os.Signal { + if s == nil { + return (os.Signal)(nil) + } + return *s +} + +// SignalGoString returns the value of the os.Signal for printing in a string. +func SignalGoString(s *os.Signal) string { + if s == nil { + return "(*os.Signal)(nil)" + } + if *s == nil { + return "" + } + return fmt.Sprintf("%q", *s) +} + +// SignalPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. 
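
The pattern in this file is consistent: X returns a pointer to a value, XVal dereferences with a zero-value fallback, and XPresent reports whether a meaningful (non-nil, non-zero) value was set. For example, using the string helpers (StringPresent appears just below):

    package main

    import (
        "fmt"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        var unset *string
        empty := config.String("")
        set := config.String("hello")

        // Val helpers never panic on nil; they return the zero value.
        fmt.Println(config.StringVal(unset) == "") // true

        // Present helpers treat nil and the zero value the same, which is
        // what lets the Finalize methods infer "enabled" from whether any
        // field carries data.
        fmt.Println(config.StringPresent(unset)) // false
        fmt.Println(config.StringPresent(empty)) // false
        fmt.Println(config.StringPresent(set))   // true
    }
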
+func SignalPresent(s *os.Signal) bool { + if s == nil { + return false + } + return *s != signals.SIGNIL +} + +// String returns a pointer to the given string. +func String(s string) *string { + return &s +} + +// StringVal returns the value of the string at the pointer, or "" if the +// pointer is nil. +func StringVal(s *string) string { + if s == nil { + return "" + } + return *s +} + +// StringGoString returns the value of the string for printing in a string. +func StringGoString(s *string) string { + if s == nil { + return "(*string)(nil)" + } + return fmt.Sprintf("%q", *s) +} + +// StringPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. +func StringPresent(s *string) bool { + if s == nil { + return false + } + return *s != "" +} + +// TimeDuration returns a pointer to the given time.Duration. +func TimeDuration(t time.Duration) *time.Duration { + return &t +} + +// TimeDurationVal returns the value of the string at the pointer, or 0 if the +// pointer is nil. +func TimeDurationVal(t *time.Duration) time.Duration { + if t == nil { + return time.Duration(0) + } + return *t +} + +// TimeDurationGoString returns the value of the time.Duration for printing in a +// string. +func TimeDurationGoString(t *time.Duration) string { + if t == nil { + return "(*time.Duration)(nil)" + } + return fmt.Sprintf("%s", t) +} + +// TimeDurationPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. +func TimeDurationPresent(t *time.Duration) bool { + if t == nil { + return false + } + return *t != 0 +} diff --git a/vendor/github.com/hashicorp/consul-template/config/dedup.go b/vendor/github.com/hashicorp/consul-template/config/dedup.go new file mode 100644 index 000000000000..247855a938e3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/dedup.go @@ -0,0 +1,132 @@ +package config + +import ( + "fmt" + "time" +) + +const ( + // DefaultDedupPrefix is the default prefix used for deduplication mode. + DefaultDedupPrefix = "consul-template/dedup/" + + // DefaultDedupTTL is the default TTL for deduplicate mode. + DefaultDedupTTL = 15 * time.Second + + // DefaultDedupMaxStale is the default max staleness for the deduplication + // manager. + DefaultDedupMaxStale = DefaultMaxStale +) + +// DedupConfig is used to enable the de-duplication mode, which depends +// on electing a leader per-template and watching of a key. This is used +// to reduce the cost of many instances of CT running the same template. +type DedupConfig struct { + // Controls if deduplication mode is enabled + Enabled *bool `mapstructure:"enabled"` + + // MaxStale is the maximum amount of time to allow for stale queries. + MaxStale *time.Duration `mapstructure:"max_stale"` + + // Controls the KV prefix used. Defaults to defaultDedupPrefix + Prefix *string `mapstructure:"prefix"` + + // TTL is the Session TTL used for lock acquisition, defaults to 15 seconds. + TTL *time.Duration `mapstructure:"ttl"` +} + +// DefaultDedupConfig returns a configuration that is populated with the +// default values. +func DefaultDedupConfig() *DedupConfig { + return &DedupConfig{} +} + +// Copy returns a deep copy of this configuration. 
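
As with the other stanzas, providing any dedup option implies the mode should be on; a sketch of Finalize (below) inferring that and filling in the remaining defaults:

    package main

    import (
        "fmt"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        // Prefix is present, so Finalize turns Enabled true and back-fills
        // the TTL and MaxStale defaults (15s and 2s respectively).
        d := &config.DedupConfig{Prefix: config.String("my-app/dedup/")}
        d.Finalize()

        fmt.Println(config.BoolVal(d.Enabled))          // true
        fmt.Println(config.TimeDurationVal(d.TTL))      // 15s
        fmt.Println(config.TimeDurationVal(d.MaxStale)) // 2s
    }
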
+func (c *DedupConfig) Copy() *DedupConfig { + if c == nil { + return nil + } + + var o DedupConfig + o.Enabled = c.Enabled + o.MaxStale = c.MaxStale + o.Prefix = c.Prefix + o.TTL = c.TTL + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *DedupConfig) Merge(o *DedupConfig) *DedupConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.MaxStale != nil { + r.MaxStale = o.MaxStale + } + + if o.Prefix != nil { + r.Prefix = o.Prefix + } + + if o.TTL != nil { + r.TTL = o.TTL + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *DedupConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + TimeDurationPresent(c.MaxStale) || + StringPresent(c.Prefix) || + TimeDurationPresent(c.TTL)) + } + + if c.MaxStale == nil { + c.MaxStale = TimeDuration(DefaultDedupMaxStale) + } + + if c.Prefix == nil { + c.Prefix = String(DefaultDedupPrefix) + } + + if c.TTL == nil { + c.TTL = TimeDuration(DefaultDedupTTL) + } +} + +// GoString defines the printable version of this struct. +func (c *DedupConfig) GoString() string { + if c == nil { + return "(*DedupConfig)(nil)" + } + return fmt.Sprintf("&DedupConfig{"+ + "Enabled:%s, "+ + "MaxStale:%s, "+ + "Prefix:%s, "+ + "TTL:%s"+ + "}", + BoolGoString(c.Enabled), + TimeDurationGoString(c.MaxStale), + StringGoString(c.Prefix), + TimeDurationGoString(c.TTL), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/env.go b/vendor/github.com/hashicorp/consul-template/config/env.go new file mode 100644 index 000000000000..a9a4b1ebe848 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/env.go @@ -0,0 +1,209 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// EnvConfig is an embeddable struct for things that accept environment +// variable filtering. You should not use this directly and it is only public +// for mapstructure's decoding. +type EnvConfig struct { + // BlacklistEnv specifies a list of environment variables to explicitly + // exclude from the list of environment variables populated to the child. + // If both WhitelistEnv and BlacklistEnv are provided, BlacklistEnv takes + // precedence over the values in WhitelistEnv. + Blacklist []string `mapstructure:"blacklist"` + + // CustomEnv specifies custom environment variables to pass to the child + // process. These are provided programmatically, override any environment + // variables of the same name, are ignored from whitelist/blacklist, and + // are still included even if PristineEnv is set to true. + Custom []string `mapstructure:"custom"` + + // PristineEnv specifies if the child process should inherit the parent's + // environment. + Pristine *bool `mapstructure:"pristine"` + + // WhitelistEnv specifies a list of environment variables to exclusively + // include in the list of environment variables populated to the child. + Whitelist []string `mapstructure:"whitelist"` +} + +// DefaultEnvConfig returns a configuration that is populated with the +// default values. +func DefaultEnvConfig() *EnvConfig { + return &EnvConfig{} +} + +// Copy returns a deep copy of this configuration. 
+func (c *EnvConfig) Copy() *EnvConfig { + if c == nil { + return nil + } + + var o EnvConfig + + if c.Blacklist != nil { + o.Blacklist = append([]string{}, c.Blacklist...) + } + + if c.Custom != nil { + o.Custom = append([]string{}, c.Custom...) + } + + o.Pristine = c.Pristine + + if c.Whitelist != nil { + o.Whitelist = append([]string{}, c.Whitelist...) + } + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *EnvConfig) Merge(o *EnvConfig) *EnvConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Blacklist != nil { + r.Blacklist = append(r.Blacklist, o.Blacklist...) + } + + if o.Custom != nil { + r.Custom = append(r.Custom, o.Custom...) + } + + if o.Pristine != nil { + r.Pristine = o.Pristine + } + + if o.Whitelist != nil { + r.Whitelist = append(r.Whitelist, o.Whitelist...) + } + + return r +} + +// Env calculates and returns the finalized environment for this exec +// configuration. It takes into account pristine, custom environment, whitelist, +// and blacklist values. +func (c *EnvConfig) Env() []string { + // In pristine mode, just return the custom environment. If the user did not + // specify a custom environment, just return the empty slice to force an + // empty environment. We cannot return nil here because the later call to + // os/exec will think we want to inherit the parent. + if BoolVal(c.Pristine) { + if len(c.Custom) > 0 { + return c.Custom + } + return []string{} + } + + // Pull all the key-value pairs out of the environment + environ := os.Environ() + keys := make([]string, len(environ)) + env := make(map[string]string, len(environ)) + for i, v := range environ { + list := strings.SplitN(v, "=", 2) + keys[i] = list[0] + env[list[0]] = list[1] + } + + // anyGlobMatch is a helper function which checks if any of the given globs + // match the string. + anyGlobMatch := func(s string, patterns []string) bool { + for _, pattern := range patterns { + if matched, _ := filepath.Match(pattern, s); matched { + return true + } + } + return false + } + + // Pull out any envvars that match the whitelist. + if len(c.Whitelist) > 0 { + newKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if anyGlobMatch(k, c.Whitelist) { + newKeys = append(newKeys, k) + } + } + keys = newKeys + } + + // Remove any envvars that match the blacklist. + if len(c.Blacklist) > 0 { + newKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if !anyGlobMatch(k, c.Blacklist) { + newKeys = append(newKeys, k) + } + } + keys = newKeys + } + + // Build the final list using only the filtered keys. + finalEnv := make([]string, 0, len(keys)+len(c.Custom)) + for _, k := range keys { + finalEnv = append(finalEnv, k+"="+env[k]) + } + + // Append remaining custom environment. + finalEnv = append(finalEnv, c.Custom...) + + return finalEnv +} + +// Finalize ensures there no nil pointers. +func (c *EnvConfig) Finalize() { + if c.Blacklist == nil { + c.Blacklist = []string{} + } + + if c.Custom == nil { + c.Custom = []string{} + } + + if c.Pristine == nil { + c.Pristine = Bool(false) + } + + if c.Whitelist == nil { + c.Whitelist = []string{} + } +} + +// GoString defines the printable version of this struct. 
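
A sketch of the filtering rules in Env above; the variable names and values are placeholders:

    package main

    import (
        "fmt"
        "os"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        os.Setenv("PATH", "/usr/bin")
        os.Setenv("AWS_SECRET_ACCESS_KEY", "do-not-leak")

        // Whitelist and blacklist entries are glob patterns; the blacklist
        // wins, and custom entries are appended last so they override any
        // inherited variable of the same name.
        e := &config.EnvConfig{
            Whitelist: []string{"PATH", "AWS_*"},
            Blacklist: []string{"AWS_SECRET*"},
            Custom:    []string{"APP_MODE=production"},
        }
        e.Finalize()

        for _, kv := range e.Env() {
            fmt.Println(kv) // PATH=/usr/bin and APP_MODE=production; no AWS secret
        }
    }
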
+func (c *EnvConfig) GoString() string {
+    if c == nil {
+        return "(*EnvConfig)(nil)"
+    }
+
+    return fmt.Sprintf("&EnvConfig{"+
+        "Blacklist:%v, "+
+        "Custom:%v, "+
+        "Pristine:%s, "+
+        "Whitelist:%v"+
+        "}",
+        c.Blacklist,
+        c.Custom,
+        BoolGoString(c.Pristine),
+        c.Whitelist,
+    )
+}
diff --git a/vendor/github.com/hashicorp/consul-template/config/exec.go b/vendor/github.com/hashicorp/consul-template/config/exec.go
new file mode 100644
index 000000000000..22c7070a4665
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul-template/config/exec.go
@@ -0,0 +1,216 @@
+package config
+
+import (
+    "fmt"
+    "os"
+    "syscall"
+    "time"
+)
+
+const (
+    // DefaultExecKillSignal is the default signal to send to the process to
+    // tell it to gracefully terminate.
+    DefaultExecKillSignal = syscall.SIGINT
+
+    // DefaultExecKillTimeout is the maximum amount of time to wait for the
+    // process to gracefully terminate before force-killing it.
+    DefaultExecKillTimeout = 30 * time.Second
+
+    // DefaultExecTimeout is the default amount of time to wait for a
+    // command to exit. By default, this is disabled, which means the command
+    // is allowed to run for an infinite amount of time.
+    DefaultExecTimeout = 0 * time.Second
+)
+
+var (
+    // DefaultExecReloadSignal is the default signal to send to the process to
+    // tell it to reload its configuration.
+    DefaultExecReloadSignal = (os.Signal)(nil)
+)
+
+// ExecConfig is used to configure the application when it runs in
+// exec/supervise mode.
+type ExecConfig struct {
+    // Command is the command to execute and watch as a child process.
+    Command *string `mapstructure:"command"`
+
+    // Enabled controls if this exec is enabled.
+    Enabled *bool `mapstructure:"enabled"`
+
+    // EnvConfig is the environmental customizations.
+    Env *EnvConfig `mapstructure:"env"`
+
+    // KillSignal is the signal to send to the command to kill it gracefully. The
+    // default value is "SIGINT".
+    KillSignal *os.Signal `mapstructure:"kill_signal"`
+
+    // KillTimeout is the amount of time to give the process to cleanup before
+    // hard-killing it.
+    KillTimeout *time.Duration `mapstructure:"kill_timeout"`
+
+    // ReloadSignal is the signal to send to the child process when a template
+    // changes. This tells the child process that templates have changed.
+    ReloadSignal *os.Signal `mapstructure:"reload_signal"`
+
+    // Splay is the maximum amount of random time to wait to signal or kill the
+    // process. By default this is disabled, but it can be set to low values to
+    // reduce the "thundering herd" problem where all tasks are restarted at once.
+    Splay *time.Duration `mapstructure:"splay"`
+
+    // Timeout is the maximum amount of time to wait for a command to complete.
+    // By default, this is 0, which means "wait forever".
+    Timeout *time.Duration `mapstructure:"timeout"`
+}
+
+// DefaultExecConfig returns a configuration that is populated with the
+// default values.
+func DefaultExecConfig() *ExecConfig {
+    return &ExecConfig{
+        Env: DefaultEnvConfig(),
+    }
+}
+
+// Copy returns a deep copy of this configuration.
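
A sketch of how Finalize (below) activates exec mode and applies the defaults declared above; the command is a placeholder:

    package main

    import (
        "fmt"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        // Giving a command is enough to enable exec mode; Finalize fills in
        // the SIGINT kill signal, the 30s kill timeout, no reload signal,
        // and no splay.
        e := &config.ExecConfig{Command: config.String("nginx -s reload")}
        e.Finalize()

        fmt.Println(config.BoolVal(e.Enabled))             // true
        fmt.Println(config.TimeDurationVal(e.KillTimeout)) // 30s
        fmt.Println(config.SignalGoString(e.KillSignal))   // "interrupt"
    }
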
+func (c *ExecConfig) Copy() *ExecConfig { + if c == nil { + return nil + } + + var o ExecConfig + + o.Command = c.Command + + o.Enabled = c.Enabled + + if c.Env != nil { + o.Env = c.Env.Copy() + } + + o.KillSignal = c.KillSignal + + o.KillTimeout = c.KillTimeout + + o.ReloadSignal = c.ReloadSignal + + o.Splay = c.Splay + + o.Timeout = c.Timeout + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *ExecConfig) Merge(o *ExecConfig) *ExecConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Command != nil { + r.Command = o.Command + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Env != nil { + r.Env = r.Env.Merge(o.Env) + } + + if o.KillSignal != nil { + r.KillSignal = o.KillSignal + } + + if o.KillTimeout != nil { + r.KillTimeout = o.KillTimeout + } + + if o.ReloadSignal != nil { + r.ReloadSignal = o.ReloadSignal + } + + if o.Splay != nil { + r.Splay = o.Splay + } + + if o.Timeout != nil { + r.Timeout = o.Timeout + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *ExecConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Command)) + } + + if c.Command == nil { + c.Command = String("") + } + + if c.Env == nil { + c.Env = DefaultEnvConfig() + } + c.Env.Finalize() + + if c.KillSignal == nil { + c.KillSignal = Signal(DefaultExecKillSignal) + } + + if c.KillTimeout == nil { + c.KillTimeout = TimeDuration(DefaultExecKillTimeout) + } + + if c.ReloadSignal == nil { + c.ReloadSignal = Signal(DefaultExecReloadSignal) + } + + if c.Splay == nil { + c.Splay = TimeDuration(0 * time.Second) + } + + if c.Timeout == nil { + c.Timeout = TimeDuration(DefaultExecTimeout) + } +} + +// GoString defines the printable version of this struct. +func (c *ExecConfig) GoString() string { + if c == nil { + return "(*ExecConfig)(nil)" + } + + return fmt.Sprintf("&ExecConfig{"+ + "Command:%s, "+ + "Enabled:%s, "+ + "Env:%#v, "+ + "KillSignal:%s, "+ + "KillTimeout:%s, "+ + "ReloadSignal:%s, "+ + "Splay:%s, "+ + "Timeout:%s"+ + "}", + StringGoString(c.Command), + BoolGoString(c.Enabled), + c.Env, + SignalGoString(c.KillSignal), + TimeDurationGoString(c.KillTimeout), + SignalGoString(c.ReloadSignal), + TimeDurationGoString(c.Splay), + TimeDurationGoString(c.Timeout), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go new file mode 100644 index 000000000000..64ea53933dc7 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go @@ -0,0 +1,75 @@ +package config + +import ( + "log" + "os" + "reflect" + "strconv" + + "github.com/mitchellh/mapstructure" +) + +// StringToFileModeFunc returns a function that converts strings to os.FileMode +// value. This is designed to be used with mapstructure for parsing out a +// filemode value. 
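
These hooks are meant to be composed into a mapstructure decoder, as Parse does in config.go. A standalone sketch using only the file-mode hook (defined just below); the fileSettings struct is hypothetical:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/hashicorp/consul-template/config"
        "github.com/mitchellh/mapstructure"
    )

    type fileSettings struct {
        Perms os.FileMode `mapstructure:"perms"`
    }

    func main() {
        // The hook converts the octal string "0644" into an os.FileMode
        // while decoding, as Parse does for template "perms" settings.
        var out fileSettings
        dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
            DecodeHook: config.StringToFileModeFunc(),
            Result:     &out,
        })
        if err != nil {
            log.Fatal(err)
        }
        if err := dec.Decode(map[string]interface{}{"perms": "0644"}); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%o\n", out.Perms) // 644
    }
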
+func StringToFileModeFunc() mapstructure.DecodeHookFunc {
+    return func(
+        f reflect.Type,
+        t reflect.Type,
+        data interface{}) (interface{}, error) {
+        if f.Kind() != reflect.String {
+            return data, nil
+        }
+        if t != reflect.TypeOf(os.FileMode(0)) {
+            return data, nil
+        }
+
+        // Convert it by parsing
+        v, err := strconv.ParseUint(data.(string), 8, 12)
+        if err != nil {
+            return data, err
+        }
+        return os.FileMode(v), nil
+    }
+}
+
+// StringToWaitDurationHookFunc returns a function that converts strings to wait
+// value. This is designed to be used with mapstructure for parsing out a wait
+// value.
+func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc {
+    return func(
+        f reflect.Type,
+        t reflect.Type,
+        data interface{}) (interface{}, error) {
+        if f.Kind() != reflect.String {
+            return data, nil
+        }
+        if t != reflect.TypeOf(WaitConfig{}) {
+            return data, nil
+        }
+
+        // Convert it by parsing
+        return ParseWaitConfig(data.(string))
+    }
+}
+
+// ConsulStringToStructFunc checks if the value set for the key should actually
+// be a struct and sets the appropriate value in the struct. This is for
+// backwards-compatibility with older versions of Consul Template.
+func ConsulStringToStructFunc() mapstructure.DecodeHookFunc {
+    return func(
+        f reflect.Type,
+        t reflect.Type,
+        data interface{}) (interface{}, error) {
+        if t == reflect.TypeOf(ConsulConfig{}) && f.Kind() == reflect.String {
+            log.Println("[WARN] consul now accepts a stanza instead of a string. " +
+                "Update your configuration files and change consul = \"\" to " +
+                "consul { } instead.")
+            return &ConsulConfig{
+                Address: String(data.(string)),
+            }, nil
+        }
+
+        return data, nil
+    }
+}
diff --git a/vendor/github.com/hashicorp/consul-template/config/retry.go b/vendor/github.com/hashicorp/consul-template/config/retry.go
new file mode 100644
index 000000000000..0a4346cff7c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul-template/config/retry.go
@@ -0,0 +1,170 @@
+package config
+
+import (
+    "fmt"
+    "math"
+    "time"
+)
+
+const (
+    // DefaultRetryAttempts is the default number of maximum retry attempts.
+    DefaultRetryAttempts = 12
+
+    // DefaultRetryBackoff is the default base for the exponential backoff
+    // algorithm.
+    DefaultRetryBackoff = 250 * time.Millisecond
+
+    // DefaultRetryMaxBackoff is the default maximum of backoff time
+    DefaultRetryMaxBackoff = 1 * time.Minute
+)
+
+// RetryFunc is the signature of a function that supports retries.
+type RetryFunc func(int) (bool, time.Duration)
+
+// RetryConfig is a shared configuration for upstreams that support retries on
+// failure.
+type RetryConfig struct {
+    // Attempts is the total number of maximum attempts to retry before letting
+    // the error fall through.
+    // 0 means unlimited.
+    Attempts *int
+
+    // Backoff is the base of the exponential backoff. This number will be
+    // multiplied by the next power of 2 on each iteration.
+    Backoff *time.Duration
+
+    // MaxBackoff is an upper limit to the sleep time between retries.
+    // A MaxBackoff of zero means there is no limit to the exponential growth of the backoff.
+    MaxBackoff *time.Duration `mapstructure:"max_backoff"`
+
+    // Enabled signals if this retry is enabled.
+    Enabled *bool
+}
+
+// DefaultRetryConfig returns a configuration that is populated with the
+// default values.
+func DefaultRetryConfig() *RetryConfig {
+    return &RetryConfig{}
+}
+
+// Copy returns a deep copy of this configuration.
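
Working the backoff arithmetic in RetryFunc (below) through the defaults gives concrete sleep times; a sketch:

    package main

    import (
        "fmt"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        r := config.DefaultRetryConfig()
        r.Finalize() // attempts=12, backoff=250ms, max_backoff=1m, enabled

        retry := r.RetryFunc()

        // sleep = 2^attempt * 250ms, capped at one minute:
        // attempt 0 -> 250ms, 1 -> 500ms, 4 -> 4s, 8 -> 1m (capped).
        for _, attempt := range []int{0, 1, 4, 8} {
            again, sleep := retry(attempt)
            fmt.Println(attempt, again, sleep)
        }

        // Past the attempt budget the retry function gives up.
        again, _ := retry(12)
        fmt.Println(again) // false
    }
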
+func (c *RetryConfig) Copy() *RetryConfig { + if c == nil { + return nil + } + + var o RetryConfig + + o.Attempts = c.Attempts + + o.Backoff = c.Backoff + + o.MaxBackoff = c.MaxBackoff + + o.Enabled = c.Enabled + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Attempts != nil { + r.Attempts = o.Attempts + } + + if o.Backoff != nil { + r.Backoff = o.Backoff + } + + if o.MaxBackoff != nil { + r.MaxBackoff = o.MaxBackoff + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + return r +} + +// RetryFunc returns the retry function associated with this configuration. +func (c *RetryConfig) RetryFunc() RetryFunc { + return func(retry int) (bool, time.Duration) { + if !BoolVal(c.Enabled) { + return false, 0 + } + + if IntVal(c.Attempts) > 0 && retry > IntVal(c.Attempts)-1 { + return false, 0 + } + + baseSleep := TimeDurationVal(c.Backoff) + maxSleep := TimeDurationVal(c.MaxBackoff) + + if maxSleep > 0 { + attemptsTillMaxBackoff := int(math.Log2(maxSleep.Seconds() / baseSleep.Seconds())) + if retry > attemptsTillMaxBackoff { + return true, maxSleep + } + } + + base := math.Pow(2, float64(retry)) + sleep := time.Duration(base) * baseSleep + + return true, sleep + } +} + +// Finalize ensures there no nil pointers. +func (c *RetryConfig) Finalize() { + if c.Attempts == nil { + c.Attempts = Int(DefaultRetryAttempts) + } + + if c.Backoff == nil { + c.Backoff = TimeDuration(DefaultRetryBackoff) + } + + if c.MaxBackoff == nil { + c.MaxBackoff = TimeDuration(DefaultRetryMaxBackoff) + } + + if c.Enabled == nil { + c.Enabled = Bool(true) + } +} + +// GoString defines the printable version of this struct. +func (c *RetryConfig) GoString() string { + if c == nil { + return "(*RetryConfig)(nil)" + } + + return fmt.Sprintf("&RetryConfig{"+ + "Attempts:%s, "+ + "Backoff:%s, "+ + "MaxBackoff:%s, "+ + "Enabled:%s"+ + "}", + IntGoString(c.Attempts), + TimeDurationGoString(c.Backoff), + TimeDurationGoString(c.MaxBackoff), + BoolGoString(c.Enabled), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/ssl.go b/vendor/github.com/hashicorp/consul-template/config/ssl.go new file mode 100644 index 000000000000..ab3b77e614b0 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/ssl.go @@ -0,0 +1,153 @@ +package config + +import "fmt" + +const ( + // DefaultSSLVerify is the default value for SSL verification. + DefaultSSLVerify = true +) + +// SSLConfig is the configuration for SSL. +type SSLConfig struct { + CaCert *string `mapstructure:"ca_cert"` + CaPath *string `mapstructure:"ca_path"` + Cert *string `mapstructure:"cert"` + Enabled *bool `mapstructure:"enabled"` + Key *string `mapstructure:"key"` + ServerName *string `mapstructure:"server_name"` + Verify *bool `mapstructure:"verify"` +} + +// DefaultSSLConfig returns a configuration that is populated with the +// default values. +func DefaultSSLConfig() *SSLConfig { + return &SSLConfig{} +} + +// Copy returns a deep copy of this configuration. 
+func (c *SSLConfig) Copy() *SSLConfig { + if c == nil { + return nil + } + + var o SSLConfig + o.CaCert = c.CaCert + o.CaPath = c.CaPath + o.Cert = c.Cert + o.Enabled = c.Enabled + o.Key = c.Key + o.ServerName = c.ServerName + o.Verify = c.Verify + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *SSLConfig) Merge(o *SSLConfig) *SSLConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Cert != nil { + r.Cert = o.Cert + } + + if o.CaCert != nil { + r.CaCert = o.CaCert + } + + if o.CaPath != nil { + r.CaPath = o.CaPath + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Key != nil { + r.Key = o.Key + } + + if o.ServerName != nil { + r.ServerName = o.ServerName + } + + if o.Verify != nil { + r.Verify = o.Verify + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *SSLConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + StringPresent(c.Cert) || + StringPresent(c.CaCert) || + StringPresent(c.CaPath) || + StringPresent(c.Key) || + StringPresent(c.ServerName) || + BoolPresent(c.Verify)) + } + + if c.Cert == nil { + c.Cert = String("") + } + + if c.CaCert == nil { + c.CaCert = String("") + } + + if c.CaPath == nil { + c.CaPath = String("") + } + + if c.Key == nil { + c.Key = String("") + } + + if c.ServerName == nil { + c.ServerName = String("") + } + + if c.Verify == nil { + c.Verify = Bool(DefaultSSLVerify) + } +} + +// GoString defines the printable version of this struct. +func (c *SSLConfig) GoString() string { + if c == nil { + return "(*SSLConfig)(nil)" + } + + return fmt.Sprintf("&SSLConfig{"+ + "CaCert:%s, "+ + "CaPath:%s, "+ + "Cert:%s, "+ + "Enabled:%s, "+ + "Key:%s, "+ + "ServerName:%s, "+ + "Verify:%s"+ + "}", + StringGoString(c.CaCert), + StringGoString(c.CaPath), + StringGoString(c.Cert), + BoolGoString(c.Enabled), + StringGoString(c.Key), + StringGoString(c.ServerName), + BoolGoString(c.Verify), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/syslog.go b/vendor/github.com/hashicorp/consul-template/config/syslog.go new file mode 100644 index 000000000000..0de67199d7d2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/syslog.go @@ -0,0 +1,87 @@ +package config + +import "fmt" + +const ( + // DefaultSyslogFacility is the default facility to log to. + DefaultSyslogFacility = "LOCAL0" +) + +// SyslogConfig is the configuration for syslog. +type SyslogConfig struct { + Enabled *bool `mapstructure:"enabled"` + Facility *string `mapstructure:"facility"` +} + +// DefaultSyslogConfig returns a configuration that is populated with the +// default values. +func DefaultSyslogConfig() *SyslogConfig { + return &SyslogConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *SyslogConfig) Copy() *SyslogConfig { + if c == nil { + return nil + } + + var o SyslogConfig + o.Enabled = c.Enabled + o.Facility = c.Facility + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. 
+func (c *SyslogConfig) Merge(o *SyslogConfig) *SyslogConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Facility != nil { + r.Facility = o.Facility + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *SyslogConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Facility)) + } + + if c.Facility == nil { + c.Facility = String(DefaultSyslogFacility) + } +} + +// GoString defines the printable version of this struct. +func (c *SyslogConfig) GoString() string { + if c == nil { + return "(*SyslogConfig)(nil)" + } + + return fmt.Sprintf("&SyslogConfig{"+ + "Enabled:%s, "+ + "Facility:%s"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Facility), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/template.go b/vendor/github.com/hashicorp/consul-template/config/template.go new file mode 100644 index 000000000000..4f69bfb6033f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/template.go @@ -0,0 +1,458 @@ +package config + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" + "time" +) + +const ( + // DefaultTemplateCommandTimeout is the amount of time to wait for a command + // to return. + DefaultTemplateCommandTimeout = 30 * time.Second +) + +var ( + // ErrTemplateStringEmpty is the error returned with the template contents + // are empty. + ErrTemplateStringEmpty = errors.New("template: cannot be empty") + + // configTemplateRe is the pattern to split the config template syntax. + configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)") +) + +// TemplateConfig is a representation of a template on disk, as well as the +// associated commands and reload instructions. +type TemplateConfig struct { + // Backup determines if this template should retain a backup. The default + // value is false. + Backup *bool `mapstructure:"backup"` + + // Command is the arbitrary command to execute after a template has + // successfully rendered. This is DEPRECATED. Use Exec instead. + Command *string `mapstructure:"command"` + + // CommandTimeout is the amount of time to wait for the command to finish + // before force-killing it. This is DEPRECATED. Use Exec instead. + CommandTimeout *time.Duration `mapstructure:"command_timeout"` + + // Contents are the raw template contents to evaluate. Either this or Source + // must be specified, but not both. + Contents *string `mapstructure:"contents"` + + // CreateDestDirs tells Consul Template to create the parent directories of + // the destination path if they do not exist. The default value is true. + CreateDestDirs *bool `mapstructure:"create_dest_dirs"` + + // Destination is the location on disk where the template should be rendered. + // This is required unless running in debug/dry mode. + Destination *string `mapstructure:"destination"` + + // ErrMissingKey is used to control how the template behaves when attempting + // to index a struct or map key that does not exist. + ErrMissingKey *bool `mapstructure:"error_on_missing_key"` + + // Exec is the configuration for the command to run when the template renders + // successfully. + Exec *ExecConfig `mapstructure:"exec"` + + // Perms are the file system permissions to use when creating the file on + // disk. This is useful for when files contain sensitive information, such as + // secrets from Vault. 
+ Perms *os.FileMode `mapstructure:"perms"` + + // Source is the path on disk to the template contents to evaluate. Either + // this or Contents should be specified, but not both. + Source *string `mapstructure:"source"` + + // Wait configures per-template quiescence timers. + Wait *WaitConfig `mapstructure:"wait"` + + // LeftDelim and RightDelim are optional configurations to control what + // delimiter is utilized when parsing the template. + LeftDelim *string `mapstructure:"left_delimiter"` + RightDelim *string `mapstructure:"right_delimiter"` + + // FunctionBlacklist is a list of functions that this template is not + // permitted to run. + FunctionBlacklist []string `mapstructure:"function_blacklist"` + + // SandboxPath adds a prefix to any path provided to the `file` function + // and causes an error if a relative path tries to traverse outside that + // prefix. + SandboxPath *string `mapstructure:"sandbox_path"` +} + +// DefaultTemplateConfig returns a configuration that is populated with the +// default values. +func DefaultTemplateConfig() *TemplateConfig { + return &TemplateConfig{ + Exec: DefaultExecConfig(), + Wait: DefaultWaitConfig(), + } +} + +// Copy returns a deep copy of this configuration. +func (c *TemplateConfig) Copy() *TemplateConfig { + if c == nil { + return nil + } + + var o TemplateConfig + + o.Backup = c.Backup + + o.Command = c.Command + + o.CommandTimeout = c.CommandTimeout + + o.Contents = c.Contents + + o.CreateDestDirs = c.CreateDestDirs + + o.Destination = c.Destination + + o.ErrMissingKey = c.ErrMissingKey + + if c.Exec != nil { + o.Exec = c.Exec.Copy() + } + + o.Perms = c.Perms + + o.Source = c.Source + + if c.Wait != nil { + o.Wait = c.Wait.Copy() + } + + o.LeftDelim = c.LeftDelim + o.RightDelim = c.RightDelim + + for _, fun := range c.FunctionBlacklist { + o.FunctionBlacklist = append(o.FunctionBlacklist, fun) + } + o.SandboxPath = c.SandboxPath + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TemplateConfig) Merge(o *TemplateConfig) *TemplateConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Backup != nil { + r.Backup = o.Backup + } + + if o.Command != nil { + r.Command = o.Command + } + + if o.CommandTimeout != nil { + r.CommandTimeout = o.CommandTimeout + } + + if o.Contents != nil { + r.Contents = o.Contents + } + + if o.CreateDestDirs != nil { + r.CreateDestDirs = o.CreateDestDirs + } + + if o.Destination != nil { + r.Destination = o.Destination + } + + if o.ErrMissingKey != nil { + r.ErrMissingKey = o.ErrMissingKey + } + + if o.Exec != nil { + r.Exec = r.Exec.Merge(o.Exec) + } + + if o.Perms != nil { + r.Perms = o.Perms + } + + if o.Source != nil { + r.Source = o.Source + } + + if o.Wait != nil { + r.Wait = r.Wait.Merge(o.Wait) + } + + if o.LeftDelim != nil { + r.LeftDelim = o.LeftDelim + } + + if o.RightDelim != nil { + r.RightDelim = o.RightDelim + } + + for _, fun := range o.FunctionBlacklist { + r.FunctionBlacklist = append(r.FunctionBlacklist, fun) + } + if o.SandboxPath != nil { + r.SandboxPath = o.SandboxPath + } + + return r +} + +// Finalize ensures the configuration has no nil pointers and sets default +// values. 
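
A sketch of the deprecated-field handling in Finalize (below): the flat Command is copied into the Exec stanza so older configurations keep working. The file names and command are placeholders:

    package main

    import (
        "fmt"

        "github.com/hashicorp/consul-template/config"
    )

    func main() {
        t := &config.TemplateConfig{
            Source:      config.String("in.ctmpl"),
            Destination: config.String("out.txt"),
            Command:     config.String("systemctl reload nginx"),
        }
        t.Finalize()

        fmt.Println(config.StringVal(t.Exec.Command)) // systemctl reload nginx
        fmt.Println(t.Display())                      // "in.ctmpl" => "out.txt"
    }
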
+func (c *TemplateConfig) Finalize() { + if c.Backup == nil { + c.Backup = Bool(false) + } + + if c.Command == nil { + c.Command = String("") + } + + if c.CommandTimeout == nil { + c.CommandTimeout = TimeDuration(DefaultTemplateCommandTimeout) + } + + if c.Contents == nil { + c.Contents = String("") + } + + if c.CreateDestDirs == nil { + c.CreateDestDirs = Bool(true) + } + + if c.Destination == nil { + c.Destination = String("") + } + + if c.ErrMissingKey == nil { + c.ErrMissingKey = Bool(false) + } + + if c.Exec == nil { + c.Exec = DefaultExecConfig() + } + + // Backwards compat for specifying command directly + if c.Exec.Command == nil && c.Command != nil { + c.Exec.Command = c.Command + } + if c.Exec.Timeout == nil && c.CommandTimeout != nil { + c.Exec.Timeout = c.CommandTimeout + } + c.Exec.Finalize() + + if c.Perms == nil { + c.Perms = FileMode(0) + } + + if c.Source == nil { + c.Source = String("") + } + + if c.Wait == nil { + c.Wait = DefaultWaitConfig() + } + c.Wait.Finalize() + + if c.LeftDelim == nil { + c.LeftDelim = String("") + } + + if c.RightDelim == nil { + c.RightDelim = String("") + } + + if c.SandboxPath == nil { + c.SandboxPath = String("") + } +} + +// GoString defines the printable version of this struct. +func (c *TemplateConfig) GoString() string { + if c == nil { + return "(*TemplateConfig)(nil)" + } + + return fmt.Sprintf("&TemplateConfig{"+ + "Backup:%s, "+ + "Command:%s, "+ + "CommandTimeout:%s, "+ + "Contents:%s, "+ + "CreateDestDirs:%s, "+ + "Destination:%s, "+ + "ErrMissingKey:%s, "+ + "Exec:%#v, "+ + "Perms:%s, "+ + "Source:%s, "+ + "Wait:%#v, "+ + "LeftDelim:%s, "+ + "RightDelim:%s"+ + "FunctionBlacklist:%s"+ + "SandboxPath:%s"+ + "}", + BoolGoString(c.Backup), + StringGoString(c.Command), + TimeDurationGoString(c.CommandTimeout), + StringGoString(c.Contents), + BoolGoString(c.CreateDestDirs), + StringGoString(c.Destination), + BoolGoString(c.ErrMissingKey), + c.Exec, + FileModeGoString(c.Perms), + StringGoString(c.Source), + c.Wait, + StringGoString(c.LeftDelim), + StringGoString(c.RightDelim), + c.FunctionBlacklist, + StringGoString(c.SandboxPath), + ) +} + +// Display is the human-friendly form of this configuration. It tries to +// describe this template in as much detail as possible in a single line, so +// log consumers can uniquely identify it. +func (c *TemplateConfig) Display() string { + if c == nil { + return "" + } + + source := c.Source + if StringPresent(c.Contents) { + source = String("(dynamic)") + } + + return fmt.Sprintf("%q => %q", + StringVal(source), + StringVal(c.Destination), + ) +} + +// TemplateConfigs is a collection of TemplateConfigs +type TemplateConfigs []*TemplateConfig + +// DefaultTemplateConfigs returns a configuration that is populated with the +// default values. +func DefaultTemplateConfigs() *TemplateConfigs { + return &TemplateConfigs{} +} + +// Copy returns a deep copy of this configuration. +func (c *TemplateConfigs) Copy() *TemplateConfigs { + o := make(TemplateConfigs, len(*c)) + for i, t := range *c { + o[i] = t.Copy() + } + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. 
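Note the backwards-compatibility step in TemplateConfig.Finalize above: the deprecated top-level command and command_timeout settings are copied into the exec block when the latter is unset. A hedged usage sketch, assuming the vendored package imports cleanly and that ExecConfig.Finalize (not shown in this hunk) preserves fields that are already set:

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	// Only the deprecated field is set; Exec starts out nil.
	tpl := &config.TemplateConfig{Command: config.String("systemctl reload app")}
	tpl.Finalize()

	// Finalize backfilled Exec.Command from the deprecated Command field.
	fmt.Println(config.StringVal(tpl.Exec.Command))
}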
+func (c *TemplateConfigs) Merge(o *TemplateConfigs) *TemplateConfigs { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + *r = append(*r, *o...) + + return r +} + +// Finalize ensures the configuration has no nil pointers and sets default +// values. +func (c *TemplateConfigs) Finalize() { + if c == nil { + *c = *DefaultTemplateConfigs() + } + + for _, t := range *c { + t.Finalize() + } +} + +// GoString defines the printable version of this struct. +func (c *TemplateConfigs) GoString() string { + if c == nil { + return "(*TemplateConfigs)(nil)" + } + + s := make([]string, len(*c)) + for i, t := range *c { + s[i] = t.GoString() + } + + return "{" + strings.Join(s, ", ") + "}" +} + +// ParseTemplateConfig parses a string in the form source:destination:command +// into a TemplateConfig. +func ParseTemplateConfig(s string) (*TemplateConfig, error) { + if len(strings.TrimSpace(s)) < 1 { + return nil, ErrTemplateStringEmpty + } + + var source, destination, command string + parts := configTemplateRe.FindAllString(s, -1) + + switch len(parts) { + case 1: + source = parts[0] + case 2: + source, destination = parts[0], parts[1] + case 3: + source, destination, command = parts[0], parts[1], parts[2] + default: + source, destination = parts[0], parts[1] + command = strings.Join(parts[2:], ":") + } + + var sourcePtr, destinationPtr, commandPtr *string + if source != "" { + sourcePtr = String(source) + } + if destination != "" { + destinationPtr = String(destination) + } + if command != "" { + commandPtr = String(command) + } + + return &TemplateConfig{ + Source: sourcePtr, + Destination: destinationPtr, + Command: commandPtr, + }, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/config/transport.go b/vendor/github.com/hashicorp/consul-template/config/transport.go new file mode 100644 index 000000000000..dc218daa2b01 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/transport.go @@ -0,0 +1,188 @@ +package config + +import ( + "fmt" + "runtime" + "time" +) + +const ( + // DefaultDialKeepAlive is the default amount of time to keep alive + // connections. + DefaultDialKeepAlive = 30 * time.Second + + // DefaultDialTimeout is the amount of time to attempt to dial before timing + // out. + DefaultDialTimeout = 30 * time.Second + + // DefaultIdleConnTimeout is the default connection timeout for idle + // connections. + DefaultIdleConnTimeout = 90 * time.Second + + // DefaultMaxIdleConns is the default number of maximum idle connections. + DefaultMaxIdleConns = 100 + + // DefaultTLSHandshakeTimeout is the amount of time to negotiate the TLS + // handshake. + DefaultTLSHandshakeTimeout = 10 * time.Second +) + +var ( + // DefaultMaxIdleConnsPerHost is the default number of idle connections to use + // per host. + DefaultMaxIdleConnsPerHost = runtime.GOMAXPROCS(0) + 1 +) + +// TransportConfig is the configuration to tune low-level APIs for the +// interactions on the wire. +type TransportConfig struct { + // DialKeepAlive is the amount of time for keep-alives. + DialKeepAlive *time.Duration `mapstructure:"dial_keep_alive"` + + // DialTimeout is the amount of time to wait to establish a connection. + DialTimeout *time.Duration `mapstructure:"dial_timeout"` + + // DisableKeepAlives determines if keep-alives should be used. Disabling this + // significantly decreases performance. 
+ DisableKeepAlives *bool `mapstructure:"disable_keep_alives"` + + // IdleConnTimeout is the timeout for idle connections. + IdleConnTimeout *time.Duration `mapstructure:"idle_conn_timeout"` + + // MaxIdleConns is the maximum number of total idle connections. + MaxIdleConns *int `mapstructure:"max_idle_conns"` + + // MaxIdleConnsPerHost is the maximum number of idle connections per remote + // host. + MaxIdleConnsPerHost *int `mapstructure:"max_idle_conns_per_host"` + + // TLSHandshakeTimeout is the amount of time to wait to complete the TLS + // handshake. + TLSHandshakeTimeout *time.Duration `mapstructure:"tls_handshake_timeout"` +} + +// DefaultTransportConfig returns a configuration that is populated with the +// default values. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *TransportConfig) Copy() *TransportConfig { + if c == nil { + return nil + } + + var o TransportConfig + + o.DialKeepAlive = c.DialKeepAlive + o.DialTimeout = c.DialTimeout + o.DisableKeepAlives = c.DisableKeepAlives + o.IdleConnTimeout = c.IdleConnTimeout + o.MaxIdleConns = c.MaxIdleConns + o.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost + o.TLSHandshakeTimeout = c.TLSHandshakeTimeout + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TransportConfig) Merge(o *TransportConfig) *TransportConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.DialKeepAlive != nil { + r.DialKeepAlive = o.DialKeepAlive + } + + if o.DialTimeout != nil { + r.DialTimeout = o.DialTimeout + } + + if o.DisableKeepAlives != nil { + r.DisableKeepAlives = o.DisableKeepAlives + } + + if o.IdleConnTimeout != nil { + r.IdleConnTimeout = o.IdleConnTimeout + } + + if o.MaxIdleConns != nil { + r.MaxIdleConns = o.MaxIdleConns + } + + if o.MaxIdleConnsPerHost != nil { + r.MaxIdleConnsPerHost = o.MaxIdleConnsPerHost + } + + if o.TLSHandshakeTimeout != nil { + r.TLSHandshakeTimeout = o.TLSHandshakeTimeout + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *TransportConfig) Finalize() { + if c.DialKeepAlive == nil { + c.DialKeepAlive = TimeDuration(DefaultDialKeepAlive) + } + + if c.DialTimeout == nil { + c.DialTimeout = TimeDuration(DefaultDialTimeout) + } + + if c.DisableKeepAlives == nil { + c.DisableKeepAlives = Bool(false) + } + + if c.IdleConnTimeout == nil { + c.IdleConnTimeout = TimeDuration(DefaultIdleConnTimeout) + } + + if c.MaxIdleConns == nil { + c.MaxIdleConns = Int(DefaultMaxIdleConns) + } + + if c.MaxIdleConnsPerHost == nil { + c.MaxIdleConnsPerHost = Int(DefaultMaxIdleConnsPerHost) + } + + if c.TLSHandshakeTimeout == nil { + c.TLSHandshakeTimeout = TimeDuration(DefaultTLSHandshakeTimeout) + } +} + +// GoString defines the printable version of this struct. 
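These knobs map one-for-one onto Go's net/http transport, and the client_set.go file later in this patch wires them up exactly that way. A condensed standalone sketch, with the package defaults written as literals:

package main

import (
	"net"
	"net/http"
	"time"
)

// newTransport applies the finalized TransportConfig values; each literal
// stands in for the corresponding default constant above.
func newTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second, // DefaultDialTimeout
			KeepAlive: 30 * time.Second, // DefaultDialKeepAlive
		}).Dial,
		MaxIdleConns:        100,              // DefaultMaxIdleConns
		IdleConnTimeout:     90 * time.Second, // DefaultIdleConnTimeout
		TLSHandshakeTimeout: 10 * time.Second, // DefaultTLSHandshakeTimeout
	}
}

func main() {
	client := &http.Client{Transport: newTransport()}
	_ = client
}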
+func (c *TransportConfig) GoString() string { + if c == nil { + return "(*TransportConfig)(nil)" + } + + return fmt.Sprintf("&TransportConfig{"+ + "DialKeepAlive:%s, "+ + "DialTimeout:%s, "+ + "DisableKeepAlives:%t, "+ + "MaxIdleConnsPerHost:%d, "+ + "TLSHandshakeTimeout:%s"+ + "}", + TimeDurationVal(c.DialKeepAlive), + TimeDurationVal(c.DialTimeout), + BoolVal(c.DisableKeepAlives), + IntVal(c.MaxIdleConnsPerHost), + TimeDurationVal(c.TLSHandshakeTimeout), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/vault.go b/vendor/github.com/hashicorp/consul-template/config/vault.go new file mode 100644 index 000000000000..0ba4cce73a65 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/vault.go @@ -0,0 +1,327 @@ +package config + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/api" +) + +const ( + // XXX Change use to api.EnvVaultSkipVerify once we've updated vendored + // vault to version 1.1.0 or newer. + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + + // DefaultVaultGrace is the default grace period before which to read a new + // secret from Vault. If a lease is due to expire in 15 seconds, Consul + // Template will read a new secret at that time minus this value. + DefaultVaultGrace = 15 * time.Second + + // DefaultVaultRenewToken is the default value for if the Vault token should + // be renewed. + DefaultVaultRenewToken = true + + // DefaultVaultUnwrapToken is the default value for if the Vault token should + // be unwrapped. + DefaultVaultUnwrapToken = false + + // DefaultVaultRetryBase is the default value for the base time to use for + // exponential backoff. + DefaultVaultRetryBase = 250 * time.Millisecond + + // DefaultVaultRetryMaxAttempts is the default maximum number of attempts to + // retry before quitting. + DefaultVaultRetryMaxAttempts = 5 +) + +// VaultConfig is the configuration for connecting to a vault server. +type VaultConfig struct { + // Address is the URI to the Vault server. + Address *string `mapstructure:"address"` + + // Enabled controls whether the Vault integration is active. + Enabled *bool `mapstructure:"enabled"` + + // Grace is the amount of time before a lease is about to expire to force a + // new secret to be read. + Grace *time.Duration `mapstructure:"grace"` + + // Namespace is the Vault namespace to use for reading/writing secrets. This can + // also be set via the VAULT_NAMESPACE environment variable. + Namespace *string `mapstructure:"namespace"` + + // RenewToken renews the Vault token. + RenewToken *bool `mapstructure:"renew_token"` + + // Retry is the configuration for specifying how to behave on failure. + Retry *RetryConfig `mapstructure:"retry"` + + // SSL indicates we should use a secure connection while talking to Vault. + SSL *SSLConfig `mapstructure:"ssl"` + + // Token is the Vault token to communicate with for requests. It may be + // a wrapped token or a real token. This can also be set via the VAULT_TOKEN + // environment variable, or via the VaultAgentTokenFile. + Token *string `mapstructure:"token" json:"-"` + + // VaultAgentTokenFile is the path of file that contains a Vault Agent token. + // If vault_agent_token_file is specified: + // - Consul Template will not try to renew the Vault token. + // - Consul Template will periodically stat the file and update the token if it has + // changed. + VaultAgentTokenFile *string `mapstructure:"vault_agent_token_file" json:"-"` + + // Transport configures the low-level network connection details. 
+ Transport *TransportConfig `mapstructure:"transport"` + + // UnwrapToken unwraps the provided Vault token as a wrapped token. + UnwrapToken *bool `mapstructure:"unwrap_token"` +} + +// DefaultVaultConfig returns a configuration that is populated with the +// default values. +func DefaultVaultConfig() *VaultConfig { + v := &VaultConfig{ + Retry: DefaultRetryConfig(), + SSL: DefaultSSLConfig(), + Transport: DefaultTransportConfig(), + } + + // Force SSL when communicating with Vault. + v.SSL.Enabled = Bool(true) + + return v +} + +// Copy returns a deep copy of this configuration. +func (c *VaultConfig) Copy() *VaultConfig { + if c == nil { + return nil + } + + var o VaultConfig + o.Address = c.Address + + o.Enabled = c.Enabled + + o.Grace = c.Grace + + o.Namespace = c.Namespace + + o.RenewToken = c.RenewToken + + if c.Retry != nil { + o.Retry = c.Retry.Copy() + } + + if c.SSL != nil { + o.SSL = c.SSL.Copy() + } + + o.Token = c.Token + + o.VaultAgentTokenFile = c.VaultAgentTokenFile + + if c.Transport != nil { + o.Transport = c.Transport.Copy() + } + + o.UnwrapToken = c.UnwrapToken + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Address != nil { + r.Address = o.Address + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Grace != nil { + r.Grace = o.Grace + } + + if o.Namespace != nil { + r.Namespace = o.Namespace + } + + if o.RenewToken != nil { + r.RenewToken = o.RenewToken + } + + if o.Retry != nil { + r.Retry = r.Retry.Merge(o.Retry) + } + + if o.SSL != nil { + r.SSL = r.SSL.Merge(o.SSL) + } + + if o.Token != nil { + r.Token = o.Token + } + + if o.VaultAgentTokenFile != nil { + r.VaultAgentTokenFile = o.VaultAgentTokenFile + } + + if o.Transport != nil { + r.Transport = r.Transport.Merge(o.Transport) + } + + if o.UnwrapToken != nil { + r.UnwrapToken = o.UnwrapToken + } + + return r +} + +// Finalize ensures there no nil pointers. 
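The Finalize method below resolves unset fields from the environment through package helpers (stringFromEnv, boolFromEnv, antiboolFromEnv, stringFromFile) whose definitions live outside this hunk. A standalone sketch of the fallback idiom they implement, assuming first-non-empty-variable-wins semantics:

package main

import (
	"fmt"
	"os"
)

// firstEnv returns a pointer to the first non-empty environment variable in
// keys, falling back to def, mirroring the stringFromEnv helper.
func firstEnv(keys []string, def string) *string {
	for _, k := range keys {
		if v := os.Getenv(k); v != "" {
			return &v
		}
	}
	return &def
}

func main() {
	addr := firstEnv([]string{"VAULT_ADDR"}, "https://127.0.0.1:8200")
	fmt.Println(*addr)
}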
+func (c *VaultConfig) Finalize() { + if c.Address == nil { + c.Address = stringFromEnv([]string{ + api.EnvVaultAddress, + }, "") + } + + if c.Grace == nil { + c.Grace = TimeDuration(DefaultVaultGrace) + } + + if c.Namespace == nil { + c.Namespace = stringFromEnv([]string{"VAULT_NAMESPACE"}, "") + } + + if c.RenewToken == nil { + default_renew := DefaultVaultRenewToken + if c.VaultAgentTokenFile != nil { + default_renew = false + } + c.RenewToken = boolFromEnv([]string{ + "VAULT_RENEW_TOKEN", + }, default_renew) + } + + if c.Retry == nil { + c.Retry = DefaultRetryConfig() + } + c.Retry.Finalize() + + // Vault has custom SSL settings + if c.SSL == nil { + c.SSL = DefaultSSLConfig() + } + if c.SSL.Enabled == nil { + c.SSL.Enabled = Bool(true) + } + if c.SSL.CaCert == nil { + c.SSL.CaCert = stringFromEnv([]string{api.EnvVaultCACert}, "") + } + if c.SSL.CaPath == nil { + c.SSL.CaPath = stringFromEnv([]string{api.EnvVaultCAPath}, "") + } + if c.SSL.Cert == nil { + c.SSL.Cert = stringFromEnv([]string{api.EnvVaultClientCert}, "") + } + if c.SSL.Key == nil { + c.SSL.Key = stringFromEnv([]string{api.EnvVaultClientKey}, "") + } + if c.SSL.ServerName == nil { + c.SSL.ServerName = stringFromEnv([]string{api.EnvVaultTLSServerName}, "") + } + if c.SSL.Verify == nil { + c.SSL.Verify = antiboolFromEnv([]string{ + EnvVaultSkipVerify, api.EnvVaultInsecure}, true) + } + c.SSL.Finalize() + + // Order of precedence + // 1. `vault_agent_token_file` configuration value + // 2. `token` configuration value` + // 3. `VAULT_TOKEN` environment variable + if c.Token == nil { + c.Token = stringFromEnv([]string{ + "VAULT_TOKEN", + }, "") + } + + if c.VaultAgentTokenFile == nil { + if StringVal(c.Token) == "" { + if homePath != "" { + c.Token = stringFromFile([]string{ + homePath + "/.vault-token", + }, "") + } + } + } else { + c.Token = stringFromFile([]string{*c.VaultAgentTokenFile}, "") + } + + if c.Transport == nil { + c.Transport = DefaultTransportConfig() + } + c.Transport.Finalize() + + if c.UnwrapToken == nil { + c.UnwrapToken = boolFromEnv([]string{ + "VAULT_UNWRAP_TOKEN", + }, DefaultVaultUnwrapToken) + } + + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Address)) + } +} + +// GoString defines the printable version of this struct. +func (c *VaultConfig) GoString() string { + if c == nil { + return "(*VaultConfig)(nil)" + } + + return fmt.Sprintf("&VaultConfig{"+ + "Address:%s, "+ + "Enabled:%s, "+ + "Grace:%s, "+ + "Namespace:%s,"+ + "RenewToken:%s, "+ + "Retry:%#v, "+ + "SSL:%#v, "+ + "Token:%t, "+ + "VaultAgentTokenFile:%t, "+ + "Transport:%#v, "+ + "UnwrapToken:%s"+ + "}", + StringGoString(c.Address), + BoolGoString(c.Enabled), + TimeDurationGoString(c.Grace), + StringGoString(c.Namespace), + BoolGoString(c.RenewToken), + c.Retry, + c.SSL, + StringPresent(c.Token), + StringPresent(c.VaultAgentTokenFile), + c.Transport, + BoolGoString(c.UnwrapToken), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/wait.go b/vendor/github.com/hashicorp/consul-template/config/wait.go new file mode 100644 index 000000000000..8e3d56c19683 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/wait.go @@ -0,0 +1,191 @@ +package config + +import ( + "errors" + "fmt" + "strings" + "time" +) + +var ( + // ErrWaitStringEmpty is the error returned when wait is specified as an empty + // string. + ErrWaitStringEmpty = errors.New("wait: cannot be empty") + + // ErrWaitInvalidFormat is the error returned when the wait is specified + // incorrectly. 
+ ErrWaitInvalidFormat = errors.New("wait: invalid format") + + // ErrWaitNegative is the error returned with the wait is negative. + ErrWaitNegative = errors.New("wait: cannot be negative") + + // ErrWaitMinLTMax is the error returned with the minimum wait time is not + // less than the maximum wait time. + ErrWaitMinLTMax = errors.New("wait: min must be less than max") +) + +// WaitConfig is the Min/Max duration used by the Watcher +type WaitConfig struct { + // Enabled determines if this wait is enabled. + Enabled *bool `mapstructure:"bool"` + + // Min and Max are the minimum and maximum time, respectively, to wait for + // data changes before rendering a new template to disk. + Min *time.Duration `mapstructure:"min"` + Max *time.Duration `mapstructure:"max"` +} + +// DefaultWaitConfig is the default configuration. +func DefaultWaitConfig() *WaitConfig { + return &WaitConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *WaitConfig) Copy() *WaitConfig { + if c == nil { + return nil + } + + var o WaitConfig + o.Enabled = c.Enabled + o.Min = c.Min + o.Max = c.Max + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *WaitConfig) Merge(o *WaitConfig) *WaitConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Min != nil { + r.Min = o.Min + } + + if o.Max != nil { + r.Max = o.Max + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *WaitConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(TimeDurationPresent(c.Min)) + } + + if c.Min == nil { + c.Min = TimeDuration(0 * time.Second) + } + + if c.Max == nil { + c.Max = TimeDuration(4 * *c.Min) + } +} + +// GoString defines the printable version of this struct. +func (c *WaitConfig) GoString() string { + if c == nil { + return "(*WaitConfig)(nil)" + } + + return fmt.Sprintf("&WaitConfig{"+ + "Enabled:%s, "+ + "Min:%s, "+ + "Max:%s"+ + "}", + BoolGoString(c.Enabled), + TimeDurationGoString(c.Min), + TimeDurationGoString(c.Max), + ) +} + +// ParseWaitConfig parses a string of the format `minimum(:maximum)` into a +// WaitConfig. +func ParseWaitConfig(s string) (*WaitConfig, error) { + s = strings.TrimSpace(s) + if len(s) < 1 { + return nil, ErrWaitStringEmpty + } + + parts := strings.Split(s, ":") + + var min, max time.Duration + var err error + + switch len(parts) { + case 1: + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max = 4 * min + case 2: + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max, err = time.ParseDuration(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + default: + return nil, ErrWaitInvalidFormat + } + + if min < 0 || max < 0 { + return nil, ErrWaitNegative + } + + if max < min { + return nil, ErrWaitMinLTMax + } + + var c WaitConfig + c.Min = TimeDuration(min) + c.Max = TimeDuration(max) + + return &c, nil +} + +// WaitVar implements the Flag.Value interface and allows the user to specify +// a watch interval using Go's flag parsing library. +type WaitVar WaitConfig + +// Set sets the value in the format min[:max] for a wait timer. 
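ParseWaitConfig above accepts either a single duration, in which case the maximum defaults to four times the minimum, or an explicit minimum:maximum pair; WaitVar.Set below simply delegates to it. A quick usage sketch, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	// Single value: max defaults to 4x min.
	w, err := config.ParseWaitConfig("5s")
	if err != nil {
		panic(err)
	}
	fmt.Println(*w.Min, *w.Max) // 5s 20s

	// Explicit pair, validated so neither value is negative and min <= max.
	w, err = config.ParseWaitConfig("5s:10s")
	if err != nil {
		panic(err)
	}
	fmt.Println(*w.Min, *w.Max) // 5s 10s
}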
+func (w *WaitVar) Set(value string) error { + wait, err := ParseWaitConfig(value) + if err != nil { + return err + } + + w.Min = wait.Min + w.Max = wait.Max + + return nil +} + +// String returns the string format for this wait variable +func (w *WaitVar) String() string { + return fmt.Sprintf("%s:%s", w.Min, w.Max) +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go new file mode 100644 index 000000000000..a78d33cf2365 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go @@ -0,0 +1,112 @@ +package dependency + +import ( + "log" + "net/url" + "sort" + "time" + + "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogDatacentersQuery)(nil) + + // CatalogDatacentersQuerySleepTime is the amount of time to sleep between + // queries, since the endpoint does not support blocking queries. + CatalogDatacentersQuerySleepTime = 15 * time.Second +) + +// CatalogDatacentersQuery is the dependency to query all datacenters +type CatalogDatacentersQuery struct { + ignoreFailing bool + + stopCh chan struct{} +} + +// NewCatalogDatacentersQuery creates a new datacenter dependency. +func NewCatalogDatacentersQuery(ignoreFailing bool) (*CatalogDatacentersQuery, error) { + return &CatalogDatacentersQuery{ + ignoreFailing: ignoreFailing, + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of strings representing the datacenters +func (d *CatalogDatacentersQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + opts = opts.Merge(&QueryOptions{}) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/datacenters", + RawQuery: opts.String(), + }) + + // This is pretty ghetto, but the datacenters endpoint does not support + // blocking queries, so we are going to "fake it until we make it". When we + // first query, the LastIndex will be "0", meaning we should immediately + // return data, but future calls will include a LastIndex. If we have a + // LastIndex in the query metadata, sleep for 15 seconds before asking Consul + // again. + // + // This is probably okay given the frequency in which datacenters actually + // change, but is technically not edge-triggering. + if opts.WaitIndex != 0 { + log.Printf("[TRACE] %s: long polling for %s", d, CatalogDatacentersQuerySleepTime) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(CatalogDatacentersQuerySleepTime): + } + } + + result, err := clients.Consul().Catalog().Datacenters() + if err != nil { + return nil, nil, errors.Wrapf(err, d.String()) + } + + // If the user opted in for skipping "down" datacenters, figure out which + // datacenters are down. + if d.ignoreFailing { + dcs := make([]string, 0, len(result)) + for _, dc := range result { + if _, _, err := clients.Consul().Catalog().Services(&api.QueryOptions{ + Datacenter: dc, + AllowStale: false, + RequireConsistent: true, + }); err == nil { + dcs = append(dcs, dc) + } + } + result = dcs + } + + log.Printf("[TRACE] %s: returned %d results", d, len(result)) + + sort.Strings(result) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *CatalogDatacentersQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. 
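Because the datacenters endpoint cannot block, Fetch above emulates a long poll: sleep between queries, but wake immediately when Stop closes the stop channel. That select-on-stop shape is reusable; a minimal sketch:

package main

import (
	"fmt"
	"time"
)

// poll sleeps between iterations yet returns promptly once stop is closed,
// the same shape as CatalogDatacentersQuery.Fetch above.
func poll(stop <-chan struct{}, interval time.Duration) {
	for {
		// ... issue the non-blocking query here ...
		select {
		case <-stop:
			fmt.Println("stopped")
			return
		case <-time.After(interval):
			// fall through to the next query
		}
	}
}

func main() {
	stop := make(chan struct{})
	go poll(stop, 15*time.Second)
	close(stop)
	time.Sleep(100 * time.Millisecond) // give the goroutine time to report
}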
+func (d *CatalogDatacentersQuery) String() string { + return "catalog.datacenters" +} + +// Stop terminates this dependency's fetch. +func (d *CatalogDatacentersQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogDatacentersQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go new file mode 100644 index 000000000000..12ef7633dd08 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go @@ -0,0 +1,181 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogNodeQuery)(nil) + + // CatalogNodeQueryRe is the regular expression to use. + CatalogNodeQueryRe = regexp.MustCompile(`\A` + nodeNameRe + dcRe + `\z`) +) + +func init() { + gob.Register([]*CatalogNode{}) + gob.Register([]*CatalogNodeService{}) +} + +// CatalogNodeQuery represents a single node from the Consul catalog. +type CatalogNodeQuery struct { + stopCh chan struct{} + + dc string + name string +} + +// CatalogNode is a wrapper around the node and its services. +type CatalogNode struct { + Node *Node + Services []*CatalogNodeService +} + +// CatalogNodeService is a service on a single node. +type CatalogNodeService struct { + ID string + Service string + Tags ServiceTags + Meta map[string]string + Port int + Address string + EnableTagOverride bool +} + +// NewCatalogNodeQuery parses the given string into a dependency. If the name is +// empty then the name of the local agent is used. +func NewCatalogNodeQuery(s string) (*CatalogNodeQuery, error) { + if s != "" && !CatalogNodeQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.node: invalid format: %q", s) + } + + m := regexpMatch(CatalogNodeQueryRe, s) + return &CatalogNodeQuery{ + dc: m["dc"], + name: m["name"], + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a +// of CatalogNode object. 
+func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + // Grab the name + name := d.name + + if name == "" { + log.Printf("[TRACE] %s: getting local agent name", d) + var err error + name, err = clients.Consul().Agent().NodeName() + if err != nil { + return nil, nil, errors.Wrapf(err, d.String()) + } + } + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/node/" + name, + RawQuery: opts.String(), + }) + node, qm, err := clients.Consul().Catalog().Node(name, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned response", d) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + if node == nil { + log.Printf("[WARN] %s: no node exists with the name %q", d, name) + var node CatalogNode + return &node, rm, nil + } + + services := make([]*CatalogNodeService, 0, len(node.Services)) + for _, v := range node.Services { + services = append(services, &CatalogNodeService{ + ID: v.ID, + Service: v.Service, + Tags: ServiceTags(deepCopyAndSortTags(v.Tags)), + Meta: v.Meta, + Port: v.Port, + Address: v.Address, + EnableTagOverride: v.EnableTagOverride, + }) + } + sort.Stable(ByService(services)) + + detail := &CatalogNode{ + Node: &Node{ + ID: node.Node.ID, + Node: node.Node.Node, + Address: node.Node.Address, + Datacenter: node.Node.Datacenter, + TaggedAddresses: node.Node.TaggedAddresses, + Meta: node.Node.Meta, + }, + Services: services, + } + + return detail, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogNodeQuery) CanShare() bool { + return false +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogNodeQuery) String() string { + name := d.name + if d.dc != "" { + name = name + "@" + d.dc + } + + if name == "" { + return "catalog.node" + } + return fmt.Sprintf("catalog.node(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogNodeQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogNodeQuery) Type() Type { + return TypeConsul +} + +// ByService is a sorter of node services by their service name and then ID. +type ByService []*CatalogNodeService + +func (s ByService) Len() int { return len(s) } +func (s ByService) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByService) Less(i, j int) bool { + if s[i].Service == s[j].Service { + return s[i].ID <= s[j].ID + } + return s[i].Service <= s[j].Service +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go new file mode 100644 index 000000000000..d570cf0fac3d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go @@ -0,0 +1,150 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogNodesQuery)(nil) + + // CatalogNodesQueryRe is the regular expression to use. 
+ CatalogNodesQueryRe = regexp.MustCompile(`\A` + dcRe + nearRe + `\z`) +) + +func init() { + gob.Register([]*Node{}) +} + +// Node is a node entry in Consul +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string +} + +// CatalogNodesQuery is the representation of all registered nodes in Consul. +type CatalogNodesQuery struct { + stopCh chan struct{} + + dc string + near string +} + +// NewCatalogNodesQuery parses the given string into a dependency. If the name is +// empty then the name of the local agent is used. +func NewCatalogNodesQuery(s string) (*CatalogNodesQuery, error) { + if !CatalogNodesQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.nodes: invalid format: %q", s) + } + + m := regexpMatch(CatalogNodesQueryRe, s) + return &CatalogNodesQuery{ + dc: m["dc"], + near: m["near"], + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of Node objects +func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/nodes", + RawQuery: opts.String(), + }) + n, qm, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(n)) + + nodes := make([]*Node, 0, len(n)) + for _, node := range n { + nodes = append(nodes, &Node{ + ID: node.ID, + Node: node.Node, + Address: node.Address, + Datacenter: node.Datacenter, + TaggedAddresses: node.TaggedAddresses, + Meta: node.Meta, + }) + } + + // Sort unless the user explicitly asked for nearness + if d.near == "" { + sort.Stable(ByNode(nodes)) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return nodes, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogNodesQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogNodesQuery) String() string { + name := "" + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + + if name == "" { + return "catalog.nodes" + } + return fmt.Sprintf("catalog.nodes(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogNodesQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogNodesQuery) Type() Type { + return TypeConsul +} + +// ByNode is a sortable list of nodes by name and then IP address. 
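Callers turn a Fetch like the one above into a watch by feeding the returned LastIndex back in as the next WaitIndex; Consul then holds the request open until the data changes past that index. A sketch of the loop with a stand-in fetch function (the real watcher machinery lives elsewhere in consul-template):

package main

import "fmt"

// fakeFetch stands in for Dependency.Fetch: passing the previous LastIndex
// back as WaitIndex is what makes the real call block until data changes.
func fakeFetch(waitIndex uint64) (data string, lastIndex uint64) {
	return fmt.Sprintf("snapshot-after-%d", waitIndex), waitIndex + 1
}

func main() {
	var index uint64 // zero means "return immediately"
	for i := 0; i < 3; i++ {
		data, last := fakeFetch(index)
		if last != index { // only act when the index moved
			fmt.Println(data) // e.g. re-render dependent templates
			index = last
		}
	}
}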
+type ByNode []*Node + +func (s ByNode) Len() int { return len(s) } +func (s ByNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByNode) Less(i, j int) bool { + if s[i].Node == s[j].Node { + return s[i].Address <= s[j].Address + } + return s[i].Node <= s[j].Node +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go new file mode 100644 index 000000000000..8b94a59961db --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go @@ -0,0 +1,154 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogServiceQuery)(nil) + + // CatalogServiceQueryRe is the regular expression to use. + CatalogServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + `\z`) +) + +func init() { + gob.Register([]*CatalogSnippet{}) +} + +// CatalogService is a catalog entry in Consul. +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags ServiceTags + ServiceMeta map[string]string + ServicePort int +} + +// CatalogServiceQuery is the representation of a requested catalog services +// dependency from inside a template. +type CatalogServiceQuery struct { + stopCh chan struct{} + + dc string + name string + near string + tag string +} + +// NewCatalogServiceQuery parses a string into a CatalogServiceQuery. +func NewCatalogServiceQuery(s string) (*CatalogServiceQuery, error) { + if !CatalogServiceQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.service: invalid format: %q", s) + } + + m := regexpMatch(CatalogServiceQueryRe, s) + return &CatalogServiceQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + name: m["name"], + near: m["near"], + tag: m["tag"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. +func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + u := &url.URL{ + Path: "/v1/catalog/service/" + d.name, + RawQuery: opts.String(), + } + if d.tag != "" { + q := u.Query() + q.Set("tag", d.tag) + u.RawQuery = q.Encode() + } + log.Printf("[TRACE] %s: GET %s", d, u) + + entries, qm, err := clients.Consul().Catalog().Service(d.name, d.tag, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + var list []*CatalogService + for _, s := range entries { + list = append(list, &CatalogService{ + ID: s.ID, + Node: s.Node, + Address: s.Address, + Datacenter: s.Datacenter, + TaggedAddresses: s.TaggedAddresses, + NodeMeta: s.NodeMeta, + ServiceID: s.ServiceID, + ServiceName: s.ServiceName, + ServiceAddress: s.ServiceAddress, + ServiceTags: ServiceTags(deepCopyAndSortTags(s.ServiceTags)), + ServiceMeta: s.ServiceMeta, + ServicePort: s.ServicePort, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return list, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. 
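The query grammar composed from tagRe, serviceNameRe, dcRe, and nearRe reads as tag.name@dc~near, with everything except the service name optional. A usage sketch against the vendored package:

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/dependency"
)

func main() {
	// Tag "v1", service "web", datacenter "dc1", sorted near the local agent.
	q, err := dependency.NewCatalogServiceQuery("v1.web@dc1~_agent")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.String()) // catalog.service(v1.web@dc1~_agent)
}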
+func (d *CatalogServiceQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogServiceQuery) String() string { + name := d.name + if d.tag != "" { + name = d.tag + "." + name + } + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + return fmt.Sprintf("catalog.service(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogServiceQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogServiceQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go new file mode 100644 index 000000000000..06ce03a77be4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go @@ -0,0 +1,129 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogServicesQuery)(nil) + + // CatalogServicesQueryRe is the regular expression to use for CatalogNodesQuery. + CatalogServicesQueryRe = regexp.MustCompile(`\A` + dcRe + `\z`) +) + +func init() { + gob.Register([]*CatalogSnippet{}) +} + +// CatalogSnippet is a catalog entry in Consul. +type CatalogSnippet struct { + Name string + Tags ServiceTags +} + +// CatalogServicesQuery is the representation of a requested catalog service +// dependency from inside a template. +type CatalogServicesQuery struct { + stopCh chan struct{} + + dc string +} + +// NewCatalogServicesQuery parses a string of the format @dc. +func NewCatalogServicesQuery(s string) (*CatalogServicesQuery, error) { + if !CatalogServicesQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.services: invalid format: %q", s) + } + + m := regexpMatch(CatalogServicesQueryRe, s) + return &CatalogServicesQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. +func (d *CatalogServicesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/services", + RawQuery: opts.String(), + }) + + entries, qm, err := clients.Consul().Catalog().Services(opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + var catalogServices []*CatalogSnippet + for name, tags := range entries { + catalogServices = append(catalogServices, &CatalogSnippet{ + Name: name, + Tags: ServiceTags(deepCopyAndSortTags(tags)), + }) + } + + sort.Stable(ByName(catalogServices)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return catalogServices, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogServicesQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. 
+func (d *CatalogServicesQuery) String() string { + if d.dc != "" { + return fmt.Sprintf("catalog.services(@%s)", d.dc) + } + return "catalog.services" +} + +// Stop halts the dependency's fetch function. +func (d *CatalogServicesQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogServicesQuery) Type() Type { + return TypeConsul +} + +// ByName is a sortable slice of CatalogService structs. +type ByName []*CatalogSnippet + +func (s ByName) Len() int { return len(s) } +func (s ByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByName) Less(i, j int) bool { + if s[i].Name <= s[j].Name { + return true + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go new file mode 100644 index 000000000000..e2bceb773820 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go @@ -0,0 +1,338 @@ +package dependency + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + rootcerts "github.com/hashicorp/go-rootcerts" + vaultapi "github.com/hashicorp/vault/api" +) + +// ClientSet is a collection of clients that dependencies use to communicate +// with remote services like Consul or Vault. +type ClientSet struct { + sync.RWMutex + + vault *vaultClient + consul *consulClient +} + +// consulClient is a wrapper around a real Consul API client. +type consulClient struct { + client *consulapi.Client + transport *http.Transport +} + +// vaultClient is a wrapper around a real Vault API client. +type vaultClient struct { + client *vaultapi.Client + httpClient *http.Client +} + +// CreateConsulClientInput is used as input to the CreateConsulClient function. +type CreateConsulClientInput struct { + Address string + Token string + AuthEnabled bool + AuthUsername string + AuthPassword string + SSLEnabled bool + SSLVerify bool + SSLCert string + SSLKey string + SSLCACert string + SSLCAPath string + ServerName string + + TransportDialKeepAlive time.Duration + TransportDialTimeout time.Duration + TransportDisableKeepAlives bool + TransportIdleConnTimeout time.Duration + TransportMaxIdleConns int + TransportMaxIdleConnsPerHost int + TransportTLSHandshakeTimeout time.Duration +} + +// CreateVaultClientInput is used as input to the CreateVaultClient function. +type CreateVaultClientInput struct { + Address string + Namespace string + Token string + UnwrapToken bool + SSLEnabled bool + SSLVerify bool + SSLCert string + SSLKey string + SSLCACert string + SSLCAPath string + ServerName string + + TransportDialKeepAlive time.Duration + TransportDialTimeout time.Duration + TransportDisableKeepAlives bool + TransportIdleConnTimeout time.Duration + TransportMaxIdleConns int + TransportMaxIdleConnsPerHost int + TransportTLSHandshakeTimeout time.Duration +} + +// NewClientSet creates a new client set that is ready to accept clients. +func NewClientSet() *ClientSet { + return &ClientSet{} +} + +// CreateConsulClient creates a new Consul API client from the given input. 
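A typical consumer builds one ClientSet up front and hands it to every dependency's Fetch. A hedged sketch using only the exported pieces of this file, assuming a Consul agent on its default local address:

package main

import (
	"log"

	"github.com/hashicorp/consul-template/dependency"
)

func main() {
	clients := dependency.NewClientSet()
	defer clients.Stop()

	// Against a default local agent, only Address needs to be set.
	err := clients.CreateConsulClient(&dependency.CreateConsulClientInput{
		Address: "127.0.0.1:8500",
	})
	if err != nil {
		log.Fatal(err)
	}

	// The underlying API client is now available to dependencies.
	_ = clients.Consul()
}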
+func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { + consulConfig := consulapi.DefaultConfig() + + if i.Address != "" { + consulConfig.Address = i.Address + } + + if i.Token != "" { + consulConfig.Token = i.Token + } + + if i.AuthEnabled { + consulConfig.HttpAuth = &consulapi.HttpBasicAuth{ + Username: i.AuthUsername, + Password: i.AuthPassword, + } + } + + // This transport will attempt to keep connections open to the Consul server. + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: i.TransportDialTimeout, + KeepAlive: i.TransportDialKeepAlive, + }).Dial, + DisableKeepAlives: i.TransportDisableKeepAlives, + MaxIdleConns: i.TransportMaxIdleConns, + IdleConnTimeout: i.TransportIdleConnTimeout, + MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, + TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, + } + + // Configure SSL + if i.SSLEnabled { + consulConfig.Scheme = "https" + + var tlsConfig tls.Config + + // Custom certificate or certificate and key + if i.SSLCert != "" && i.SSLKey != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } else if i.SSLCert != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Custom CA certificate + if i.SSLCACert != "" || i.SSLCAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: i.SSLCACert, + CAPath: i.SSLCAPath, + } + if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { + return fmt.Errorf("client set: consul configuring TLS failed: %s", err) + } + } + + // Construct all the certificates now + tlsConfig.BuildNameToCertificate() + + // SSL verification + if i.ServerName != "" { + tlsConfig.ServerName = i.ServerName + tlsConfig.InsecureSkipVerify = false + } + if !i.SSLVerify { + log.Printf("[WARN] (clients) disabling consul SSL verification") + tlsConfig.InsecureSkipVerify = true + } + + // Save the TLS config on our transport + transport.TLSClientConfig = &tlsConfig + } + + // Setup the new transport + consulConfig.Transport = transport + + // Create the API client + client, err := consulapi.NewClient(consulConfig) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + + // Save the data on ourselves + c.Lock() + c.consul = &consulClient{ + client: client, + transport: transport, + } + c.Unlock() + + return nil +} + +func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { + vaultConfig := vaultapi.DefaultConfig() + + if i.Address != "" { + vaultConfig.Address = i.Address + } + + // This transport will attempt to keep connections open to the Vault server. 
+ transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: i.TransportDialTimeout, + KeepAlive: i.TransportDialKeepAlive, + }).Dial, + DisableKeepAlives: i.TransportDisableKeepAlives, + MaxIdleConns: i.TransportMaxIdleConns, + IdleConnTimeout: i.TransportIdleConnTimeout, + MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, + TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, + } + + // Configure SSL + if i.SSLEnabled { + var tlsConfig tls.Config + + // Custom certificate or certificate and key + if i.SSLCert != "" && i.SSLKey != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } else if i.SSLCert != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Custom CA certificate + if i.SSLCACert != "" || i.SSLCAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: i.SSLCACert, + CAPath: i.SSLCAPath, + } + if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { + return fmt.Errorf("client set: vault configuring TLS failed: %s", err) + } + } + + // Construct all the certificates now + tlsConfig.BuildNameToCertificate() + + // SSL verification + if i.ServerName != "" { + tlsConfig.ServerName = i.ServerName + tlsConfig.InsecureSkipVerify = false + } + if !i.SSLVerify { + log.Printf("[WARN] (clients) disabling vault SSL verification") + tlsConfig.InsecureSkipVerify = true + } + + // Save the TLS config on our transport + transport.TLSClientConfig = &tlsConfig + } + + // Setup the new transport + vaultConfig.HttpClient.Transport = transport + + // Create the client + client, err := vaultapi.NewClient(vaultConfig) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + + // Set the namespace if given. + if i.Namespace != "" { + client.SetNamespace(i.Namespace) + } + + // Set the token if given + if i.Token != "" { + client.SetToken(i.Token) + } + + // Check if we are unwrapping + if i.UnwrapToken { + secret, err := client.Logical().Unwrap(i.Token) + if err != nil { + return fmt.Errorf("client set: vault unwrap: %s", err) + } + + if secret == nil { + return fmt.Errorf("client set: vault unwrap: no secret") + } + + if secret.Auth == nil { + return fmt.Errorf("client set: vault unwrap: no secret auth") + } + + if secret.Auth.ClientToken == "" { + return fmt.Errorf("client set: vault unwrap: no token returned") + } + + client.SetToken(secret.Auth.ClientToken) + } + + // Save the data on ourselves + c.Lock() + c.vault = &vaultClient{ + client: client, + httpClient: vaultConfig.HttpClient, + } + c.Unlock() + + return nil +} + +// Consul returns the Consul client for this set. +func (c *ClientSet) Consul() *consulapi.Client { + c.RLock() + defer c.RUnlock() + return c.consul.client +} + +// Vault returns the Vault client for this set. +func (c *ClientSet) Vault() *vaultapi.Client { + c.RLock() + defer c.RUnlock() + return c.vault.client +} + +// Stop closes all idle connections for any attached clients. 
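When UnwrapToken is set, CreateVaultClient above exchanges the supplied response-wrapping token for the real client token before saving the client. A hedged usage sketch; the address and token values are placeholders:

package main

import (
	"log"

	"github.com/hashicorp/consul-template/dependency"
)

func main() {
	clients := dependency.NewClientSet()
	defer clients.Stop()

	err := clients.CreateVaultClient(&dependency.CreateVaultClientInput{
		Address:     "https://vault.example.com:8200", // placeholder address
		Token:       "s.wrapped-token",                // placeholder wrapping token
		UnwrapToken: true,                             // swap it for the real token
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = clients.Vault()
}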
+func (c *ClientSet) Stop() { + c.Lock() + defer c.Unlock() + + if c.consul != nil { + c.consul.transport.CloseIdleConnections() + } + + if c.vault != nil { + c.vault.httpClient.Transport.(*http.Transport).CloseIdleConnections() + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go new file mode 100644 index 000000000000..c9161f82f498 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go @@ -0,0 +1,189 @@ +package dependency + +import ( + "net/url" + "regexp" + "sort" + "strconv" + "time" + + consulapi "github.com/hashicorp/consul/api" +) + +const ( + dcRe = `(@(?P[[:word:]\.\-\_]+))?` + keyRe = `/?(?P[^@]+)` + filterRe = `(\|(?P[[:word:]\,]+))?` + serviceNameRe = `(?P[[:word:]\-\_]+)` + nodeNameRe = `(?P[[:word:]\.\-\_]+)` + nearRe = `(~(?P[[:word:]\.\-\_]+))?` + prefixRe = `/?(?P[^@]+)` + tagRe = `((?P[[:word:]=:\.\-\_]+)\.)?` +) + +type Type int + +const ( + TypeConsul Type = iota + TypeVault + TypeLocal +) + +// Dependency is an interface for a dependency that Consul Template is capable +// of watching. +type Dependency interface { + Fetch(*ClientSet, *QueryOptions) (interface{}, *ResponseMetadata, error) + CanShare() bool + String() string + Stop() + Type() Type +} + +// ServiceTags is a slice of tags assigned to a Service +type ServiceTags []string + +// QueryOptions is a list of options to send with the query. These options are +// client-agnostic, and the dependency determines which, if any, of the options +// to use. +type QueryOptions struct { + AllowStale bool + Datacenter string + Near string + RequireConsistent bool + VaultGrace time.Duration + WaitIndex uint64 + WaitTime time.Duration +} + +func (q *QueryOptions) Merge(o *QueryOptions) *QueryOptions { + var r QueryOptions + + if q == nil { + if o == nil { + return &QueryOptions{} + } + r = *o + return &r + } + + r = *q + + if o == nil { + return &r + } + + if o.AllowStale != false { + r.AllowStale = o.AllowStale + } + + if o.Datacenter != "" { + r.Datacenter = o.Datacenter + } + + if o.Near != "" { + r.Near = o.Near + } + + if o.RequireConsistent != false { + r.RequireConsistent = o.RequireConsistent + } + + if o.WaitIndex != 0 { + r.WaitIndex = o.WaitIndex + } + + if o.WaitTime != 0 { + r.WaitTime = o.WaitTime + } + + return &r +} + +func (q *QueryOptions) ToConsulOpts() *consulapi.QueryOptions { + return &consulapi.QueryOptions{ + AllowStale: q.AllowStale, + Datacenter: q.Datacenter, + Near: q.Near, + RequireConsistent: q.RequireConsistent, + WaitIndex: q.WaitIndex, + WaitTime: q.WaitTime, + } +} + +func (q *QueryOptions) String() string { + u := &url.Values{} + + if q.AllowStale { + u.Add("stale", strconv.FormatBool(q.AllowStale)) + } + + if q.Datacenter != "" { + u.Add("dc", q.Datacenter) + } + + if q.Near != "" { + u.Add("near", q.Near) + } + + if q.RequireConsistent { + u.Add("consistent", strconv.FormatBool(q.RequireConsistent)) + } + + if q.WaitIndex != 0 { + u.Add("index", strconv.FormatUint(q.WaitIndex, 10)) + } + + if q.WaitTime != 0 { + u.Add("wait", q.WaitTime.String()) + } + + return u.Encode() +} + +// ResponseMetadata is a struct that contains metadata about the response. This +// is returned from a Fetch function call. +type ResponseMetadata struct { + LastIndex uint64 + LastContact time.Duration + Block bool +} + +// deepCopyAndSortTags deep copies the tags in the given string slice and then +// sorts and returns the copied result. 
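Anything satisfying the five-method Dependency interface above can be watched. A sketch of a trivial local dependency; the EnvQuery type is illustrative and not part of consul-template:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/hashicorp/consul-template/dependency"
)

// EnvQuery resolves an environment variable once.
type EnvQuery struct {
	key string
}

func (d *EnvQuery) Fetch(clients *dependency.ClientSet, opts *dependency.QueryOptions) (interface{}, *dependency.ResponseMetadata, error) {
	// Fake an index from the clock, as respWithMetadata does for local deps.
	return os.Getenv(d.key), &dependency.ResponseMetadata{
		LastIndex: uint64(time.Now().Unix()),
	}, nil
}

func (d *EnvQuery) CanShare() bool        { return true }
func (d *EnvQuery) String() string        { return fmt.Sprintf("env(%s)", d.key) }
func (d *EnvQuery) Stop()                 {}
func (d *EnvQuery) Type() dependency.Type { return dependency.TypeLocal }

// Compile-time check, matching the pattern used by the built-in queries.
var _ dependency.Dependency = (*EnvQuery)(nil)

func main() {
	d := &EnvQuery{key: "HOME"}
	val, _, _ := d.Fetch(nil, nil)
	fmt.Println(d, val)
}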
+func deepCopyAndSortTags(tags []string) []string { + newTags := make([]string, 0, len(tags)) + for _, tag := range tags { + newTags = append(newTags, tag) + } + sort.Strings(newTags) + return newTags +} + +// respWithMetadata is a short wrapper to return the given interface with fake +// response metadata for non-Consul dependencies. +func respWithMetadata(i interface{}) (interface{}, *ResponseMetadata, error) { + return i, &ResponseMetadata{ + LastContact: 0, + LastIndex: uint64(time.Now().Unix()), + }, nil +} + +// regexpMatch matches the given regexp and extracts the match groups into a +// named map. +func regexpMatch(re *regexp.Regexp, q string) map[string]string { + names := re.SubexpNames() + match := re.FindAllStringSubmatch(q, -1) + + if len(match) == 0 { + return map[string]string{} + } + + m := map[string]string{} + for i, n := range match[0] { + if names[i] != "" { + m[names[i]] = n + } + } + + return m +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/errors.go b/vendor/github.com/hashicorp/consul-template/dependency/errors.go new file mode 100644 index 000000000000..dd03ac877157 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/errors.go @@ -0,0 +1,13 @@ +package dependency + +import "errors" + +// ErrStopped is a special error that is returned when a dependency is +// prematurely stopped, usually due to a configuration reload or a process +// interrupt. +var ErrStopped = errors.New("dependency stopped") + +// ErrContinue is a special error which says to continue (retry) on error. +var ErrContinue = errors.New("dependency continue") + +var ErrLeaseExpired = errors.New("lease expired or is not renewable") diff --git a/vendor/github.com/hashicorp/consul-template/dependency/file.go b/vendor/github.com/hashicorp/consul-template/dependency/file.go new file mode 100644 index 000000000000..3f9fb52e8d19 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/file.go @@ -0,0 +1,129 @@ +package dependency + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*FileQuery)(nil) + + // FileQuerySleepTime is the amount of time to sleep between queries, since + // the fsnotify library is not compatible with solaris and other OSes yet. + FileQuerySleepTime = 2 * time.Second +) + +// FileQuery represents a local file dependency. +type FileQuery struct { + stopCh chan struct{} + + path string + stat os.FileInfo +} + +// NewFileQuery creates a file dependency from the given path. +func NewFileQuery(s string) (*FileQuery, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, fmt.Errorf("file: invalid format: %q", s) + } + + return &FileQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} + +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. 
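regexpMatch above turns named capture groups into a map, which is how every New*Query constructor in this package parses its input string. A standalone illustration of the same mechanics:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same name@dc grammar the catalog queries build from their fragments.
	re := regexp.MustCompile(`\A(?P<name>[[:word:]\-\_]+)(@(?P<dc>[[:word:]\.\-\_]+))?\z`)

	match := re.FindStringSubmatch("web@dc1")
	if match == nil {
		panic("no match")
	}

	m := map[string]string{}
	for i, name := range re.SubexpNames() {
		if name != "" {
			m[name] = match[i]
		}
	}
	fmt.Println(m["name"], m["dc"]) // web dc1
}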
+func (d *FileQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + log.Printf("[TRACE] %s: READ %s", d, d.path) + + select { + case <-d.stopCh: + log.Printf("[TRACE] %s: stopped", d) + return "", nil, ErrStopped + case r := <-d.watch(d.stat): + if r.err != nil { + return "", nil, errors.Wrap(r.err, d.String()) + } + + log.Printf("[TRACE] %s: reported change", d) + + data, err := ioutil.ReadFile(d.path) + if err != nil { + return "", nil, errors.Wrap(err, d.String()) + } + + d.stat = r.stat + return respWithMetadata(string(data)) + } +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *FileQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *FileQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *FileQuery) String() string { + return fmt.Sprintf("file(%s)", d.path) +} + +// Type returns the type of this dependency. +func (d *FileQuery) Type() Type { + return TypeLocal +} + +type watchResult struct { + stat os.FileInfo + err error +} + +// watch watchers the file for changes +func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult { + ch := make(chan *watchResult, 1) + + go func(lastStat os.FileInfo) { + for { + stat, err := os.Stat(d.path) + if err != nil { + select { + case <-d.stopCh: + return + case ch <- &watchResult{err: err}: + return + } + } + + changed := lastStat == nil || + lastStat.Size() != stat.Size() || + lastStat.ModTime() != stat.ModTime() + + if changed { + select { + case <-d.stopCh: + return + case ch <- &watchResult{stat: stat}: + return + } + } + + time.Sleep(FileQuerySleepTime) + } + }(lastStat) + + return ch +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go new file mode 100644 index 000000000000..215f53d0d2a2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go @@ -0,0 +1,248 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + "strings" + + "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +const ( + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" + + NodeMaint = "_node_maintenance" + ServiceMaint = "_service_maintenance:" +) + +var ( + // Ensure implements + _ Dependency = (*HealthServiceQuery)(nil) + + // HealthServiceQueryRe is the regular expression to use. + HealthServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + filterRe + `\z`) +) + +func init() { + gob.Register([]*HealthService{}) +} + +// HealthService is a service entry in Consul. +type HealthService struct { + Node string + NodeID string + NodeAddress string + NodeTaggedAddresses map[string]string + NodeMeta map[string]string + ServiceMeta map[string]string + Address string + ID string + Name string + Tags ServiceTags + Checks api.HealthChecks + Status string + Port int +} + +// HealthServiceQuery is the representation of all a service query in Consul. +type HealthServiceQuery struct { + stopCh chan struct{} + + dc string + filters []string + name string + near string + tag string +} + +// NewHealthServiceQuery processes the strings to build a service dependency. 
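+// The accepted format is "[tag.]name[@dc][~near][|filter,filter]", as
+// defined by HealthServiceQueryRe. For example (illustrative values):
+//
+//	q, err := NewHealthServiceQuery("v1.web@dc1|passing,warning")
+//	// matches service "web" tagged "v1" in datacenter "dc1", filtered to
+//	// instances whose aggregated check status is passing or warning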
+func NewHealthServiceQuery(s string) (*HealthServiceQuery, error) { + if !HealthServiceQueryRe.MatchString(s) { + return nil, fmt.Errorf("health.service: invalid format: %q", s) + } + + m := regexpMatch(HealthServiceQueryRe, s) + + var filters []string + if filter := m["filter"]; filter != "" { + split := strings.Split(filter, ",") + for _, f := range split { + f = strings.TrimSpace(f) + switch f { + case HealthAny, + HealthPassing, + HealthWarning, + HealthCritical, + HealthMaint: + filters = append(filters, f) + case "": + default: + return nil, fmt.Errorf("health.service: invalid filter: %q in %q", f, s) + } + } + sort.Strings(filters) + } else { + filters = []string{HealthPassing} + } + + return &HealthServiceQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + filters: filters, + name: m["name"], + near: m["near"], + tag: m["tag"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of HealthService objects. +func (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + u := &url.URL{ + Path: "/v1/health/service/" + d.name, + RawQuery: opts.String(), + } + if d.tag != "" { + q := u.Query() + q.Set("tag", d.tag) + u.RawQuery = q.Encode() + } + log.Printf("[TRACE] %s: GET %s", d, u) + + // Check if a user-supplied filter was given. If so, we may be querying for + // more than healthy services, so we need to implement client-side filtering. + passingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing + + entries, qm, err := clients.Consul().Health().Service(d.name, d.tag, passingOnly, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + list := make([]*HealthService, 0, len(entries)) + for _, entry := range entries { + // Get the status of this service from its checks. + status := entry.Checks.AggregatedStatus() + + // If we are not checking only healthy services, filter out services that do + // not match the given filter. + if !acceptStatus(d.filters, status) { + continue + } + + // Get the address of the service, falling back to the address of the node. + address := entry.Service.Address + if address == "" { + address = entry.Node.Address + } + + list = append(list, &HealthService{ + Node: entry.Node.Node, + NodeID: entry.Node.ID, + NodeAddress: entry.Node.Address, + NodeTaggedAddresses: entry.Node.TaggedAddresses, + NodeMeta: entry.Node.Meta, + ServiceMeta: entry.Service.Meta, + Address: address, + ID: entry.Service.ID, + Name: entry.Service.Service, + Tags: ServiceTags(deepCopyAndSortTags(entry.Service.Tags)), + Status: status, + Checks: entry.Checks, + Port: entry.Service.Port, + }) + } + + log.Printf("[TRACE] %s: returned %d results after filtering", d, len(list)) + + // Sort unless the user explicitly asked for nearness + if d.near == "" { + sort.Stable(ByNodeThenID(list)) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return list, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *HealthServiceQuery) CanShare() bool { + return true +} + +// Stop halts the dependency's fetch function. +func (d *HealthServiceQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. 
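+// For example, a query built from "v1.web@dc1|passing" renders as
+// "health.service(v1.web@dc1|passing)".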
+func (d *HealthServiceQuery) String() string { + name := d.name + if d.tag != "" { + name = d.tag + "." + name + } + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + if len(d.filters) > 0 { + name = name + "|" + strings.Join(d.filters, ",") + } + return fmt.Sprintf("health.service(%s)", name) +} + +// Type returns the type of this dependency. +func (d *HealthServiceQuery) Type() Type { + return TypeConsul +} + +// acceptStatus allows us to check if a slice of health checks pass this filter. +func acceptStatus(list []string, s string) bool { + for _, status := range list { + if status == s || status == HealthAny { + return true + } + } + return false +} + +// ByNodeThenID is a sortable slice of Service +type ByNodeThenID []*HealthService + +// Len, Swap, and Less are used to implement the sort.Sort interface. +func (s ByNodeThenID) Len() int { return len(s) } +func (s ByNodeThenID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByNodeThenID) Less(i, j int) bool { + if s[i].Node < s[j].Node { + return true + } else if s[i].Node == s[j].Node { + return s[i].ID <= s[j].ID + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go new file mode 100644 index 000000000000..a075ea5dfcb5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go @@ -0,0 +1,112 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVGetQuery)(nil) + + // KVGetQueryRe is the regular expression to use. + KVGetQueryRe = regexp.MustCompile(`\A` + keyRe + dcRe + `\z`) +) + +// KVGetQuery queries the KV store for a single key. +type KVGetQuery struct { + stopCh chan struct{} + + dc string + key string + block bool +} + +// NewKVGetQuery parses a string into a dependency. +func NewKVGetQuery(s string) (*KVGetQuery, error) { + if s != "" && !KVGetQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.get: invalid format: %q", s) + } + + m := regexpMatch(KVGetQueryRe, s) + return &KVGetQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + key: m["key"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. +func (d *KVGetQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.key, + RawQuery: opts.String(), + }) + + pair, qm, err := clients.Consul().KV().Get(d.key, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + Block: d.block, + } + + if pair == nil { + log.Printf("[TRACE] %s: returned nil", d) + return nil, rm, nil + } + + value := string(pair.Value) + log.Printf("[TRACE] %s: returned %q", d, value) + return value, rm, nil +} + +// EnableBlocking turns this into a blocking KV query. +func (d *KVGetQuery) EnableBlocking() { + d.block = true +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVGetQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. 
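+// For example, a query built from "my/key@dc1" renders as
+// "kv.get(my/key@dc1)", or as "kv.block(my/key@dc1)" after EnableBlocking
+// has been called.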
+func (d *KVGetQuery) String() string { + key := d.key + if d.dc != "" { + key = key + "@" + d.dc + } + + if d.block { + return fmt.Sprintf("kv.block(%s)", key) + } + return fmt.Sprintf("kv.get(%s)", key) +} + +// Stop halts the dependency's fetch function. +func (d *KVGetQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *KVGetQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go new file mode 100644 index 000000000000..60e1ef7e4a1b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go @@ -0,0 +1,104 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVKeysQuery)(nil) + + // KVKeysQueryRe is the regular expression to use. + KVKeysQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) +) + +// KVKeysQuery queries the KV store for a single key. +type KVKeysQuery struct { + stopCh chan struct{} + + dc string + prefix string +} + +// NewKVKeysQuery parses a string into a dependency. +func NewKVKeysQuery(s string) (*KVKeysQuery, error) { + if s != "" && !KVKeysQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.keys: invalid format: %q", s) + } + + m := regexpMatch(KVKeysQueryRe, s) + return &KVKeysQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + prefix: m["prefix"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. +func (d *KVKeysQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.prefix, + RawQuery: opts.String(), + }) + + list, qm, err := clients.Consul().KV().Keys(d.prefix, "", opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + keys := make([]string, len(list)) + for i, v := range list { + v = strings.TrimPrefix(v, d.prefix) + v = strings.TrimLeft(v, "/") + keys[i] = v + } + + log.Printf("[TRACE] %s: returned %d results", d, len(list)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return keys, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVKeysQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVKeysQuery) String() string { + prefix := d.prefix + if d.dc != "" { + prefix = prefix + "@" + d.dc + } + return fmt.Sprintf("kv.keys(%s)", prefix) +} + +// Stop halts the dependency's fetch function. +func (d *KVKeysQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. 
+func (d *KVKeysQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go new file mode 100644 index 000000000000..929dfa423c73 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go @@ -0,0 +1,133 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVListQuery)(nil) + + // KVListQueryRe is the regular expression to use. + KVListQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) +) + +func init() { + gob.Register([]*KeyPair{}) +} + +// KeyPair is a simple Key-Value pair +type KeyPair struct { + Path string + Key string + Value string + + // Lesser-used, but still valuable keys from api.KV + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Session string +} + +// KVListQuery queries the KV store for a single key. +type KVListQuery struct { + stopCh chan struct{} + + dc string + prefix string +} + +// NewKVListQuery parses a string into a dependency. +func NewKVListQuery(s string) (*KVListQuery, error) { + if s != "" && !KVListQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.list: invalid format: %q", s) + } + + m := regexpMatch(KVListQueryRe, s) + return &KVListQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + prefix: m["prefix"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. +func (d *KVListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.prefix, + RawQuery: opts.String(), + }) + + list, qm, err := clients.Consul().KV().List(d.prefix, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d pairs", d, len(list)) + + pairs := make([]*KeyPair, 0, len(list)) + for _, pair := range list { + key := strings.TrimPrefix(pair.Key, d.prefix) + key = strings.TrimLeft(key, "/") + + pairs = append(pairs, &KeyPair{ + Path: pair.Key, + Key: key, + Value: string(pair.Value), + CreateIndex: pair.CreateIndex, + ModifyIndex: pair.ModifyIndex, + LockIndex: pair.LockIndex, + Flags: pair.Flags, + Session: pair.Session, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return pairs, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVListQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVListQuery) String() string { + prefix := d.prefix + if d.dc != "" { + prefix = prefix + "@" + d.dc + } + return fmt.Sprintf("kv.list(%s)", prefix) +} + +// Stop halts the dependency's fetch function. +func (d *KVListQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. 
+func (d *KVListQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/set.go b/vendor/github.com/hashicorp/consul-template/dependency/set.go new file mode 100644 index 000000000000..d3a5df3ab95f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/set.go @@ -0,0 +1,72 @@ +package dependency + +import ( + "strings" + "sync" +) + +// Set is a dependency-specific set implementation. Relative ordering is +// preserved. +type Set struct { + once sync.Once + sync.RWMutex + list []string + set map[string]Dependency +} + +// Add adds a new element to the set if it does not already exist. +func (s *Set) Add(d Dependency) bool { + s.init() + s.Lock() + defer s.Unlock() + if _, ok := s.set[d.String()]; !ok { + s.list = append(s.list, d.String()) + s.set[d.String()] = d + return true + } + return false +} + +// Get retrieves a single element from the set by name. +func (s *Set) Get(v string) Dependency { + s.RLock() + defer s.RUnlock() + return s.set[v] +} + +// List returns the insertion-ordered list of dependencies. +func (s *Set) List() []Dependency { + s.RLock() + defer s.RUnlock() + r := make([]Dependency, len(s.list)) + for i, k := range s.list { + r[i] = s.set[k] + } + return r +} + +// Len is the size of the set. +func (s *Set) Len() int { + s.RLock() + defer s.RUnlock() + return len(s.list) +} + +// String is a string representation of the set. +func (s *Set) String() string { + s.RLock() + defer s.RUnlock() + return strings.Join(s.list, ", ") +} + +func (s *Set) init() { + s.once.Do(func() { + if s.list == nil { + s.list = make([]string, 0, 8) + } + + if s.set == nil { + s.set = make(map[string]Dependency) + } + }) +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go new file mode 100644 index 000000000000..1ce339ea6e25 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go @@ -0,0 +1,121 @@ +package dependency + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultAgentTokenQuery)(nil) +) + +const ( + // VaultAgentTokenSleepTime is the amount of time to sleep between queries, since + // the fsnotify library is not compatible with solaris and other OSes yet. + VaultAgentTokenSleepTime = 15 * time.Second +) + +// VaultAgentTokenQuery is the dependency to Vault Agent token +type VaultAgentTokenQuery struct { + stopCh chan struct{} + path string + stat os.FileInfo +} + +// NewVaultAgentTokenQuery creates a new dependency. +func NewVaultAgentTokenQuery(path string) (*VaultAgentTokenQuery, error) { + return &VaultAgentTokenQuery{ + stopCh: make(chan struct{}, 1), + path: path, + }, nil +} + +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. 
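+// On every change to the token file, the new token is installed on the
+// shared Vault client via SetToken; the file is re-checked every
+// VaultAgentTokenSleepTime.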
+func (d *VaultAgentTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + log.Printf("[TRACE] %s: READ %s", d, d.path) + + select { + case <-d.stopCh: + log.Printf("[TRACE] %s: stopped", d) + return "", nil, ErrStopped + case r := <-d.watch(d.stat): + if r.err != nil { + return "", nil, errors.Wrap(r.err, d.String()) + } + + log.Printf("[TRACE] %s: reported change", d) + + token, err := ioutil.ReadFile(d.path) + if err != nil { + return "", nil, errors.Wrap(err, d.String()) + } + + d.stat = r.stat + clients.Vault().SetToken(strings.TrimSpace(string(token))) + } + + return respWithMetadata("") +} + +// CanShare returns if this dependency is sharable. +func (d *VaultAgentTokenQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *VaultAgentTokenQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultAgentTokenQuery) String() string { + return "vault-agent.token" +} + +// Type returns the type of this dependency. +func (d *VaultAgentTokenQuery) Type() Type { + return TypeVault +} + +// watch watches the file for changes +func (d *VaultAgentTokenQuery) watch(lastStat os.FileInfo) <-chan *watchResult { + ch := make(chan *watchResult, 1) + + go func(lastStat os.FileInfo) { + for { + stat, err := os.Stat(d.path) + if err != nil { + select { + case <-d.stopCh: + return + case ch <- &watchResult{err: err}: + return + } + } + + changed := lastStat == nil || + lastStat.Size() != stat.Size() || + lastStat.ModTime() != stat.ModTime() + + if changed { + select { + case <-d.stopCh: + return + case ch <- &watchResult{stat: stat}: + return + } + } + + time.Sleep(VaultAgentTokenSleepTime) + } + }(lastStat) + + return ch +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go new file mode 100644 index 000000000000..6abe69cfd142 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go @@ -0,0 +1,348 @@ +package dependency + +import ( + "log" + "math/rand" + "path" + "strings" + "time" + + "crypto/x509" + "encoding/pem" + + "github.com/hashicorp/vault/api" +) + +var ( + // VaultDefaultLeaseDuration is the default lease duration in seconds. + VaultDefaultLeaseDuration = 5 * time.Minute +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + // The request ID that generated this response + RequestID string + + LeaseID string + LeaseDuration int + Renewable bool + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string + + // Auth, if non-nil, means that there was authentication information + // attached to this response. + Auth *SecretAuth + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo +} + +// SecretAuth is the structure containing auth information if we have it. 
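+// The fields mirror the subset of the Vault API's auth data that
+// updateSecret copies over.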
+type SecretAuth struct {
+	ClientToken string
+	Accessor    string
+	Policies    []string
+	Metadata    map[string]string
+
+	LeaseDuration int
+	Renewable     bool
+}
+
+// SecretWrapInfo contains wrapping information if we have it. If what is
+// contained is an authentication token, the accessor for the token will be
+// available in WrappedAccessor.
+type SecretWrapInfo struct {
+	Token           string
+	TTL             int
+	CreationTime    time.Time
+	WrappedAccessor string
+}
+
+// renewer is implemented by Vault dependencies whose secrets can be renewed
+// in the background.
+type renewer interface {
+	Dependency
+	stopChan() chan struct{}
+	secrets() (*Secret, *api.Secret)
+}
+
+func renewSecret(clients *ClientSet, d renewer) error {
+	log.Printf("[TRACE] %s: starting renewer", d)
+
+	secret, vaultSecret := d.secrets()
+	renewer, err := clients.Vault().NewRenewer(&api.RenewerInput{
+		Secret: vaultSecret,
+	})
+	if err != nil {
+		return err
+	}
+	go renewer.Renew()
+	defer renewer.Stop()
+
+	for {
+		select {
+		case err := <-renewer.DoneCh():
+			if err != nil {
+				log.Printf("[WARN] %s: failed to renew: %s", d, err)
+			}
+			log.Printf("[WARN] %s: renewer done (maybe the lease expired)", d)
+			return nil
+		case renewal := <-renewer.RenewCh():
+			log.Printf("[TRACE] %s: successfully renewed", d)
+			printVaultWarnings(d, renewal.Secret.Warnings)
+			updateSecret(secret, renewal.Secret)
+		case <-d.stopChan():
+			return ErrStopped
+		}
+	}
+}
+
+// durationFromCert gets the duration of validity from cert data and
+// returns that value as an integer number of seconds.
+func durationFromCert(certData string) int {
+	block, _ := pem.Decode([]byte(certData))
+	if block == nil {
+		return -1
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		log.Printf("[WARN] Unable to parse certificate data: %s", err)
+		return -1
+	}
+
+	return int(cert.NotAfter.Sub(cert.NotBefore).Seconds())
+}
+
+// leaseCheckWait accepts a secret and returns the recommended amount of
+// time to sleep.
+func leaseCheckWait(s *Secret) time.Duration {
+	// Handle whether this is an auth or a regular secret.
+	base := s.LeaseDuration
+	if s.Auth != nil && s.Auth.LeaseDuration > 0 {
+		base = s.Auth.LeaseDuration
+	}
+
+	// Handle if this is a certificate with no lease
+	if certInterface, ok := s.Data["certificate"]; ok && s.LeaseID == "" {
+		if certData, ok := certInterface.(string); ok {
+			newDuration := durationFromCert(certData)
+			if newDuration > 0 {
+				log.Printf("[DEBUG] Found certificate and set lease duration to %d seconds", newDuration)
+				base = newDuration
+			}
+		}
+	}
+
+	// Ensure we have a lease duration, since sometimes this can be zero.
+	if base <= 0 {
+		base = int(VaultDefaultLeaseDuration.Seconds())
+	}
+
+	// Convert to float seconds.
+	sleep := float64(time.Duration(base) * time.Second)
+
+	if vaultSecretRenewable(s) {
+		// Renew at 1/3 the remaining lease. This will give us an opportunity to retry
+		// at least one more time should the first renewal fail.
+		sleep = sleep / 3.0
+
+		// Use some randomness so many clients do not hit Vault simultaneously.
+		sleep = sleep * (rand.Float64() + 1) / 2.0
+	} else {
+		// For non-renewable leases set the renew duration to use as much of the
+		// secret lease as possible. Use a stagger over 85%-95% of the lease duration
+		// so that many clients do not hit Vault simultaneously.
+		sleep = sleep * (.85 + rand.Float64()*0.1)
+	}
+
+	return time.Duration(sleep)
+}
+
+// printVaultWarnings prints warnings for a given dependency.
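+// Warnings are logged at WARN level prefixed with the dependency's String()
+// form so they can be traced back to a specific query.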
+func printVaultWarnings(d Dependency, warnings []string) { + for _, w := range warnings { + log.Printf("[WARN] %s: %s", d, w) + } +} + +// vaultSecretRenewable determines if the given secret is renewable. +func vaultSecretRenewable(s *Secret) bool { + if s.Auth != nil { + return s.Auth.Renewable + } + return s.Renewable +} + +// transformSecret transforms an api secret into our secret. This does not deep +// copy underlying deep data structures, so it's not safe to modify the vault +// secret as that may modify the data in the transformed secret. +func transformSecret(theirs *api.Secret) *Secret { + var ours Secret + updateSecret(&ours, theirs) + return &ours +} + +// updateSecret updates our secret with the new data from the api, careful to +// not overwrite missing data. Renewals don't include the original secret, and +// we don't want to delete that data accidentally. +func updateSecret(ours *Secret, theirs *api.Secret) { + if theirs.RequestID != "" { + ours.RequestID = theirs.RequestID + } + + if theirs.LeaseID != "" { + ours.LeaseID = theirs.LeaseID + } + + if theirs.LeaseDuration != 0 { + ours.LeaseDuration = theirs.LeaseDuration + } + + if theirs.Renewable { + ours.Renewable = theirs.Renewable + } + + if len(theirs.Data) != 0 { + ours.Data = theirs.Data + } + + if len(theirs.Warnings) != 0 { + ours.Warnings = theirs.Warnings + } + + if theirs.Auth != nil { + if ours.Auth == nil { + ours.Auth = &SecretAuth{} + } + + if theirs.Auth.ClientToken != "" { + ours.Auth.ClientToken = theirs.Auth.ClientToken + } + + if theirs.Auth.Accessor != "" { + ours.Auth.Accessor = theirs.Auth.Accessor + } + + if len(theirs.Auth.Policies) != 0 { + ours.Auth.Policies = theirs.Auth.Policies + } + + if len(theirs.Auth.Metadata) != 0 { + ours.Auth.Metadata = theirs.Auth.Metadata + } + + if theirs.Auth.LeaseDuration != 0 { + ours.Auth.LeaseDuration = theirs.Auth.LeaseDuration + } + + if theirs.Auth.Renewable { + ours.Auth.Renewable = theirs.Auth.Renewable + } + } + + if theirs.WrapInfo != nil { + if ours.WrapInfo == nil { + ours.WrapInfo = &SecretWrapInfo{} + } + + if theirs.WrapInfo.Token != "" { + ours.WrapInfo.Token = theirs.WrapInfo.Token + } + + if theirs.WrapInfo.TTL != 0 { + ours.WrapInfo.TTL = theirs.WrapInfo.TTL + } + + if !theirs.WrapInfo.CreationTime.IsZero() { + ours.WrapInfo.CreationTime = theirs.WrapInfo.CreationTime + } + + if theirs.WrapInfo.WrappedAccessor != "" { + ours.WrapInfo.WrappedAccessor = theirs.WrapInfo.WrappedAccessor + } + } +} + +func isKVv2(client *api.Client, path string) (string, bool, error) { + // We don't want to use a wrapping call here so save any custom value and + // restore after + currentWrappingLookupFunc := client.CurrentWrappingLookupFunc() + client.SetWrappingLookupFunc(nil) + defer client.SetWrappingLookupFunc(currentWrappingLookupFunc) + currentOutputCurlString := client.OutputCurlString() + client.SetOutputCurlString(false) + defer client.SetOutputCurlString(currentOutputCurlString) + + r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path) + resp, err := client.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + // If we get a 404 we are using an older version of vault, default to + // version 1 + if resp != nil && resp.StatusCode == 404 { + return "", false, nil + } + + // anonymous requests may fail to access /sys/internal/ui path + // Vault v1.1.3 returns 500 status code but may return 4XX in future + if client.Token() == "" { + return "", false, nil + } + + return "", false, err + } + + secret, err := 
api.ParseSecret(resp.Body) + if err != nil { + return "", false, err + } + var mountPath string + if mountPathRaw, ok := secret.Data["path"]; ok { + mountPath = mountPathRaw.(string) + } + var mountType string + if mountTypeRaw, ok := secret.Data["type"]; ok { + mountType = mountTypeRaw.(string) + } + options := secret.Data["options"] + if options == nil { + return mountPath, false, nil + } + versionRaw := options.(map[string]interface{})["version"] + if versionRaw == nil { + return mountPath, false, nil + } + version := versionRaw.(string) + switch version { + case "", "1": + return mountPath, false, nil + case "2": + return mountPath, mountType == "kv", nil + } + + return mountPath, false, nil +} + +func addPrefixToVKVPath(p, mountPath, apiPrefix string) string { + switch { + case p == mountPath, p == strings.TrimSuffix(mountPath, "/"): + return path.Join(mountPath, apiPrefix) + default: + p = strings.TrimPrefix(p, mountPath) + // Don't add /data to the path if it's been added manually. + if strings.HasPrefix(p, apiPrefix) { + return path.Join(mountPath, p) + } + return path.Join(mountPath, apiPrefix, p) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go new file mode 100644 index 000000000000..3e80fd293fe6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go @@ -0,0 +1,126 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "sort" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultListQuery)(nil) +) + +// VaultListQuery is the dependency to Vault for a secret +type VaultListQuery struct { + stopCh chan struct{} + + path string +} + +// NewVaultListQuery creates a new datacenter dependency. +func NewVaultListQuery(s string) (*VaultListQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.list: invalid format: %q", s) + } + + return &VaultListQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} + +// Fetch queries the Vault API +func (d *VaultListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{}) + + // If this is not the first query, poll to simulate blocking-queries. + if opts.WaitIndex != 0 { + dur := VaultDefaultLeaseDuration + log.Printf("[TRACE] %s: long polling for %s", d, dur) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(dur): + } + } + + // If we got this far, we either didn't have a secret to renew, the secret was + // not renewable, or the renewal failed, so attempt a fresh list. + log.Printf("[TRACE] %s: LIST %s", d, &url.URL{ + Path: "/v1/" + d.path, + RawQuery: opts.String(), + }) + secret, err := clients.Vault().Logical().List(d.path) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + var result []string + + // The secret could be nil if it does not exist. + if secret == nil || secret.Data == nil { + log.Printf("[TRACE] %s: no data", d) + return respWithMetadata(result) + } + + // This is a weird thing that happened once... 
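+	// (Vault list responses are expected to carry a "keys" field; guard
+	// against its absence instead of panicking.)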
+ keys, ok := secret.Data["keys"] + if !ok { + log.Printf("[TRACE] %s: no keys", d) + return respWithMetadata(result) + } + + list, ok := keys.([]interface{}) + if !ok { + log.Printf("[TRACE] %s: not list", d) + return nil, nil, fmt.Errorf("%s: unexpected response", d) + } + + for _, v := range list { + typed, ok := v.(string) + if !ok { + return nil, nil, fmt.Errorf("%s: non-string in list", d) + } + result = append(result, typed) + } + sort.Strings(result) + + log.Printf("[TRACE] %s: returned %d results", d, len(result)) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultListQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultListQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultListQuery) String() string { + return fmt.Sprintf("vault.list(%s)", d.path) +} + +// Type returns the type of this dependency. +func (d *VaultListQuery) Type() Type { + return TypeVault +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go new file mode 100644 index 000000000000..00ebf27ec0db --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go @@ -0,0 +1,175 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultReadQuery)(nil) +) + +// VaultReadQuery is the dependency to Vault for a secret +type VaultReadQuery struct { + stopCh chan struct{} + sleepCh chan time.Duration + + rawPath string + queryValues url.Values + secret *Secret + isKVv2 *bool + secretPath string + + // vaultSecret is the actual Vault secret which we are renewing + vaultSecret *api.Secret +} + +// NewVaultReadQuery creates a new datacenter dependency. 
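+// The input is a secret path with optional query parameters, for example
+// (illustrative path) "secret/data/foo?version=2"; the parameters are later
+// passed through to Vault via ReadWithData.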
+func NewVaultReadQuery(s string) (*VaultReadQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.read: invalid format: %q", s) + } + + secretURL, err := url.Parse(s) + if err != nil { + return nil, err + } + + return &VaultReadQuery{ + stopCh: make(chan struct{}, 1), + sleepCh: make(chan time.Duration, 1), + rawPath: secretURL.Path, + queryValues: secretURL.Query(), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultReadQuery) Fetch(clients *ClientSet, opts *QueryOptions, +) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + select { + case dur := <-d.sleepCh: + time.Sleep(dur) + default: + } + + firstRun := d.secret == nil + + if !firstRun && vaultSecretRenewable(d.secret) { + err := renewSecret(clients, d) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + } + + err := d.fetchSecret(clients, opts) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + if !vaultSecretRenewable(d.secret) { + dur := leaseCheckWait(d.secret) + log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur) + d.sleepCh <- dur + } + + return respWithMetadata(d.secret) +} + +func (d *VaultReadQuery) fetchSecret(clients *ClientSet, opts *QueryOptions, +) error { + opts = opts.Merge(&QueryOptions{}) + vaultSecret, err := d.readSecret(clients, opts) + if err == nil { + printVaultWarnings(d, vaultSecret.Warnings) + d.vaultSecret = vaultSecret + // the cloned secret which will be exposed to the template + d.secret = transformSecret(vaultSecret) + } + return err +} + +func (d *VaultReadQuery) stopChan() chan struct{} { + return d.stopCh +} + +func (d *VaultReadQuery) secrets() (*Secret, *api.Secret) { + return d.secret, d.vaultSecret +} + +// CanShare returns if this dependency is shareable. +func (d *VaultReadQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultReadQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultReadQuery) String() string { + return fmt.Sprintf("vault.read(%s)", d.rawPath) +} + +// Type returns the type of this dependency. +func (d *VaultReadQuery) Type() Type { + return TypeVault +} + +func (d *VaultReadQuery) readSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) { + vaultClient := clients.Vault() + + // Check whether this secret refers to a KV v2 entry if we haven't yet. 
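+	// (KV v2 reads go through "<mount>/data/<path>", so the path is rewritten
+	// once via addPrefixToVKVPath and cached for subsequent fetches.)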
+ if d.isKVv2 == nil { + mountPath, isKVv2, err := isKVv2(vaultClient, d.rawPath) + if err != nil { + log.Printf("[WARN] %s: failed to check if %s is KVv2, "+ + "assume not: %s", d, d.rawPath, err) + isKVv2 = false + d.secretPath = d.rawPath + } else if isKVv2 { + d.secretPath = addPrefixToVKVPath(d.rawPath, mountPath, "data") + } else { + d.secretPath = d.rawPath + } + d.isKVv2 = &isKVv2 + } + + queryString := d.queryValues.Encode() + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/" + d.secretPath, + RawQuery: queryString, + }) + vaultSecret, err := vaultClient.Logical().ReadWithData(d.secretPath, + d.queryValues) + + if err != nil { + return nil, errors.Wrap(err, d.String()) + } + if vaultSecret == nil || deletedKVv2(vaultSecret) { + return nil, fmt.Errorf("no secret exists at %s", d.secretPath) + } + return vaultSecret, nil +} + +func deletedKVv2(s *api.Secret) bool { + switch md := s.Data["metadata"].(type) { + case map[string]interface{}: + return md["deletion_time"] != "" + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go new file mode 100644 index 000000000000..61fa29cfa0fc --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go @@ -0,0 +1,95 @@ +package dependency + +import ( + "log" + "time" + + "github.com/hashicorp/vault/api" +) + +var ( + // Ensure implements + _ Dependency = (*VaultTokenQuery)(nil) +) + +// VaultTokenQuery is the dependency to Vault for a secret +type VaultTokenQuery struct { + stopCh chan struct{} + secret *Secret + vaultSecret *api.Secret +} + +// NewVaultTokenQuery creates a new dependency. +func NewVaultTokenQuery(token string) (*VaultTokenQuery, error) { + vaultSecret := &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: token, + Renewable: true, + LeaseDuration: 1, + }, + } + return &VaultTokenQuery{ + stopCh: make(chan struct{}, 1), + vaultSecret: vaultSecret, + secret: transformSecret(vaultSecret), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + if vaultSecretRenewable(d.secret) { + renewSecret(clients, d) + } + + // The secret isn't renewable, probably the generic secret backend. + // TODO This is incorrect when given a non-renewable template. We should + // instead to a lookup self to determine the lease duration. + opts = opts.Merge(&QueryOptions{}) + dur := leaseCheckWait(d.secret) + if dur < opts.VaultGrace { + dur = opts.VaultGrace + } + + log.Printf("[TRACE] %s: token is not renewable, sleeping for %s", d, dur) + select { + case <-time.After(dur): + case <-d.stopCh: + return nil, nil, ErrStopped + } + + return nil, nil, ErrLeaseExpired +} + +func (d *VaultTokenQuery) stopChan() chan struct{} { + return d.stopCh +} + +func (d *VaultTokenQuery) secrets() (*Secret, *api.Secret) { + return d.secret, d.vaultSecret +} + +// CanShare returns if this dependency is shareable. +func (d *VaultTokenQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *VaultTokenQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultTokenQuery) String() string { + return "vault.token" +} + +// Type returns the type of this dependency. 
+func (d *VaultTokenQuery) Type() Type { + return TypeVault +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go new file mode 100644 index 000000000000..c2841712f77d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go @@ -0,0 +1,177 @@ +package dependency + +import ( + "crypto/sha1" + "fmt" + "io" + "log" + "net/url" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultWriteQuery)(nil) +) + +// VaultWriteQuery is the dependency to Vault for a secret +type VaultWriteQuery struct { + stopCh chan struct{} + sleepCh chan time.Duration + + path string + data map[string]interface{} + dataHash string + secret *Secret + + // vaultSecret is the actual Vault secret which we are renewing + vaultSecret *api.Secret +} + +// NewVaultWriteQuery creates a new datacenter dependency. +func NewVaultWriteQuery(s string, d map[string]interface{}) (*VaultWriteQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.write: invalid format: %q", s) + } + + return &VaultWriteQuery{ + stopCh: make(chan struct{}, 1), + sleepCh: make(chan time.Duration, 1), + path: s, + data: d, + dataHash: sha1Map(d), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultWriteQuery) Fetch(clients *ClientSet, opts *QueryOptions, +) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + select { + case dur := <-d.sleepCh: + time.Sleep(dur) + default: + } + + firstRun := d.secret == nil + + if !firstRun && vaultSecretRenewable(d.secret) { + err := renewSecret(clients, d) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + } + + opts = opts.Merge(&QueryOptions{}) + vaultSecret, err := d.writeSecret(clients, opts) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + // vaultSecret == nil when writing to KVv1 engines + if vaultSecret == nil { + return respWithMetadata(d.secret) + } + + printVaultWarnings(d, vaultSecret.Warnings) + d.vaultSecret = vaultSecret + // cloned secret which will be exposed to the template + d.secret = transformSecret(vaultSecret) + + if !vaultSecretRenewable(d.secret) { + dur := leaseCheckWait(d.secret) + log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur) + d.sleepCh <- dur + } + + return respWithMetadata(d.secret) +} + +// meet renewer interface +func (d *VaultWriteQuery) stopChan() chan struct{} { + return d.stopCh +} + +func (d *VaultWriteQuery) secrets() (*Secret, *api.Secret) { + return d.secret, d.vaultSecret +} + +// CanShare returns if this dependency is shareable. +func (d *VaultWriteQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultWriteQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultWriteQuery) String() string { + return fmt.Sprintf("vault.write(%s -> %s)", d.path, d.dataHash) +} + +// Type returns the type of this dependency. +func (d *VaultWriteQuery) Type() Type { + return TypeVault +} + +// sha1Map returns the sha1 hash of the data in the map. The reason this data is +// hashed is because it appears in the output and could contain sensitive +// information. 
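+// For example (illustrative), sha1Map(map[string]interface{}{"password": "x"})
+// yields an eight-character hex digest (the first four bytes of the SHA-1
+// sum) rather than echoing the value itself.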
+func sha1Map(m map[string]interface{}) string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	h := sha1.New()
+	for _, k := range keys {
+		io.WriteString(h, fmt.Sprintf("%s=%q", k, m[k]))
+	}
+
+	return fmt.Sprintf("%.4x", h.Sum(nil))
+}
+
+func (d *VaultWriteQuery) printWarnings(warnings []string) {
+	for _, w := range warnings {
+		log.Printf("[WARN] %s: %s", d, w)
+	}
+}
+
+func (d *VaultWriteQuery) writeSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) {
+	log.Printf("[TRACE] %s: PUT %s", d, &url.URL{
+		Path:     "/v1/" + d.path,
+		RawQuery: opts.String(),
+	})
+
+	data := d.data
+
+	_, isv2, _ := isKVv2(clients.Vault(), d.path)
+	if isv2 {
+		data = map[string]interface{}{"data": d.data}
+	}
+
+	vaultSecret, err := clients.Vault().Logical().Write(d.path, data)
+	if err != nil {
+		return nil, errors.Wrap(err, d.String())
+	}
+	// vaultSecret is always nil when writing to a KVv1 engine (isv2==false)
+	if isv2 && vaultSecret == nil {
+		return nil, fmt.Errorf("no secret exists at %s", d.path)
+	}
+
+	return vaultSecret, nil
+}
diff --git a/vendor/github.com/hashicorp/consul-template/manager/dedup.go b/vendor/github.com/hashicorp/consul-template/manager/dedup.go
new file mode 100644
index 000000000000..3f5f9a9503d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul-template/manager/dedup.go
@@ -0,0 +1,512 @@
+package manager
+
+import (
+	"bytes"
+	"compress/lzw"
+	"encoding/gob"
+	"fmt"
+	"log"
+	"path"
+	"sync"
+	"time"
+
+	"github.com/mitchellh/hashstructure"
+
+	"github.com/hashicorp/consul-template/config"
+	dep "github.com/hashicorp/consul-template/dependency"
+	"github.com/hashicorp/consul-template/template"
+	"github.com/hashicorp/consul-template/version"
+	consulapi "github.com/hashicorp/consul/api"
+)
+
+var (
+	// sessionCreateRetry is the amount of time we wait
+	// to recreate a session when lost.
+	sessionCreateRetry = 15 * time.Second
+
+	// lockRetry is the interval on which we try to re-acquire locks
+	lockRetry = 10 * time.Second
+
+	// listRetry is the interval on which we retry listing a data path
+	listRetry = 10 * time.Second
+
+	// timeout passed through to consul api client Lock
+	// here to override in testing (see ./dedup_test.go)
+	lockWaitTime = 15 * time.Second
+)
+
+const (
+	templateNoDataStr = "__NO_DATA__"
+)
+
+// templateData is GOB-encoded to share the dependency values
+type templateData struct {
+	// Version is the version of Consul Template which created this template data.
+	// This is important because users may be running multiple versions of CT
+	// with the same templates. This provides a nicer upgrade path.
+	Version string
+
+	// Data is the actual template data.
+	Data map[string]interface{}
+}
+
+func templateNoData() []byte {
+	return []byte(templateNoDataStr)
+}
+
+// DedupManager is used to de-duplicate which instance of Consul-Template
+// is handling each template. For each template, a lock path is determined
+// using the MD5 of the template. This path is used to elect a "leader"
+// instance.
+//
+// The leader instance operates as usual, but any time a template is
+// rendered, the data required for rendering is stored in the
+// Consul KV store under the lock path.
+//
+// The follower instances depend on the leader to do the primary watching
+// and rendering, and instead only watch the aggregated data in the KV.
+// Followers wait for updates and re-render the template.
+// +// If a template depends on 50 views, and is running on 50 machines, that +// would normally require 2500 blocking queries. Using deduplication, one +// instance has 50 view queries, plus 50 additional queries on the lock +// path for a total of 100. +// +type DedupManager struct { + // config is the deduplicate configuration + config *config.DedupConfig + + // clients is used to access the underlying clients + clients *dep.ClientSet + + // Brain is where we inject updates + brain *template.Brain + + // templates is the set of templates we are trying to dedup + templates []*template.Template + + // leader tracks if we are currently the leader + leader map[*template.Template]<-chan struct{} + leaderLock sync.RWMutex + + // lastWrite tracks the hash of the data paths + lastWrite map[*template.Template]uint64 + lastWriteLock sync.RWMutex + + // updateCh is used to indicate an update watched data + updateCh chan struct{} + + // wg is used to wait for a clean shutdown + wg sync.WaitGroup + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewDedupManager creates a new Dedup manager +func NewDedupManager(config *config.DedupConfig, clients *dep.ClientSet, brain *template.Brain, templates []*template.Template) (*DedupManager, error) { + d := &DedupManager{ + config: config, + clients: clients, + brain: brain, + templates: templates, + leader: make(map[*template.Template]<-chan struct{}), + lastWrite: make(map[*template.Template]uint64), + updateCh: make(chan struct{}, 1), + stopCh: make(chan struct{}), + } + return d, nil +} + +// Start is used to start the de-duplication manager +func (d *DedupManager) Start() error { + log.Printf("[INFO] (dedup) starting de-duplication manager") + + client := d.clients.Consul() + go d.createSession(client) + + // Start to watch each template + for _, t := range d.templates { + go d.watchTemplate(client, t) + } + return nil +} + +// Stop is used to stop the de-duplication manager +func (d *DedupManager) Stop() error { + d.stopLock.Lock() + defer d.stopLock.Unlock() + if d.stop { + return nil + } + + log.Printf("[INFO] (dedup) stopping de-duplication manager") + d.stop = true + close(d.stopCh) + d.wg.Wait() + return nil +} + +// createSession is used to create and maintain a session to Consul +func (d *DedupManager) createSession(client *consulapi.Client) { +START: + log.Printf("[INFO] (dedup) attempting to create session") + session := client.Session() + sessionCh := make(chan struct{}) + ttl := fmt.Sprintf("%.6fs", float64(*d.config.TTL)/float64(time.Second)) + se := &consulapi.SessionEntry{ + Name: "Consul-Template de-duplication", + Behavior: "delete", + TTL: ttl, + LockDelay: 1 * time.Millisecond, + } + id, _, err := session.Create(se, nil) + if err != nil { + log.Printf("[ERR] (dedup) failed to create session: %v", err) + goto WAIT + } + log.Printf("[INFO] (dedup) created session %s", id) + + // Attempt to lock each template + for _, t := range d.templates { + d.wg.Add(1) + go d.attemptLock(client, id, sessionCh, t) + } + + // Renew our session periodically + if err := session.RenewPeriodic("15s", id, nil, d.stopCh); err != nil { + log.Printf("[ERR] (dedup) failed to renew session: %v", err) + } + close(sessionCh) + d.wg.Wait() + +WAIT: + select { + case <-time.After(sessionCreateRetry): + goto START + case <-d.stopCh: + return + } +} + +// IsLeader checks if we are currently the leader instance +func (d *DedupManager) IsLeader(tmpl *template.Template) bool { + d.leaderLock.RLock() + defer d.leaderLock.RUnlock() + + lockCh, ok := 
d.leader[tmpl] + if !ok { + return false + } + select { + case <-lockCh: + return false + default: + return true + } +} + +// UpdateDeps is used to update the values of the dependencies for a template +func (d *DedupManager) UpdateDeps(t *template.Template, deps []dep.Dependency) error { + // Calculate the path to write updates to + dataPath := path.Join(*d.config.Prefix, t.ID(), "data") + + // Package up the dependency data + td := templateData{ + Version: version.Version, + Data: make(map[string]interface{}), + } + for _, dp := range deps { + // Skip any dependencies that can't be shared + if !dp.CanShare() { + continue + } + + // Pull the current value from the brain + val, ok := d.brain.Recall(dp) + if ok { + td.Data[dp.String()] = val + } + } + + // Compute stable hash of the data. Note we don't compute this over the actual + // encoded value since gob encoding does not guarantee stable ordering for + // maps so spuriously returns a different hash most times. See + // https://github.com/hashicorp/consul-template/issues/1099. + hash, err := hashstructure.Hash(td, nil) + if err != nil { + return fmt.Errorf("calculating hash failed: %v", err) + } + d.lastWriteLock.RLock() + existing, ok := d.lastWrite[t] + d.lastWriteLock.RUnlock() + if ok && existing == hash { + log.Printf("[INFO] (dedup) de-duplicate data '%s' already current", + dataPath) + return nil + } + + // Encode via GOB and LZW compress + var buf bytes.Buffer + compress := lzw.NewWriter(&buf, lzw.LSB, 8) + enc := gob.NewEncoder(compress) + if err := enc.Encode(&td); err != nil { + return fmt.Errorf("encode failed: %v", err) + } + compress.Close() + + // Write the KV update + kvPair := consulapi.KVPair{ + Key: dataPath, + Value: buf.Bytes(), + Flags: consulapi.LockFlagValue, + } + client := d.clients.Consul() + if _, err := client.KV().Put(&kvPair, nil); err != nil { + return fmt.Errorf("failed to write '%s': %v", dataPath, err) + } + log.Printf("[INFO] (dedup) updated de-duplicate data '%s'", dataPath) + d.lastWriteLock.Lock() + d.lastWrite[t] = hash + d.lastWriteLock.Unlock() + return nil +} + +// UpdateCh returns a channel to watch for dependency updates +func (d *DedupManager) UpdateCh() <-chan struct{} { + return d.updateCh +} + +// setLeader sets if we are currently the leader instance +func (d *DedupManager) setLeader(tmpl *template.Template, lockCh <-chan struct{}) { + // Update the lock state + d.leaderLock.Lock() + if lockCh != nil { + d.leader[tmpl] = lockCh + } else { + delete(d.leader, tmpl) + } + d.leaderLock.Unlock() + + // Clear the lastWrite hash if we've lost leadership + if lockCh == nil { + d.lastWriteLock.Lock() + delete(d.lastWrite, tmpl) + d.lastWriteLock.Unlock() + } + + // Do an async notify of an update + select { + case d.updateCh <- struct{}{}: + default: + } +} + +func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Template) { + log.Printf("[INFO] (dedup) starting watch for template hash %s", t.ID()) + path := path.Join(*d.config.Prefix, t.ID(), "data") + + // Determine if stale queries are allowed + var allowStale bool + if *d.config.MaxStale != 0 { + allowStale = true + } + + // Setup our query options + opts := &consulapi.QueryOptions{ + AllowStale: allowStale, + WaitTime: 60 * time.Second, + } + + var lastData []byte + var lastIndex uint64 + +START: + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + + // If we are current the leader, wait for leadership lost + d.leaderLock.RLock() + lockCh, ok := d.leader[t] + d.leaderLock.RUnlock() 
+ if ok { + select { + case <-lockCh: + goto START + case <-d.stopCh: + return + } + } + + // Block for updates on the data key + log.Printf("[INFO] (dedup) listing data for template hash %s", t.ID()) + pair, meta, err := client.KV().Get(path, opts) + if err != nil { + log.Printf("[ERR] (dedup) failed to get '%s': %v", path, err) + select { + case <-time.After(listRetry): + goto START + case <-d.stopCh: + return + } + } + opts.WaitIndex = meta.LastIndex + + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + + // If we've exceeded the maximum staleness, retry without stale + if allowStale && meta.LastContact > *d.config.MaxStale { + allowStale = false + log.Printf("[DEBUG] (dedup) %s stale data (last contact exceeded max_stale)", path) + goto START + } + + // Re-enable stale queries if allowed + if *d.config.MaxStale > 0 { + allowStale = true + } + + if meta.LastIndex == lastIndex { + log.Printf("[TRACE] (dedup) %s no new data (index was the same)", path) + goto START + } + + if meta.LastIndex < lastIndex { + log.Printf("[TRACE] (dedup) %s had a lower index, resetting", path) + lastIndex = 0 + goto START + } + lastIndex = meta.LastIndex + + var data []byte + if pair != nil { + data = pair.Value + } + if bytes.Equal(lastData, data) { + log.Printf("[TRACE] (dedup) %s no new data (contents were the same)", path) + goto START + } + lastData = data + + // If we are current the leader, wait for leadership lost + d.leaderLock.RLock() + lockCh, ok = d.leader[t] + d.leaderLock.RUnlock() + if ok { + select { + case <-lockCh: + goto START + case <-d.stopCh: + return + } + } + + // Parse the data file + if pair != nil && pair.Flags == consulapi.LockFlagValue && !bytes.Equal(pair.Value, templateNoData()) { + d.parseData(pair.Key, pair.Value) + } + goto START +} + +// parseData is used to update brain from a KV data pair +func (d *DedupManager) parseData(path string, raw []byte) { + // Setup the decompression and decoders + r := bytes.NewReader(raw) + decompress := lzw.NewReader(r, lzw.LSB, 8) + defer decompress.Close() + dec := gob.NewDecoder(decompress) + + // Decode the data + var td templateData + if err := dec.Decode(&td); err != nil { + log.Printf("[ERR] (dedup) failed to decode '%s': %v", + path, err) + return + } + if td.Version != version.Version { + log.Printf("[WARN] (dedup) created with different version (%s vs %s)", + td.Version, version.Version) + return + } + log.Printf("[INFO] (dedup) loading %d dependencies from '%s'", + len(td.Data), path) + + // Update the data in the brain + for hashCode, value := range td.Data { + d.brain.ForceSet(hashCode, value) + } + + // Trigger the updateCh + select { + case d.updateCh <- struct{}{}: + default: + } +} + +func (d *DedupManager) attemptLock(client *consulapi.Client, session string, sessionCh chan struct{}, t *template.Template) { + defer d.wg.Done() + for { + log.Printf("[INFO] (dedup) attempting lock for template hash %s", t.ID()) + basePath := path.Join(*d.config.Prefix, t.ID()) + lopts := &consulapi.LockOptions{ + Key: path.Join(basePath, "data"), + Value: templateNoData(), + Session: session, + MonitorRetries: 3, + MonitorRetryTime: 3 * time.Second, + LockWaitTime: lockWaitTime, + } + lock, err := client.LockOpts(lopts) + if err != nil { + log.Printf("[ERR] (dedup) failed to create lock '%s': %v", + lopts.Key, err) + return + } + + var retryCh <-chan time.Time + leaderCh, err := lock.Lock(sessionCh) + if err != nil { + log.Printf("[ERR] (dedup) failed to acquire lock '%s': %v", + lopts.Key, err) + 
retryCh = time.After(lockRetry) + } else { + log.Printf("[INFO] (dedup) acquired lock '%s'", lopts.Key) + d.setLeader(t, leaderCh) + } + + select { + case <-retryCh: + retryCh = nil + continue + case <-leaderCh: + log.Printf("[WARN] (dedup) lost lock ownership '%s'", lopts.Key) + d.setLeader(t, nil) + continue + case <-sessionCh: + log.Printf("[INFO] (dedup) releasing session '%s'", lopts.Key) + d.setLeader(t, nil) + _, err = client.Session().Destroy(session, nil) + if err != nil { + log.Printf("[ERROR] (dedup) failed destroying session '%s', %s", session, err) + } + return + case <-d.stopCh: + log.Printf("[INFO] (dedup) releasing lock '%s'", lopts.Key) + _, err = client.Session().Destroy(session, nil) + if err != nil { + log.Printf("[ERROR] (dedup) failed destroying session '%s', %s", session, err) + } + return + } + } +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/errors.go b/vendor/github.com/hashicorp/consul-template/manager/errors.go new file mode 100644 index 000000000000..dbb84c36e438 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/errors.go @@ -0,0 +1,31 @@ +package manager + +import "fmt" + +// ErrExitable is an interface that defines an integer ExitStatus() function. +type ErrExitable interface { + ExitStatus() int +} + +var _ error = new(ErrChildDied) +var _ ErrExitable = new(ErrChildDied) + +// ErrChildDied is the error returned when the child process prematurely dies. +type ErrChildDied struct { + code int +} + +// NewErrChildDied creates a new error with the given exit code. +func NewErrChildDied(c int) *ErrChildDied { + return &ErrChildDied{code: c} +} + +// Error implements the error interface. +func (e *ErrChildDied) Error() string { + return fmt.Sprintf("child process died with exit code %d", e.code) +} + +// ExitStatus implements the ErrExitable interface. +func (e *ErrChildDied) ExitStatus() int { + return e.code +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/runner.go b/vendor/github.com/hashicorp/consul-template/manager/runner.go new file mode 100644 index 000000000000..877f4bf945f8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/runner.go @@ -0,0 +1,1305 @@ +package manager + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + "strconv" + "sync" + "time" + + "github.com/hashicorp/consul-template/child" + "github.com/hashicorp/consul-template/config" + dep "github.com/hashicorp/consul-template/dependency" + "github.com/hashicorp/consul-template/renderer" + "github.com/hashicorp/consul-template/template" + "github.com/hashicorp/consul-template/watch" + multierror "github.com/hashicorp/go-multierror" + shellwords "github.com/mattn/go-shellwords" + "github.com/pkg/errors" +) + +const ( + // saneViewLimit is the number of views that we consider "sane" before we + // warn the user that they might be DDoSing their Consul cluster. + saneViewLimit = 128 +) + +// Runner responsible rendering Templates and invoking Commands. +type Runner struct { + // ErrCh and DoneCh are channels where errors and finish notifications occur. + ErrCh chan error + DoneCh chan struct{} + + // config is the Config that created this Runner. It is used internally to + // construct other objects and pass data. + config *config.Config + + // signals sending output to STDOUT instead of to a file. + dry bool + + // outStream and errStream are the io.Writer streams where the runner will + // write information. These can be modified by calling SetOutStream and + // SetErrStream accordingly. 
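The acquire/lost/release select above is standard Consul leader election via the api client's Lock helper. A stripped-down sketch of the same pattern; the key name is illustrative, and session creation is left to the Lock helper rather than managed explicitly the way attemptLock does:

    package main

    import (
    	"log"

    	consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
    	client, err := consulapi.NewClient(consulapi.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}

    	// The key is illustrative; attemptLock derives it from the
    	// configured prefix and the template hash.
    	lock, err := client.LockOpts(&consulapi.LockOptions{
    		Key:          "consul-template/dedup/example/data",
    		LockWaitTime: consulapi.DefaultLockWaitTime,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	leaderCh, err := lock.Lock(nil) // blocks until the lock is acquired
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Println("acquired lock: this instance computes and publishes data")

    	<-leaderCh // closed on lost leadership; followers only watch the key
    	log.Println("lost leadership: fall back to watching the data key")
    }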
+ + // inStream is the ioReader where the runner will read information. + outStream, errStream io.Writer + inStream io.Reader + + // ctemplatesMap is a map of each template ID to the TemplateConfigs + // that made it. + ctemplatesMap map[string]config.TemplateConfigs + + // templates is the list of calculated templates. + templates []*template.Template + + // renderEvents is a mapping of a template ID to the render event. + renderEvents map[string]*RenderEvent + + // renderEventLock protects access into the renderEvents map + renderEventsLock sync.RWMutex + + // renderedCh is used to signal that a template has been rendered + renderedCh chan struct{} + + // renderEventCh is used to signal that there is a new render event. A + // render event doesn't necessarily mean that a template has been rendered, + // only that templates attempted to render and may have updated their + // dependency sets. + renderEventCh chan struct{} + + // dependencies is the list of dependencies this runner is watching. + dependencies map[string]dep.Dependency + + // dependenciesLock is a lock around touching the dependencies map. + dependenciesLock sync.Mutex + + // watcher is the watcher this runner is using. + watcher *watch.Watcher + + // brain is the internal storage database of returned dependency data. + brain *template.Brain + + // child is the child process under management. This may be nil if not running + // in exec mode. + child *child.Child + + // childLock is the internal lock around the child process. + childLock sync.RWMutex + + // quiescenceMap is the map of templates to their quiescence timers. + // quiescenceCh is the channel where templates report returns from quiescence + // fires. + quiescenceMap map[string]*quiescence + quiescenceCh chan *template.Template + + // dedup is the deduplication manager if enabled + dedup *DedupManager + + // Env represents a custom set of environment variables to populate the + // template and command runtime with. These environment variables will be + // available in both the command's environment as well as the template's + // environment. + Env map[string]string + + // stopLock is the lock around checking if the runner can be stopped + stopLock sync.Mutex + + // stopped is a boolean of whether the runner is stopped + stopped bool +} + +// RenderEvent captures the time and events that occurred for a template +// rendering. +type RenderEvent struct { + // Missing is the list of dependencies that we do not yet have data for, but + // are contained in the watcher. This is different from unwatched dependencies, + // which includes dependencies the watcher has not yet started querying for + // data. + MissingDeps *dep.Set + + // Template is the template attempting to be rendered. + Template *template.Template + + // Contents is the raw, rendered contents from the template. + Contents []byte + + // TemplateConfigs is the list of template configs that correspond to this + // template. + TemplateConfigs []*config.TemplateConfig + + // Unwatched is the list of dependencies that are not present in the watcher. + // This value may change over time due to the n-pass evaluation. + UnwatchedDeps *dep.Set + + // UpdatedAt is the last time this render event was updated. + UpdatedAt time.Time + + // Used is the full list of dependencies seen in the template. Because of + // the n-pass evaluation, this number can change over time. The dependencies + // in this list may or may not have data. 
This just contains the list of all
+	// dependencies parsed out of the template with the current data.
+	UsedDeps *dep.Set
+
+	// WouldRender determines if the template would have been rendered. A template
+	// would have been rendered if all the dependencies are satisfied, but may
+	// not have actually rendered if the file was already present or if an error
+	// occurred when trying to write the file.
+	WouldRender bool
+
+	// LastWouldRender marks the last time the template would have rendered.
+	LastWouldRender time.Time
+
+	// DidRender determines if the Template was actually written to disk. In dry
+	// mode, this will always be false, since templates are not written to disk
+	// in dry mode. A template is only rendered to disk if all dependencies are
+	// satisfied and the template is not already in place with the same contents.
+	DidRender bool
+
+	// LastDidRender marks the last time the template was written to disk.
+	LastDidRender time.Time
+
+	// ForQuiescence determines if this event is returned early in the
+	// render loop due to quiescence. When evaluating if all templates have
+	// been rendered, we need to know if the event was triggered by quiescence
+	// so we can skip evaluating it as a render event for those purposes.
+	ForQuiescence bool
+}
+
+// NewRunner accepts a slice of TemplateConfigs and returns a pointer to the new
+// Runner and any error that occurred during creation.
+func NewRunner(config *config.Config, dry bool) (*Runner, error) {
+	log.Printf("[INFO] (runner) creating new runner (dry: %v, once: %v)",
+		dry, config.Once)
+
+	runner := &Runner{
+		config: config,
+		dry:    dry,
+	}
+
+	if err := runner.init(); err != nil {
+		return nil, err
+	}
+
+	return runner, nil
+}
+
+// Start begins the polling for this runner. Any errors that occur will cause
+// this function to push an item onto the runner's error channel and then halt
+// execution. This function is blocking and should be called as a goroutine.
+func (r *Runner) Start() {
+	log.Printf("[INFO] (runner) starting")
+
+	// Create the pid before doing anything.
+	if err := r.storePid(); err != nil {
+		r.ErrCh <- err
+		return
+	}
+
+	// Start the de-duplication manager
+	var dedupCh <-chan struct{}
+	if r.dedup != nil {
+		if err := r.dedup.Start(); err != nil {
+			r.ErrCh <- err
+			return
+		}
+		dedupCh = r.dedup.UpdateCh()
+	}
+
+	// Set up the child process exit channel
+	var childExitCh <-chan int
+
+	// Fire an initial run to parse all the templates and set up the first-pass
+	// dependencies. This also forces any templates that have no dependencies to
+	// be rendered immediately (since they are already renderable).
+	log.Printf("[DEBUG] (runner) running initial templates")
+	if err := r.Run(); err != nil {
+		r.ErrCh <- err
+		return
+	}
+
+	for {
+		// Warn the user if they are watching too many dependencies.
+		if r.watcher.Size() > saneViewLimit {
+			log.Printf("[WARN] (runner) watching %d dependencies - watching this "+
+				"many dependencies could DDoS your consul cluster", r.watcher.Size())
+		} else {
+			log.Printf("[DEBUG] (runner) watching %d dependencies", r.watcher.Size())
+		}
+
+		if r.allTemplatesRendered() {
+			log.Printf("[DEBUG] (runner) all templates rendered")
+			// Enable quiescence for all templates if we have specified wait
+			// intervals.
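Stepping back from the loop internals for a moment: NewRunner and Start above define the embedding contract, which is to construct a Runner, run Start in a goroutine, and watch ErrCh and DoneCh. A hypothetical embedding sketch in dry plus once mode; the template contents are illustrative, and config.String and config.TemplateConfigs are the vendored config helpers used throughout this file:

    package main

    import (
    	"log"

    	"github.com/hashicorp/consul-template/config"
    	"github.com/hashicorp/consul-template/manager"
    )

    func main() {
    	// Once mode plus dry mode: render each template to stdout a single
    	// time, then exit.
    	cfg := config.DefaultConfig()
    	cfg.Once = true
    	cfg.Templates = &config.TemplateConfigs{
    		{Contents: config.String("hello from consul-template\n")},
    	}
    	cfg.Finalize()

    	runner, err := manager.NewRunner(cfg, true) // dry: render to stdout
    	if err != nil {
    		log.Fatal(err)
    	}
    	go runner.Start() // blocking; run it as a goroutine

    	select {
    	case err := <-runner.ErrCh:
    		log.Fatal(err)
    	case <-runner.DoneCh:
    		log.Println("all templates rendered; runner finished")
    	}
    }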
+ NEXT_Q: + for _, t := range r.templates { + if _, ok := r.quiescenceMap[t.ID()]; ok { + continue NEXT_Q + } + + for _, c := range r.templateConfigsFor(t) { + if *c.Wait.Enabled { + log.Printf("[DEBUG] (runner) enabling template-specific "+ + "quiescence for %q", t.ID()) + r.quiescenceMap[t.ID()] = newQuiescence( + r.quiescenceCh, *c.Wait.Min, *c.Wait.Max, t) + continue NEXT_Q + } + } + + if *r.config.Wait.Enabled { + log.Printf("[DEBUG] (runner) enabling global quiescence for %q", + t.ID()) + r.quiescenceMap[t.ID()] = newQuiescence( + r.quiescenceCh, *r.config.Wait.Min, *r.config.Wait.Max, t) + continue NEXT_Q + } + } + + // If an exec command was given and a command is not currently running, + // spawn the child process for supervision. + if config.StringPresent(r.config.Exec.Command) { + // Lock the child because we are about to check if it exists. + r.childLock.Lock() + + log.Printf("[TRACE] (runner) acquired child lock for command, spawning") + + if r.child == nil { + env := r.config.Exec.Env.Copy() + env.Custom = append(r.childEnv(), env.Custom...) + child, err := spawnChild(&spawnChildInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: config.StringVal(r.config.Exec.Command), + Env: env.Env(), + ReloadSignal: config.SignalVal(r.config.Exec.ReloadSignal), + KillSignal: config.SignalVal(r.config.Exec.KillSignal), + KillTimeout: config.TimeDurationVal(r.config.Exec.KillTimeout), + Splay: config.TimeDurationVal(r.config.Exec.Splay), + }) + if err != nil { + r.ErrCh <- err + r.childLock.Unlock() + return + } + r.child = child + } + + // Unlock the child, we are done now. + r.childLock.Unlock() + + // It's possible that we didn't start a process, in which case no + // channel is returned. If we did get a new exitCh, that means a child + // was spawned, so we need to watch a new exitCh. It is also possible + // that during a run, the child process was restarted, which means a + // new exit channel should be used. + nexitCh := r.child.ExitCh() + if nexitCh != nil { + childExitCh = nexitCh + } + } + + // If we are running in once mode and all our templates are rendered, + // then we should exit here. + if r.config.Once { + log.Printf("[INFO] (runner) once mode and all templates rendered") + + if r.child != nil { + r.stopDedup() + r.stopWatcher() + + log.Printf("[INFO] (runner) waiting for child process to exit") + select { + case c := <-childExitCh: + log.Printf("[INFO] (runner) child process died") + r.ErrCh <- NewErrChildDied(c) + return + case <-r.DoneCh: + } + } + + r.Stop() + return + } + } + + OUTER: + select { + case view := <-r.watcher.DataCh(): + // Receive this update + r.Receive(view.Dependency(), view.Data()) + + // Drain all dependency data. Given a large number of dependencies, it is + // feasible that we have data for more than one of them. Instead of + // wasting CPU cycles rendering templates when we have more dependencies + // waiting to be added to the brain, we drain the entire buffered channel + // on the watcher and then reports when it is done receiving new data + // which the parent select listens for. + // + // Please see https://github.com/hashicorp/consul-template/issues/168 for + // more information about this optimization and the entire backstory. 
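In generic form, the drain described above looks like this; a toy sketch of the idiom, not the vendored loop, which follows:

    package main

    import "fmt"

    // drainThenAct absorbs every queued notification before doing the
    // expensive work once, rather than once per notification.
    func drainThenAct(ch <-chan struct{}, act func()) {
    	for {
    		select {
    		case <-ch:
    			// more data is already buffered; keep draining
    		default:
    			act()
    			return
    		}
    	}
    }

    func main() {
    	updates := make(chan struct{}, 8)
    	for i := 0; i < 5; i++ {
    		updates <- struct{}{}
    	}
    	drainThenAct(updates, func() { fmt.Println("one render for five updates") })
    }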
+ for { + select { + case view := <-r.watcher.DataCh(): + r.Receive(view.Dependency(), view.Data()) + default: + break OUTER + } + } + + case <-dedupCh: + // We may get triggered by the de-duplication manager for either a change + // in leadership (acquired or lost lock), or an update of data for a template + // that we are watching. + log.Printf("[INFO] (runner) watcher triggered by de-duplication manager") + break OUTER + + case err := <-r.watcher.ErrCh(): + // Push the error back up the stack + log.Printf("[ERR] (runner) watcher reported error: %s", err) + r.ErrCh <- err + return + + case tmpl := <-r.quiescenceCh: + // Remove the quiescence for this template from the map. This will force + // the upcoming Run call to actually evaluate and render the template. + log.Printf("[DEBUG] (runner) received template %q from quiescence", tmpl.ID()) + delete(r.quiescenceMap, tmpl.ID()) + + case c := <-childExitCh: + log.Printf("[INFO] (runner) child process died") + r.ErrCh <- NewErrChildDied(c) + return + + case <-r.DoneCh: + log.Printf("[INFO] (runner) received finish") + return + } + + // If we got this far, that means we got new data or one of the timers + // fired, so attempt to re-render. + if err := r.Run(); err != nil { + r.ErrCh <- err + return + } + } +} + +// Stop halts the execution of this runner and its subprocesses. +func (r *Runner) Stop() { + r.internalStop(false) +} + +// StopImmediately behaves like Stop but won't wait for any splay on any child +// process it may be running. +func (r *Runner) StopImmediately() { + r.internalStop(true) +} + +// TemplateRenderedCh returns a channel that will be triggered when one or more +// templates are rendered. +func (r *Runner) TemplateRenderedCh() <-chan struct{} { + return r.renderedCh +} + +// RenderEventCh returns a channel that will be triggered when there is a new +// render event. +func (r *Runner) RenderEventCh() <-chan struct{} { + return r.renderEventCh +} + +// RenderEvents returns the render events for each template was rendered. The +// map is keyed by template ID. +func (r *Runner) RenderEvents() map[string]*RenderEvent { + r.renderEventsLock.RLock() + defer r.renderEventsLock.RUnlock() + + times := make(map[string]*RenderEvent, len(r.renderEvents)) + for k, v := range r.renderEvents { + times[k] = v + } + return times +} + +func (r *Runner) internalStop(immediately bool) { + r.stopLock.Lock() + defer r.stopLock.Unlock() + + if r.stopped { + return + } + + log.Printf("[INFO] (runner) stopping") + r.stopDedup() + r.stopWatcher() + r.stopChild(immediately) + + if err := r.deletePid(); err != nil { + log.Printf("[WARN] (runner) could not remove pid at %q: %s", + *r.config.PidFile, err) + } + + r.stopped = true + + close(r.DoneCh) +} + +func (r *Runner) stopDedup() { + if r.dedup != nil { + log.Printf("[DEBUG] (runner) stopping de-duplication manager") + r.dedup.Stop() + } +} + +func (r *Runner) stopWatcher() { + if r.watcher != nil { + log.Printf("[DEBUG] (runner) stopping watcher") + r.watcher.Stop() + } +} + +func (r *Runner) stopChild(immediately bool) { + r.childLock.RLock() + defer r.childLock.RUnlock() + + if r.child != nil { + if immediately { + log.Printf("[DEBUG] (runner) stopping child process immediately") + r.child.StopImmediately() + } else { + log.Printf("[DEBUG] (runner) stopping child process") + r.child.Stop() + } + } +} + +// Receive accepts a Dependency and data for that dep. This data is +// cached on the Runner. This data is then used to determine if a Template +// is "renderable" (i.e. 
all its Dependencies have been downloaded at least +// once). +func (r *Runner) Receive(d dep.Dependency, data interface{}) { + r.dependenciesLock.Lock() + defer r.dependenciesLock.Unlock() + + // Just because we received data, it does not mean that we are actually + // watching for that data. How is that possible you may ask? Well, this + // Runner's data channel is pooled, meaning it accepts multiple data views + // before actually blocking. Whilest this runner is performing a Run() and + // executing diffs, it may be possible that more data was pushed onto the + // data channel pool for a dependency that we no longer care about. + // + // Accepting this dependency would introduce stale data into the brain, and + // that is simply unacceptable. In fact, it is a fun little bug: + // + // https://github.com/hashicorp/consul-template/issues/198 + // + // and by "little" bug, I mean really big bug. + if _, ok := r.dependencies[d.String()]; ok { + log.Printf("[DEBUG] (runner) receiving dependency %s", d) + r.brain.Remember(d, data) + } +} + +// Signal sends a signal to the child process, if it exists. Any errors that +// occur are returned. +func (r *Runner) Signal(s os.Signal) error { + r.childLock.RLock() + defer r.childLock.RUnlock() + if r.child == nil { + return nil + } + return r.child.Signal(s) +} + +// Run iterates over each template in this Runner and conditionally executes +// the template rendering and command execution. +// +// The template is rendered atomically. If and only if the template render +// completes successfully, the optional commands will be executed, if given. +// Please note that all templates are rendered **and then** any commands are +// executed. +func (r *Runner) Run() error { + log.Printf("[DEBUG] (runner) initiating run") + + var newRenderEvent, wouldRenderAny, renderedAny bool + runCtx := &templateRunCtx{ + depsMap: make(map[string]dep.Dependency), + } + + for _, tmpl := range r.templates { + event, err := r.runTemplate(tmpl, runCtx) + if err != nil { + return err + } + + // If there was a render event store it + if event != nil { + r.renderEventsLock.Lock() + r.renderEvents[tmpl.ID()] = event + r.renderEventsLock.Unlock() + + // Record that there is at least one new render event + newRenderEvent = true + + // Record that at least one template would have been rendered. + if event.WouldRender { + wouldRenderAny = true + } + + // Record that at least one template was rendered. + if event.DidRender { + renderedAny = true + } + } + } + + // Check if we need to deliver any rendered signals + if wouldRenderAny || renderedAny { + // Send the signal that a template got rendered + select { + case r.renderedCh <- struct{}{}: + default: + } + } + + // Check if we need to deliver any event signals + if newRenderEvent { + select { + case r.renderEventCh <- struct{}{}: + default: + } + } + + // Perform the diff and update the known dependencies. + r.diffAndUpdateDeps(runCtx.depsMap) + + // Execute each command in sequence, collecting any errors that occur - this + // ensures all commands execute at least once. + var errs []error + for _, t := range runCtx.commands { + command := config.StringVal(t.Exec.Command) + log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display()) + env := t.Exec.Env.Copy() + env.Custom = append(r.childEnv(), env.Custom...) 
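Both renderedCh and renderEventCh are buffered with capacity one and written with the non-blocking send seen earlier in Run, so a slow consumer can never stall rendering. The idiom in isolation:

    package main

    import "fmt"

    // notify mirrors the send used for renderedCh and renderEventCh: with a
    // buffer of one, at most one wake-up is pending, and extra signals are
    // dropped instead of blocking the sender.
    func notify(ch chan struct{}) {
    	select {
    	case ch <- struct{}{}:
    	default: // a wake-up is already pending; coalesce
    	}
    }

    func main() {
    	ch := make(chan struct{}, 1)
    	notify(ch)
    	notify(ch) // coalesced
    	<-ch
    	fmt.Println("woke once for two renders")
    }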
+ if _, err := spawnChild(&spawnChildInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: command, + Env: env.Env(), + Timeout: config.TimeDurationVal(t.Exec.Timeout), + ReloadSignal: config.SignalVal(t.Exec.ReloadSignal), + KillSignal: config.SignalVal(t.Exec.KillSignal), + KillTimeout: config.TimeDurationVal(t.Exec.KillTimeout), + Splay: config.TimeDurationVal(t.Exec.Splay), + }); err != nil { + s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display()) + errs = append(errs, errors.Wrap(err, s)) + } + } + + // If we got this far and have a child process, we need to send the reload + // signal to the child process. + if renderedAny && r.child != nil { + r.childLock.RLock() + if err := r.child.Reload(); err != nil { + errs = append(errs, err) + } + r.childLock.RUnlock() + } + + // If any errors were returned, convert them to an ErrorList for human + // readability. + if len(errs) != 0 { + var result *multierror.Error + for _, err := range errs { + result = multierror.Append(result, err) + } + return result.ErrorOrNil() + } + + return nil +} + +type templateRunCtx struct { + // commands is the set of commands that will be executed after all templates + // have run. When adding to the commands, care should be taken not to + // duplicate any existing command from a previous template. + commands []*config.TemplateConfig + + // depsMap is the set of dependencies shared across all templates. + depsMap map[string]dep.Dependency +} + +// runTemplate is used to run a particular template. It takes as input the +// template to run and a shared run context that allows sharing of information +// between templates. The run returns a potentially nil render event and any +// error that occured. The render event is nil in the case that the template has +// been already rendered and is a once template or if there is an error. +func (r *Runner) runTemplate(tmpl *template.Template, runCtx *templateRunCtx) (*RenderEvent, error) { + log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID()) + + // Grab the last event + r.renderEventsLock.RLock() + lastEvent := r.renderEvents[tmpl.ID()] + r.renderEventsLock.RUnlock() + + // Create the event + event := &RenderEvent{ + Template: tmpl, + TemplateConfigs: r.templateConfigsFor(tmpl), + } + + if lastEvent != nil { + event.LastWouldRender = lastEvent.LastWouldRender + event.LastDidRender = lastEvent.LastDidRender + } + + // Check if we are currently the leader instance + isLeader := true + if r.dedup != nil { + isLeader = r.dedup.IsLeader(tmpl) + } + + // If we are in once mode and this template was already rendered, move + // onto the next one. We do not want to re-render the template if we are + // in once mode, and we certainly do not want to re-run any commands. + if r.config.Once { + r.renderEventsLock.RLock() + event, ok := r.renderEvents[tmpl.ID()] + r.renderEventsLock.RUnlock() + if ok && (event.WouldRender || event.DidRender) { + log.Printf("[DEBUG] (runner) once mode and already rendered") + return nil, nil + } + } + + // Attempt to render the template, returning any missing dependencies and + // the rendered contents. If there are any missing dependencies, the + // contents cannot be rendered or trusted! + result, err := tmpl.Execute(&template.ExecuteInput{ + Brain: r.brain, + Env: r.childEnv(), + }) + if err != nil { + return nil, errors.Wrap(err, tmpl.Source()) + } + + // Grab the list of used and missing dependencies. 
+ missing, used := result.Missing, result.Used + + // Add the dependency to the list of dependencies for this runner. + for _, d := range used.List() { + // If we've taken over leadership for a template, we may have data + // that is cached, but not have the watcher. We must treat this as + // missing so that we create the watcher and re-run the template. + if isLeader && !r.watcher.Watching(d) { + missing.Add(d) + } + if _, ok := runCtx.depsMap[d.String()]; !ok { + runCtx.depsMap[d.String()] = d + } + } + + // Diff any missing dependencies the template reported with dependencies + // the watcher is watching. + unwatched := new(dep.Set) + for _, d := range missing.List() { + if !r.watcher.Watching(d) { + unwatched.Add(d) + } + } + + // Update the event with the new dependency information + event.MissingDeps = missing + event.UnwatchedDeps = unwatched + event.UsedDeps = used + event.UpdatedAt = time.Now().UTC() + + // If there are unwatched dependencies, start the watcher and exit since we + // won't have data. + if l := unwatched.Len(); l > 0 { + log.Printf("[DEBUG] (runner) was not watching %d dependencies", l) + for _, d := range unwatched.List() { + // If we are deduplicating, we must still handle non-sharable + // dependencies, since those will be ignored. + if isLeader || !d.CanShare() { + r.watcher.Add(d) + } + } + return event, nil + } + + // If the template is missing data for some dependencies then we are not + // ready to render and need to move on to the next one. + if l := missing.Len(); l > 0 { + log.Printf("[DEBUG] (runner) missing data for %d dependencies", l) + return event, nil + } + + // Trigger an update of the de-duplication manager + if r.dedup != nil && isLeader { + if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil { + log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err) + } + } + + // If quiescence is activated, start/update the timers and loop back around. + // We do not want to render the templates yet. + if q, ok := r.quiescenceMap[tmpl.ID()]; ok { + q.tick() + // This event is being returned early for quiescence + event.ForQuiescence = true + return event, nil + } + + // For each template configuration that is tied to this template, attempt to + // render it to disk and accumulate commands for later use. + for _, templateConfig := range r.templateConfigsFor(tmpl) { + log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display()) + + // Render the template, taking dry mode into account + result, err := renderer.Render(&renderer.RenderInput{ + Backup: config.BoolVal(templateConfig.Backup), + Contents: result.Output, + CreateDestDirs: config.BoolVal(templateConfig.CreateDestDirs), + Dry: r.dry, + DryStream: r.outStream, + Path: config.StringVal(templateConfig.Destination), + Perms: config.FileModeVal(templateConfig.Perms), + }) + if err != nil { + return nil, errors.Wrap(err, "error rendering "+templateConfig.Display()) + } + + renderTime := time.Now().UTC() + + // If we would have rendered this template (but we did not because the + // contents were the same or something), we should consider this template + // rendered even though the contents on disk have not been updated. We + // will not fire commands unless the template was _actually_ rendered to + // disk though. + if result.WouldRender { + // This event would have rendered + event.WouldRender = true + event.LastWouldRender = renderTime + } + + // If we _actually_ rendered the template to disk, we want to run the + // appropriate commands. 
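The missing/unwatched split computed earlier in this function is the heart of the n-pass evaluation. A toy reduction of that classification; the real code uses dep.Set and the watcher, not string slices:

    package main

    import "fmt"

    // classify: dependencies the watcher is not yet polling are "unwatched";
    // dependencies without data in the brain are "missing".
    func classify(used []string, watching, haveData map[string]bool) (missing, unwatched []string) {
    	for _, d := range used {
    		if !watching[d] {
    			unwatched = append(unwatched, d)
    		}
    		if !haveData[d] {
    			missing = append(missing, d)
    		}
    	}
    	return missing, unwatched
    }

    func main() {
    	missing, unwatched := classify(
    		[]string{"kv(foo)", "health.service(web)"},
    		map[string]bool{"kv(foo)": true}, // already watching kv(foo)
    		map[string]bool{},                // no data received yet
    	)
    	fmt.Println(missing)   // [kv(foo) health.service(web)]
    	fmt.Println(unwatched) // [health.service(web)]
    }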
+ if result.DidRender { + log.Printf("[INFO] (runner) rendered %s", templateConfig.Display()) + + // This event did render + event.DidRender = true + event.LastDidRender = renderTime + + // Update the contents + event.Contents = result.Contents + + if !r.dry { + // If the template was rendered (changed) and we are not in dry-run mode, + // aggregate commands, ignoring previously known commands + // + // Future-self Q&A: Why not use a map for the commands instead of an + // array with an expensive lookup option? Well I'm glad you asked that + // future-self! One of the API promises is that commands are executed + // in the order in which they are provided in the TemplateConfig + // definitions. If we inserted commands into a map, we would lose that + // relative ordering and people would be unhappy. + // if config.StringPresent(ctemplate.Command) + if c := config.StringVal(templateConfig.Exec.Command); c != "" { + existing := findCommand(templateConfig, runCtx.commands) + if existing != nil { + log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)", + c, templateConfig.Display(), existing.Display()) + } else { + log.Printf("[DEBUG] (runner) appending command %q from %s", + c, templateConfig.Display()) + runCtx.commands = append(runCtx.commands, templateConfig) + } + } + } + } + } + + return event, nil +} + +// init() creates the Runner's underlying data structures and returns an error +// if any problems occur. +func (r *Runner) init() error { + // Ensure default configuration values + r.config = config.DefaultConfig().Merge(r.config) + r.config.Finalize() + + // Print the final config for debugging + result, err := json.Marshal(r.config) + if err != nil { + return err + } + log.Printf("[DEBUG] (runner) final config: %s", result) + + // Create the clientset + clients, err := newClientSet(r.config) + if err != nil { + return fmt.Errorf("runner: %s", err) + } + + // Create the watcher + watcher, err := newWatcher(r.config, clients, r.config.Once) + if err != nil { + return fmt.Errorf("runner: %s", err) + } + r.watcher = watcher + + numTemplates := len(*r.config.Templates) + templates := make([]*template.Template, 0, numTemplates) + ctemplatesMap := make(map[string]config.TemplateConfigs) + + // Iterate over each TemplateConfig, creating a new Template resource for each + // entry. Templates are parsed and saved, and a map of templates to their + // config templates is kept so templates can lookup their commands and output + // destinations. + for _, ctmpl := range *r.config.Templates { + tmpl, err := template.NewTemplate(&template.NewTemplateInput{ + Source: config.StringVal(ctmpl.Source), + Contents: config.StringVal(ctmpl.Contents), + ErrMissingKey: config.BoolVal(ctmpl.ErrMissingKey), + LeftDelim: config.StringVal(ctmpl.LeftDelim), + RightDelim: config.StringVal(ctmpl.RightDelim), + FunctionBlacklist: ctmpl.FunctionBlacklist, + SandboxPath: config.StringVal(ctmpl.SandboxPath), + }) + if err != nil { + return err + } + + if _, ok := ctemplatesMap[tmpl.ID()]; !ok { + templates = append(templates, tmpl) + } + + if _, ok := ctemplatesMap[tmpl.ID()]; !ok { + ctemplatesMap[tmpl.ID()] = make([]*config.TemplateConfig, 0, 1) + } + ctemplatesMap[tmpl.ID()] = append(ctemplatesMap[tmpl.ID()], ctmpl) + } + + // Convert the map of templates (which was only used to ensure uniqueness) + // back into an array of templates. 
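The append-or-skip queueing above, backed by findCommand later in this file, preserves TemplateConfig order while suppressing duplicate commands. Reduced to its essentials:

    package main

    import "fmt"

    // appendUnique keeps the first occurrence of each command in place,
    // so commands still run in TemplateConfig order.
    func appendUnique(queue []string, cmd string) []string {
    	for _, existing := range queue {
    		if existing == cmd {
    			return queue // already queued; keep original position
    		}
    	}
    	return append(queue, cmd)
    }

    func main() {
    	var queue []string
    	for _, c := range []string{"reload-a", "reload-b", "reload-a"} {
    		queue = appendUnique(queue, c)
    	}
    	fmt.Println(queue) // [reload-a reload-b]
    }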
+ r.templates = templates + + r.renderEvents = make(map[string]*RenderEvent, numTemplates) + r.dependencies = make(map[string]dep.Dependency) + + r.renderedCh = make(chan struct{}, 1) + r.renderEventCh = make(chan struct{}, 1) + + r.ctemplatesMap = ctemplatesMap + r.inStream = os.Stdin + r.outStream = os.Stdout + r.errStream = os.Stderr + r.brain = template.NewBrain() + + r.ErrCh = make(chan error) + r.DoneCh = make(chan struct{}) + + r.quiescenceMap = make(map[string]*quiescence) + r.quiescenceCh = make(chan *template.Template) + + if *r.config.Dedup.Enabled { + if r.config.Once { + log.Printf("[INFO] (runner) disabling de-duplication in once mode") + } else { + r.dedup, err = NewDedupManager(r.config.Dedup, clients, r.brain, r.templates) + if err != nil { + return err + } + } + } + + return nil +} + +// diffAndUpdateDeps iterates through the current map of dependencies on this +// runner and stops the watcher for any deps that are no longer required. +// +// At the end of this function, the given depsMap is converted to a slice and +// stored on the runner. +func (r *Runner) diffAndUpdateDeps(depsMap map[string]dep.Dependency) { + r.dependenciesLock.Lock() + defer r.dependenciesLock.Unlock() + + // Diff and up the list of dependencies, stopping any unneeded watchers. + log.Printf("[DEBUG] (runner) diffing and updating dependencies") + + for key, d := range r.dependencies { + if _, ok := depsMap[key]; !ok { + log.Printf("[DEBUG] (runner) %s is no longer needed", d) + r.watcher.Remove(d) + r.brain.Forget(d) + } else { + log.Printf("[DEBUG] (runner) %s is still needed", d) + } + } + + r.dependencies = depsMap +} + +// TemplateConfigFor returns the TemplateConfig for the given Template +func (r *Runner) templateConfigsFor(tmpl *template.Template) []*config.TemplateConfig { + return r.ctemplatesMap[tmpl.ID()] +} + +// TemplateConfigMapping returns a mapping between the template ID and the set +// of TemplateConfig represented by the template ID +func (r *Runner) TemplateConfigMapping() map[string][]*config.TemplateConfig { + // this method is primarily used to support embedding consul-template + // in other applications (ex. Nomad) + m := make(map[string][]*config.TemplateConfig, len(r.ctemplatesMap)) + + for id, set := range r.ctemplatesMap { + ctmpls := make([]*config.TemplateConfig, len(set)) + m[id] = ctmpls + for i, ctmpl := range set { + ctmpls[i] = ctmpl + } + } + + return m +} + +// allTemplatesRendered returns true if all the templates in this Runner have +// been rendered at least one time. +func (r *Runner) allTemplatesRendered() bool { + r.renderEventsLock.RLock() + defer r.renderEventsLock.RUnlock() + + for _, tmpl := range r.templates { + event, rendered := r.renderEvents[tmpl.ID()] + if !rendered { + return false + } + + // Skip evaluation of events from quiescence as they will + // be default unrendered as we are still waiting for the + // specified period + if event.ForQuiescence { + continue + } + + // The template might already exist on disk with the exact contents, but + // we still want to count that as "rendered" [GH-1000]. + if !event.DidRender && !event.WouldRender { + return false + } + } + + return true +} + +// childEnv creates a map of environment variables for child processes to have +// access to configurations in Consul Template's configuration. 
+func (r *Runner) childEnv() []string { + var m = make(map[string]string) + + if config.StringPresent(r.config.Consul.Address) { + m["CONSUL_HTTP_ADDR"] = config.StringVal(r.config.Consul.Address) + } + + if config.BoolVal(r.config.Consul.Auth.Enabled) { + m["CONSUL_HTTP_AUTH"] = r.config.Consul.Auth.String() + } + + m["CONSUL_HTTP_SSL"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Enabled)) + m["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Verify)) + + if config.StringPresent(r.config.Vault.Address) { + m["VAULT_ADDR"] = config.StringVal(r.config.Vault.Address) + } + + if !config.BoolVal(r.config.Vault.SSL.Verify) { + m["VAULT_SKIP_VERIFY"] = "true" + } + + if config.StringPresent(r.config.Vault.SSL.Cert) { + m["VAULT_CLIENT_CERT"] = config.StringVal(r.config.Vault.SSL.Cert) + } + + if config.StringPresent(r.config.Vault.SSL.Key) { + m["VAULT_CLIENT_KEY"] = config.StringVal(r.config.Vault.SSL.Key) + } + + if config.StringPresent(r.config.Vault.SSL.CaPath) { + m["VAULT_CAPATH"] = config.StringVal(r.config.Vault.SSL.CaPath) + } + + if config.StringPresent(r.config.Vault.SSL.CaCert) { + m["VAULT_CACERT"] = config.StringVal(r.config.Vault.SSL.CaCert) + } + + if config.StringPresent(r.config.Vault.SSL.ServerName) { + m["VAULT_TLS_SERVER_NAME"] = config.StringVal(r.config.Vault.SSL.ServerName) + } + + // Append runner-supplied env (this is supplied programmatically). + for k, v := range r.Env { + m[k] = v + } + + e := make([]string, 0, len(m)) + for k, v := range m { + e = append(e, k+"="+v) + } + return e +} + +// storePid is used to write out a PID file to disk. +func (r *Runner) storePid() error { + path := config.StringVal(r.config.PidFile) + if path == "" { + return nil + } + + log.Printf("[INFO] creating pid file at %q", path) + + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + return fmt.Errorf("runner: could not open pid file: %s", err) + } + defer f.Close() + + pid := os.Getpid() + _, err = f.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("runner: could not write to pid file: %s", err) + } + return nil +} + +// deletePid is used to remove the PID on exit. +func (r *Runner) deletePid() error { + path := config.StringVal(r.config.PidFile) + if path == "" { + return nil + } + + log.Printf("[DEBUG] removing pid file at %q", path) + + stat, err := os.Stat(path) + if err != nil { + return fmt.Errorf("runner: could not remove pid file: %s", err) + } + if stat.IsDir() { + return fmt.Errorf("runner: specified pid file path is directory") + } + + err = os.Remove(path) + if err != nil { + return fmt.Errorf("runner: could not remove pid file: %s", err) + } + return nil +} + +// SetOutStream modifies runner output stream. Defaults to stdout. +func (r *Runner) SetOutStream(out io.Writer) { + r.outStream = out +} + +// SetErrStream modifies runner error stream. Defaults to stderr. +func (r *Runner) SetErrStream(err io.Writer) { + r.errStream = err +} + +// spawnChildInput is used as input to spawn a child process. +type spawnChildInput struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Command string + Timeout time.Duration + Env []string + ReloadSignal os.Signal + KillSignal os.Signal + KillTimeout time.Duration + Splay time.Duration +} + +// spawnChild spawns a child process with the given inputs and returns the +// resulting child. 
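spawnChild, defined next, hands the configured command string to go-shellwords before exec, with environment expansion and backtick substitution enabled. A small standalone example of that tokenization; the command string is illustrative:

    package main

    import (
    	"fmt"

    	shellwords "github.com/mattn/go-shellwords"
    )

    func main() {
    	// Mirror the parser settings used below.
    	p := shellwords.NewParser()
    	p.ParseEnv = true
    	p.ParseBacktick = true

    	args, err := p.Parse(`sh -c 'echo hello'`)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%q\n", args) // ["sh" "-c" "echo hello"]
    }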
+func spawnChild(i *spawnChildInput) (*child.Child, error) { + p := shellwords.NewParser() + p.ParseEnv = true + p.ParseBacktick = true + args, err := p.Parse(i.Command) + if err != nil { + return nil, errors.Wrap(err, "failed parsing command") + } + + child, err := child.New(&child.NewInput{ + Stdin: i.Stdin, + Stdout: i.Stdout, + Stderr: i.Stderr, + Command: args[0], + Args: args[1:], + Env: i.Env, + Timeout: i.Timeout, + ReloadSignal: i.ReloadSignal, + KillSignal: i.KillSignal, + KillTimeout: i.KillTimeout, + Splay: i.Splay, + }) + if err != nil { + return nil, errors.Wrap(err, "error creating child") + } + + if err := child.Start(); err != nil { + return nil, errors.Wrap(err, "child") + } + return child, nil +} + +// quiescence is an internal representation of a single template's quiescence +// state. +type quiescence struct { + template *template.Template + min time.Duration + max time.Duration + ch chan *template.Template + timer *time.Timer + deadline time.Time +} + +// newQuiescence creates a new quiescence timer for the given template. +func newQuiescence(ch chan *template.Template, min, max time.Duration, t *template.Template) *quiescence { + return &quiescence{ + template: t, + min: min, + max: max, + ch: ch, + } +} + +// tick updates the minimum quiescence timer. +func (q *quiescence) tick() { + now := time.Now() + + // If this is the first tick, set up the timer and calculate the max + // deadline. + if q.timer == nil { + q.timer = time.NewTimer(q.min) + go func() { + select { + case <-q.timer.C: + q.ch <- q.template + } + }() + + q.deadline = now.Add(q.max) + return + } + + // Snooze the timer for the min time, or snooze less if we are coming + // up against the max time. If the timer has already fired and the reset + // doesn't work that's ok because we guarantee that the channel gets our + // template which means that we are obsolete and a fresh quiescence will + // be set up. + if now.Add(q.min).Before(q.deadline) { + q.timer.Reset(q.min) + } else if dur := q.deadline.Sub(now); dur > 0 { + q.timer.Reset(dur) + } +} + +// findCommand searches the list of template configs for the given command and +// returns it if it exists. +func findCommand(c *config.TemplateConfig, templates []*config.TemplateConfig) *config.TemplateConfig { + needle := config.StringVal(c.Exec.Command) + for _, t := range templates { + if needle == config.StringVal(t.Exec.Command) { + return t + } + } + return nil +} + +// newClientSet creates a new client set from the given config. 
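The snooze arithmetic in tick above pushes the timer out by min on each event, but never past the max deadline fixed at the first event. As a worked sketch of just that arithmetic:

    package main

    import (
    	"fmt"
    	"time"
    )

    // nextFire: snooze by min while that stays inside the max window,
    // otherwise clip to whatever remains before the deadline.
    func nextFire(now, deadline time.Time, min time.Duration) time.Duration {
    	if now.Add(min).Before(deadline) {
    		return min
    	}
    	if d := deadline.Sub(now); d > 0 {
    		return d
    	}
    	return 0
    }

    func main() {
    	start := time.Now()
    	deadline := start.Add(10 * time.Second) // max = 10s

    	// Early in the window, each event snoozes by the full min.
    	fmt.Println(nextFire(start, deadline, 2*time.Second)) // 2s

    	// Near the deadline, the snooze is clipped so max is honored.
    	fmt.Println(nextFire(start.Add(9*time.Second), deadline, 2*time.Second)) // 1s
    }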
+func newClientSet(c *config.Config) (*dep.ClientSet, error) { + clients := dep.NewClientSet() + + if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{ + Address: config.StringVal(c.Consul.Address), + Token: config.StringVal(c.Consul.Token), + AuthEnabled: config.BoolVal(c.Consul.Auth.Enabled), + AuthUsername: config.StringVal(c.Consul.Auth.Username), + AuthPassword: config.StringVal(c.Consul.Auth.Password), + SSLEnabled: config.BoolVal(c.Consul.SSL.Enabled), + SSLVerify: config.BoolVal(c.Consul.SSL.Verify), + SSLCert: config.StringVal(c.Consul.SSL.Cert), + SSLKey: config.StringVal(c.Consul.SSL.Key), + SSLCACert: config.StringVal(c.Consul.SSL.CaCert), + SSLCAPath: config.StringVal(c.Consul.SSL.CaPath), + ServerName: config.StringVal(c.Consul.SSL.ServerName), + TransportDialKeepAlive: config.TimeDurationVal(c.Consul.Transport.DialKeepAlive), + TransportDialTimeout: config.TimeDurationVal(c.Consul.Transport.DialTimeout), + TransportDisableKeepAlives: config.BoolVal(c.Consul.Transport.DisableKeepAlives), + TransportIdleConnTimeout: config.TimeDurationVal(c.Consul.Transport.IdleConnTimeout), + TransportMaxIdleConns: config.IntVal(c.Consul.Transport.MaxIdleConns), + TransportMaxIdleConnsPerHost: config.IntVal(c.Consul.Transport.MaxIdleConnsPerHost), + TransportTLSHandshakeTimeout: config.TimeDurationVal(c.Consul.Transport.TLSHandshakeTimeout), + }); err != nil { + return nil, fmt.Errorf("runner: %s", err) + } + + if err := clients.CreateVaultClient(&dep.CreateVaultClientInput{ + Address: config.StringVal(c.Vault.Address), + Namespace: config.StringVal(c.Vault.Namespace), + Token: config.StringVal(c.Vault.Token), + UnwrapToken: config.BoolVal(c.Vault.UnwrapToken), + SSLEnabled: config.BoolVal(c.Vault.SSL.Enabled), + SSLVerify: config.BoolVal(c.Vault.SSL.Verify), + SSLCert: config.StringVal(c.Vault.SSL.Cert), + SSLKey: config.StringVal(c.Vault.SSL.Key), + SSLCACert: config.StringVal(c.Vault.SSL.CaCert), + SSLCAPath: config.StringVal(c.Vault.SSL.CaPath), + ServerName: config.StringVal(c.Vault.SSL.ServerName), + TransportDialKeepAlive: config.TimeDurationVal(c.Vault.Transport.DialKeepAlive), + TransportDialTimeout: config.TimeDurationVal(c.Vault.Transport.DialTimeout), + TransportDisableKeepAlives: config.BoolVal(c.Vault.Transport.DisableKeepAlives), + TransportIdleConnTimeout: config.TimeDurationVal(c.Vault.Transport.IdleConnTimeout), + TransportMaxIdleConns: config.IntVal(c.Vault.Transport.MaxIdleConns), + TransportMaxIdleConnsPerHost: config.IntVal(c.Vault.Transport.MaxIdleConnsPerHost), + TransportTLSHandshakeTimeout: config.TimeDurationVal(c.Vault.Transport.TLSHandshakeTimeout), + }); err != nil { + return nil, fmt.Errorf("runner: %s", err) + } + + return clients, nil +} + +// newWatcher creates a new watcher. +func newWatcher(c *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) { + log.Printf("[INFO] (runner) creating watcher") + + w, err := watch.NewWatcher(&watch.NewWatcherInput{ + Clients: clients, + MaxStale: config.TimeDurationVal(c.MaxStale), + Once: c.Once, + RenewVault: clients.Vault().Token() != "" && config.BoolVal(c.Vault.RenewToken), + VaultAgentTokenFile: config.StringVal(c.Vault.VaultAgentTokenFile), + RetryFuncConsul: watch.RetryFunc(c.Consul.Retry.RetryFunc()), + // TODO: Add a sane default retry - right now this only affects "local" + // dependencies like reading a file from disk. 
+ RetryFuncDefault: nil, + RetryFuncVault: watch.RetryFunc(c.Vault.Retry.RetryFunc()), + VaultGrace: config.TimeDurationVal(c.Vault.Grace), + VaultToken: clients.Vault().Token(), + }) + if err != nil { + return nil, errors.Wrap(err, "runner") + } + return w, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/renderer/file_perms.go b/vendor/github.com/hashicorp/consul-template/renderer/file_perms.go new file mode 100644 index 000000000000..d89b2f02c6ea --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/renderer/file_perms.go @@ -0,0 +1,22 @@ +//+build !windows + +package renderer + +import ( + "os" + "syscall" +) + +func preserveFilePermissions(path string, fileInfo os.FileInfo) error { + sysInfo := fileInfo.Sys() + if sysInfo != nil { + stat, ok := sysInfo.(*syscall.Stat_t) + if ok { + if err := os.Chown(path, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go b/vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go new file mode 100644 index 000000000000..cae35cf51ac5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go @@ -0,0 +1,9 @@ +//+build windows + +package renderer + +import "os" + +func preserveFilePermissions(path string, fileInfo os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/hashicorp/consul-template/renderer/renderer.go b/vendor/github.com/hashicorp/consul-template/renderer/renderer.go new file mode 100644 index 000000000000..59931c19e628 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/renderer/renderer.go @@ -0,0 +1,182 @@ +package renderer + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +const ( + // DefaultFilePerms are the default file permissions for files rendered onto + // disk when a specific file permission has not already been specified. + DefaultFilePerms = 0644 +) + +var ( + // ErrNoParentDir is the error returned with the parent directory is missing + // and the user disabled it. + ErrNoParentDir = errors.New("parent directory is missing") + + // ErrMissingDest is the error returned with the destination is empty. + ErrMissingDest = errors.New("missing destination") +) + +// RenderInput is used as input to the render function. +type RenderInput struct { + Backup bool + Contents []byte + CreateDestDirs bool + Dry bool + DryStream io.Writer + Path string + Perms os.FileMode +} + +// RenderResult is returned and stored. It contains the status of the render +// operation. +type RenderResult struct { + // DidRender indicates if the template rendered to disk. This will be false in + // the event of an error, but it will also be false in dry mode or when the + // template on disk matches the new result. + DidRender bool + + // WouldRender indicates if the template would have rendered to disk. This + // will return false in the event of an error, but will return true in dry + // mode or when the template on disk matches the new result. + WouldRender bool + + // Contents are the actual contents of the resulting template from the render + // operation. + Contents []byte +} + +// Render atomically renders a file contents to disk, returning a result of +// whether it would have rendered and actually did render. 
+func Render(i *RenderInput) (*RenderResult, error) { + existing, err := ioutil.ReadFile(i.Path) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "failed reading file") + } + + if bytes.Equal(existing, i.Contents) { + return &RenderResult{ + DidRender: false, + WouldRender: true, + Contents: existing, + }, nil + } + + if i.Dry { + fmt.Fprintf(i.DryStream, "> %s\n%s", i.Path, i.Contents) + } else { + if err := AtomicWrite(i.Path, i.CreateDestDirs, i.Contents, i.Perms, i.Backup); err != nil { + return nil, errors.Wrap(err, "failed writing file") + } + } + + return &RenderResult{ + DidRender: true, + WouldRender: true, + Contents: i.Contents, + }, nil +} + +// AtomicWrite accepts a destination path and the template contents. It writes +// the template contents to a TempFile on disk, returning if any errors occur. +// +// If the parent destination directory does not exist, it will be created +// automatically with permissions 0755. To use a different permission, create +// the directory first or use `chmod` in a Command. +// +// If the destination path exists, all attempts will be made to preserve the +// existing file permissions. If those permissions cannot be read, an error is +// returned. If the file does not exist, it will be created automatically with +// permissions 0644. To use a different permission, create the destination file +// first or use `chmod` in a Command. +// +// If no errors occur, the Tempfile is "renamed" (moved) to the destination +// path. +func AtomicWrite(path string, createDestDirs bool, contents []byte, perms os.FileMode, backup bool) error { + if path == "" { + return ErrMissingDest + } + + parent := filepath.Dir(path) + if _, err := os.Stat(parent); os.IsNotExist(err) { + if createDestDirs { + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + } else { + return ErrNoParentDir + } + } + + f, err := ioutil.TempFile(parent, "") + if err != nil { + return err + } + defer os.Remove(f.Name()) + + if _, err := f.Write(contents); err != nil { + return err + } + + if err := f.Sync(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + // If the user did not explicitly set permissions, attempt to lookup the + // current permissions on the file. If the file does not exist, fall back to + // the default. Otherwise, inherit the current permissions. + if perms == 0 { + currentInfo, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + perms = DefaultFilePerms + } else { + return err + } + } else { + perms = currentInfo.Mode() + + // The file exists, so try to preserve the ownership as well. + if err := preserveFilePermissions(f.Name(), currentInfo); err != nil { + log.Printf("[WARN] (runner) could not preserve file permissions for %q: %v", + f.Name(), err) + } + } + } + + if err := os.Chmod(f.Name(), perms); err != nil { + return err + } + + // If we got this far, it means we are about to save the file. Copy the + // current file so we have a backup. Note that os.Link preserves the Mode. 
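Before the backup-and-rename step below, the core of AtomicWrite is the classic temp-file-then-rename flow; a pared-down, runnable sketch with permission inheritance and backups omitted:

    package main

    import (
    	"io/ioutil"
    	"log"
    	"os"
    	"path/filepath"
    )

    // atomicWrite writes to a temp file in the destination directory, syncs,
    // then renames. Rename within one filesystem is atomic, so readers never
    // observe a partially written file.
    func atomicWrite(path string, contents []byte, perms os.FileMode) error {
    	f, err := ioutil.TempFile(filepath.Dir(path), "")
    	if err != nil {
    		return err
    	}
    	defer os.Remove(f.Name()) // fails harmlessly after a successful rename

    	if _, err := f.Write(contents); err != nil {
    		return err
    	}
    	if err := f.Sync(); err != nil {
    		return err
    	}
    	if err := f.Close(); err != nil {
    		return err
    	}
    	if err := os.Chmod(f.Name(), perms); err != nil {
    		return err
    	}
    	return os.Rename(f.Name(), path)
    }

    func main() {
    	if err := atomicWrite("/tmp/ct-example.txt", []byte("hello\n"), 0644); err != nil {
    		log.Fatal(err)
    	}
    }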
+ if backup { + bak, old := path+".bak", path+".old.bak" + os.Rename(bak, old) // ignore error + if err := os.Link(path, bak); err != nil { + log.Printf("[WARN] (runner) could not backup %q: %v", path, err) + } else { + os.Remove(old) // ignore error + } + } + + if err := os.Rename(f.Name(), path); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go b/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go new file mode 100644 index 000000000000..f21cbd5d60af --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go @@ -0,0 +1,32 @@ +package signals + +import ( + "reflect" + + "github.com/mitchellh/mapstructure" +) + +// StringToSignalFunc parses a string as a signal based on the signal lookup +// table. If the user supplied an empty string or nil, a special "nil signal" +// is returned. Clients should check for this value and set the response back +// nil after mapstructure finishes parsing. +func StringToSignalFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + + if t.String() != "os.Signal" { + return data, nil + } + + if data == nil || data.(string) == "" { + return SIGNIL, nil + } + + return Parse(data.(string)) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/nil.go b/vendor/github.com/hashicorp/consul-template/signals/nil.go new file mode 100644 index 000000000000..2c20645b3bef --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/nil.go @@ -0,0 +1,7 @@ +package signals + +// NilSignal is a special signal that is blank or "nil" +type NilSignal int + +func (s *NilSignal) String() string { return "SIGNIL" } +func (s *NilSignal) Signal() {} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals.go b/vendor/github.com/hashicorp/consul-template/signals/signals.go new file mode 100644 index 000000000000..dacc3e62c1b2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals.go @@ -0,0 +1,35 @@ +package signals + +import ( + "fmt" + "os" + "sort" + "strings" +) + +// SIGNIL is the nil signal. +var SIGNIL os.Signal = new(NilSignal) + +// ValidSignals is the list of all valid signals. This is built at runtime +// because it is OS-dependent. +var ValidSignals []string + +func init() { + valid := make([]string, 0, len(SignalLookup)) + for k := range SignalLookup { + valid = append(valid, k) + } + sort.Strings(valid) + ValidSignals = valid +} + +// Parse parses the given string as a signal. If the signal is not found, +// an error is returned. 
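Parse, defined next, is a case-insensitive lookup into the per-platform tables that follow. In use, assuming the vendored import path:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/consul-template/signals"
    )

    func main() {
    	// Lookup is case-insensitive thanks to strings.ToUpper in Parse.
    	sig, err := signals.Parse("sighup")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(sig) // hangup

    	// Unknown names fail with the sorted list of valid signals.
    	if _, err := signals.Parse("SIGBOGUS"); err != nil {
    		fmt.Println(err)
    	}
    }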
+func Parse(s string) (os.Signal, error) { + sig, ok := SignalLookup[strings.ToUpper(s)] + if !ok { + return nil, fmt.Errorf("invalid signal %q - valid signals are %q", + s, ValidSignals) + } + return sig, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go b/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go new file mode 100644 index 000000000000..0b614e93bd29 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go @@ -0,0 +1,40 @@ +// +build linux darwin freebsd openbsd solaris netbsd + +package signals + +import ( + "os" + "syscall" +) + +var SignalLookup = map[string]os.Signal{ + "SIGABRT": syscall.SIGABRT, + "SIGALRM": syscall.SIGALRM, + "SIGBUS": syscall.SIGBUS, + "SIGCHLD": syscall.SIGCHLD, + "SIGCONT": syscall.SIGCONT, + "SIGFPE": syscall.SIGFPE, + "SIGHUP": syscall.SIGHUP, + "SIGILL": syscall.SIGILL, + "SIGINT": syscall.SIGINT, + "SIGIO": syscall.SIGIO, + "SIGIOT": syscall.SIGIOT, + "SIGKILL": syscall.SIGKILL, + "SIGPIPE": syscall.SIGPIPE, + "SIGPROF": syscall.SIGPROF, + "SIGQUIT": syscall.SIGQUIT, + "SIGSEGV": syscall.SIGSEGV, + "SIGSTOP": syscall.SIGSTOP, + "SIGSYS": syscall.SIGSYS, + "SIGTERM": syscall.SIGTERM, + "SIGTRAP": syscall.SIGTRAP, + "SIGTSTP": syscall.SIGTSTP, + "SIGTTIN": syscall.SIGTTIN, + "SIGTTOU": syscall.SIGTTOU, + "SIGURG": syscall.SIGURG, + "SIGUSR1": syscall.SIGUSR1, + "SIGUSR2": syscall.SIGUSR2, + "SIGWINCH": syscall.SIGWINCH, + "SIGXCPU": syscall.SIGXCPU, + "SIGXFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go b/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go new file mode 100644 index 000000000000..e1204a67dea1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package signals + +import ( + "os" + "syscall" +) + +var SignalLookup = map[string]os.Signal{ + "SIGABRT": syscall.SIGABRT, + "SIGALRM": syscall.SIGALRM, + "SIGBUS": syscall.SIGBUS, + "SIGFPE": syscall.SIGFPE, + "SIGHUP": syscall.SIGHUP, + "SIGILL": syscall.SIGILL, + "SIGINT": syscall.SIGINT, + "SIGKILL": syscall.SIGKILL, + "SIGPIPE": syscall.SIGPIPE, + "SIGQUIT": syscall.SIGQUIT, + "SIGSEGV": syscall.SIGSEGV, + "SIGTERM": syscall.SIGTERM, + "SIGTRAP": syscall.SIGTRAP, +} diff --git a/vendor/github.com/hashicorp/consul-template/template/brain.go b/vendor/github.com/hashicorp/consul-template/template/brain.go new file mode 100644 index 000000000000..149fc4f9f3cf --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/brain.go @@ -0,0 +1,74 @@ +package template + +import ( + "sync" + + dep "github.com/hashicorp/consul-template/dependency" +) + +// Brain is what Template uses to determine the values that are +// available for template parsing. +type Brain struct { + sync.RWMutex + + // data is the map of individual dependencies and the most recent data for + // that dependency. + data map[string]interface{} + + // receivedData is an internal tracker of which dependencies have stored data + // in the brain. + receivedData map[string]struct{} +} + +// NewBrain creates a new Brain with empty values for each +// of the key structs. +func NewBrain() *Brain { + return &Brain{ + data: make(map[string]interface{}), + receivedData: make(map[string]struct{}), + } +} + +// Remember accepts a dependency and the data to store associated with that +// dep. This function converts the given data to a proper type and stores +// it interally. 
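Remember and Recall below depend on the Brain's two-map design: data alone cannot distinguish "no value received yet" from "received a nil value", which is exactly the renderability question the Runner asks. A toy reduction of that design, not the vendored types:

    package main

    import "fmt"

    type brain struct {
    	data     map[string]interface{}
    	received map[string]struct{}
    }

    func (b *brain) remember(key string, v interface{}) {
    	b.data[key] = v
    	b.received[key] = struct{}{}
    }

    func (b *brain) recall(key string) (interface{}, bool) {
    	if _, ok := b.received[key]; !ok {
    		return nil, false // never received: template not renderable yet
    	}
    	return b.data[key], true // may legitimately be nil
    }

    func main() {
    	b := &brain{data: map[string]interface{}{}, received: map[string]struct{}{}}
    	b.remember("file(/etc/motd)", nil)

    	v, ok := b.recall("file(/etc/motd)")
    	fmt.Println(v, ok) // <nil> true: received, but empty

    	v, ok = b.recall("kv(foo)")
    	fmt.Println(v, ok) // <nil> false: not received yet
    }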
+func (b *Brain) Remember(d dep.Dependency, data interface{}) { + b.Lock() + defer b.Unlock() + + b.data[d.String()] = data + b.receivedData[d.String()] = struct{}{} +} + +// Recall gets the current value for the given dependency in the Brain. +func (b *Brain) Recall(d dep.Dependency) (interface{}, bool) { + b.RLock() + defer b.RUnlock() + + // If we have not received data for this dependency, return now. + if _, ok := b.receivedData[d.String()]; !ok { + return nil, false + } + + return b.data[d.String()], true +} + +// ForceSet is used to force set the value of a dependency +// for a given hash code +func (b *Brain) ForceSet(hashCode string, data interface{}) { + b.Lock() + defer b.Unlock() + + b.data[hashCode] = data + b.receivedData[hashCode] = struct{}{} +} + +// Forget accepts a dependency and removes all associated data with this +// dependency. It also resets the "receivedData" internal map. +func (b *Brain) Forget(d dep.Dependency) { + b.Lock() + defer b.Unlock() + + delete(b.data, d.String()) + delete(b.receivedData, d.String()) +} diff --git a/vendor/github.com/hashicorp/consul-template/template/funcs.go b/vendor/github.com/hashicorp/consul-template/template/funcs.go new file mode 100644 index 000000000000..2114279ca719 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/funcs.go @@ -0,0 +1,1322 @@ +package template + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "text/template" + "time" + + "github.com/BurntSushi/toml" + dep "github.com/hashicorp/consul-template/dependency" + socktmpl "github.com/hashicorp/go-sockaddr/template" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" +) + +// now is function that represents the current time in UTC. This is here +// primarily for the tests to override times. +var now = func() time.Time { return time.Now().UTC() } + +// datacentersFunc returns or accumulates datacenter dependencies. +func datacentersFunc(b *Brain, used, missing *dep.Set) func(ignore ...bool) ([]string, error) { + return func(i ...bool) ([]string, error) { + result := []string{} + + var ignore bool + switch len(i) { + case 0: + ignore = false + case 1: + ignore = i[0] + default: + return result, fmt.Errorf("datacenters: wrong number of arguments, expected 0 or 1"+ + ", but got %d", len(i)) + } + + d, err := dep.NewCatalogDatacentersQuery(ignore) + if err != nil { + return result, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]string), nil + } + + missing.Add(d) + + return result, nil + } +} + +// envFunc returns a function which checks the value of an environment variable. +// Invokers can specify their own environment, which takes precedences over any +// real environment variables +func envFunc(env []string) func(string) (string, error) { + return func(s string) (string, error) { + for _, e := range env { + split := strings.SplitN(e, "=", 2) + k, v := split[0], split[1] + if k == s { + return v, nil + } + } + return os.Getenv(s), nil + } +} + +// executeTemplateFunc executes the given template in the context of the +// parent. If an argument is specified, it will be used as the context instead. +// This can be used for nested template definitions. 
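datacentersFunc above establishes the protocol that every function in this file repeats: record the dependency as used, return cached data if the brain has it, otherwise record it as missing and return a zero value so rendering can proceed and be retried. A toy reduction of that contract, not the vendored types:

    package main

    import "fmt"

    // toyKeyFunc: output is only trusted once the missing set is empty.
    func toyKeyFunc(brain map[string]string, used, missing map[string]bool) func(string) string {
    	return func(key string) string {
    		used[key] = true
    		if v, ok := brain[key]; ok {
    			return v
    		}
    		missing[key] = true
    		return ""
    	}
    }

    func main() {
    	brain := map[string]string{"config/max_conns": "100"}
    	used, missing := map[string]bool{}, map[string]bool{}
    	get := toyKeyFunc(brain, used, missing)

    	fmt.Println(get("config/max_conns")) // "100"
    	fmt.Println(get("config/timeout"))   // "" and now marked missing
    	fmt.Println(used, missing)
    }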
+func executeTemplateFunc(t *template.Template) func(string, ...interface{}) (string, error) { + return func(s string, data ...interface{}) (string, error) { + var dot interface{} + switch len(data) { + case 0: + dot = nil + case 1: + dot = data[0] + default: + return "", fmt.Errorf("executeTemplate: wrong number of arguments, expected 1 or 2"+ + ", but got %d", len(data)+1) + } + var b bytes.Buffer + if err := t.ExecuteTemplate(&b, s, dot); err != nil { + return "", err + } + return b.String(), nil + } +} + +// fileFunc returns or accumulates file dependencies. +func fileFunc(b *Brain, used, missing *dep.Set, sandboxPath string) func(string) (string, error) { + return func(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + err := pathInSandbox(sandboxPath, s) + if err != nil { + return "", err + } + d, err := dep.NewFileQuery(s) + if err != nil { + return "", err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + if value == nil { + return "", nil + } + return value.(string), nil + } + + missing.Add(d) + + return "", nil + } +} + +// keyFunc returns or accumulates key dependencies. +func keyFunc(b *Brain, used, missing *dep.Set) func(string) (string, error) { + return func(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + + d, err := dep.NewKVGetQuery(s) + if err != nil { + return "", err + } + d.EnableBlocking() + + used.Add(d) + + if value, ok := b.Recall(d); ok { + if value == nil { + return "", nil + } + return value.(string), nil + } + + missing.Add(d) + + return "", nil + } +} + +// keyExistsFunc returns true if a key exists, false otherwise. +func keyExistsFunc(b *Brain, used, missing *dep.Set) func(string) (bool, error) { + return func(s string) (bool, error) { + if len(s) == 0 { + return false, nil + } + + d, err := dep.NewKVGetQuery(s) + if err != nil { + return false, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value != nil, nil + } + + missing.Add(d) + + return false, nil + } +} + +// keyWithDefaultFunc returns or accumulates key dependencies that have a +// default value. +func keyWithDefaultFunc(b *Brain, used, missing *dep.Set) func(string, string) (string, error) { + return func(s, def string) (string, error) { + if len(s) == 0 { + return def, nil + } + + d, err := dep.NewKVGetQuery(s) + if err != nil { + return "", err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + if value == nil || value.(string) == "" { + return def, nil + } + return value.(string), nil + } + + missing.Add(d) + + return def, nil + } +} + +func safeLsFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) { + // call lsFunc but explicitly mark that empty data set returned on monitored KV prefix is NOT safe + return lsFunc(b, used, missing, false) +} + +// lsFunc returns or accumulates keyPrefix dependencies. 
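As a rough usage sketch for the function defined next (the key prefix is illustrative): `ls` iterates only the top-level pairs under a prefix, while `safeLs` additionally refuses to render while the prefix returns no data:

```
{{ range ls "config/app" }}{{ .Key }}={{ .Value }}
{{ end }}

{{/* safeLs marks the dependency missing instead of rendering empty */}}
{{ range safeLs "config/app" }}{{ .Key }}={{ .Value }}
{{ end }}
```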
+func lsFunc(b *Brain, used, missing *dep.Set, emptyIsSafe bool) func(string) ([]*dep.KeyPair, error) { + return func(s string) ([]*dep.KeyPair, error) { + result := []*dep.KeyPair{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.NewKVListQuery(s) + if err != nil { + return result, err + } + + used.Add(d) + + // Only return non-empty top-level keys + if value, ok := b.Recall(d); ok { + for _, pair := range value.([]*dep.KeyPair) { + if pair.Key != "" && !strings.Contains(pair.Key, "/") { + result = append(result, pair) + } + } + + if len(result) == 0 { + if emptyIsSafe { + // Operator used potentially unsafe ls function in the template instead of the safeLs + return result, nil + } + } else { + // non empty result is good so we just return the data + return result, nil + } + + // If we reach this part of the code result is completely empty as value returned no KV pairs + // Operator selected to use safeLs on the specific KV prefix so we will refuse to render template + // by marking d as missing + } + + // b.Recall either returned an error or safeLs entered unsafe case + missing.Add(d) + + return result, nil + } +} + +// nodeFunc returns or accumulates catalog node dependency. +func nodeFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.CatalogNode, error) { + return func(s ...string) (*dep.CatalogNode, error) { + + d, err := dep.NewCatalogNodeQuery(strings.Join(s, "")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.(*dep.CatalogNode), nil + } + + missing.Add(d) + + return nil, nil + } +} + +// nodesFunc returns or accumulates catalog node dependencies. +func nodesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.Node, error) { + return func(s ...string) ([]*dep.Node, error) { + result := []*dep.Node{} + + d, err := dep.NewCatalogNodesQuery(strings.Join(s, "")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]*dep.Node), nil + } + + missing.Add(d) + + return result, nil + } +} + +// secretFunc returns or accumulates secret dependencies from Vault. +func secretFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.Secret, error) { + return func(s ...string) (*dep.Secret, error) { + var result *dep.Secret + + if len(s) == 0 { + return result, nil + } + + // TODO: Refactor into separate template functions + path, rest := s[0], s[1:] + data := make(map[string]interface{}) + for _, str := range rest { + parts := strings.SplitN(str, "=", 2) + if len(parts) != 2 { + return result, fmt.Errorf("not k=v pair %q", str) + } + + k, v := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + data[k] = v + } + + var d dep.Dependency + var err error + + if len(rest) == 0 { + d, err = dep.NewVaultReadQuery(path) + } else { + d, err = dep.NewVaultWriteQuery(path, data) + } + + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + result = value.(*dep.Secret) + return result, nil + } + + missing.Add(d) + + return result, nil + } +} + +// secretsFunc returns or accumulates a list of secret dependencies from Vault. 
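The listing function defined next combines naturally with `secret`; a hedged sketch (the Vault paths and the `value` data field are illustrative):

```
{{ range secrets "secret/" }}
{{ with secret (printf "secret/%s" .) }}{{ .Data.value }}{{ end }}
{{ end }}
```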
+func secretsFunc(b *Brain, used, missing *dep.Set) func(string) ([]string, error) { + return func(s string) ([]string, error) { + var result []string + + if len(s) == 0 { + return result, nil + } + + d, err := dep.NewVaultListQuery(s) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + result = value.([]string) + return result, nil + } + + missing.Add(d) + + return result, nil + } +} + +// byMeta returns Services grouped by one or many ServiceMeta fields. +func byMeta(meta string, services []*dep.HealthService) (groups map[string][]*dep.HealthService, err error) { + re := regexp.MustCompile("[^a-zA-Z0-9_-]") + normalize := func(x string) string { + return re.ReplaceAllString(x, "_") + } + getOrDefault := func(m map[string]string, key string) string { + realKey := strings.TrimSuffix(key, "|int") + if val, ok := m[realKey]; ok { + if val != "" { + return val + } + } + if strings.HasSuffix(key, "|int") { + return "0" + } + return fmt.Sprintf("_no_%s_", realKey) + } + + metas := strings.Split(meta, ",") + + groups = make(map[string][]*dep.HealthService) + + for _, s := range services { + sm := s.ServiceMeta + keyParts := []string{} + for _, meta := range metas { + value := getOrDefault(sm, meta) + if strings.HasSuffix(meta, "|int") { + value = getOrDefault(sm, meta) + i, err := strconv.Atoi(value) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("cannot parse %v as number ", value)) + } + value = fmt.Sprintf("%05d", i) + } + keyParts = append(keyParts, normalize(value)) + } + key := strings.Join(keyParts, "_") + groups[key] = append(groups[key], s) + } + + return groups, nil +} + +// serviceFunc returns or accumulates health service dependencies. +func serviceFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.HealthService, error) { + return func(s ...string) ([]*dep.HealthService, error) { + result := []*dep.HealthService{} + + if len(s) == 0 || s[0] == "" { + return result, nil + } + + d, err := dep.NewHealthServiceQuery(strings.Join(s, "|")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]*dep.HealthService), nil + } + + missing.Add(d) + + return result, nil + } +} + +// servicesFunc returns or accumulates catalog services dependencies. +func servicesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.CatalogSnippet, error) { + return func(s ...string) ([]*dep.CatalogSnippet, error) { + result := []*dep.CatalogSnippet{} + + d, err := dep.NewCatalogServicesQuery(strings.Join(s, "")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]*dep.CatalogSnippet), nil + } + + missing.Add(d) + + return result, nil + } +} + +func safeTreeFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) { + // call treeFunc but explicitly mark that empty data set returned on monitored KV prefix is NOT safe + return treeFunc(b, used, missing, false) +} + +// treeFunc returns or accumulates keyPrefix dependencies. 
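The function defined next differs from `ls` in that it keeps nested keys; a sketch (keys illustrative): given `config/a` and `config/sub/b`, `ls "config"` yields only `a`, while `tree "config"` yields both:

```
{{ range tree "config" }}{{ .Key }}={{ .Value }}
{{ end }}
```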
+func treeFunc(b *Brain, used, missing *dep.Set, emptyIsSafe bool) func(string) ([]*dep.KeyPair, error) {
+ return func(s string) ([]*dep.KeyPair, error) {
+ result := []*dep.KeyPair{}
+
+ if len(s) == 0 {
+ return result, nil
+ }
+
+ d, err := dep.NewKVListQuery(s)
+ if err != nil {
+ return result, err
+ }
+
+ used.Add(d)
+
+ // Only return non-empty top-level keys
+ if value, ok := b.Recall(d); ok {
+ for _, pair := range value.([]*dep.KeyPair) {
+ parts := strings.Split(pair.Key, "/")
+ if parts[len(parts)-1] != "" {
+ result = append(result, pair)
+ }
+ }
+
+ if len(result) == 0 {
+ if emptyIsSafe {
+ // Operator used potentially unsafe tree function in the template instead of the safeTree
+ return result, nil
+ }
+ } else {
+ // non empty result is good so we just return the data
+ return result, nil
+ }
+
+ // If we reach this part of the code result is completely empty as value returned no KV pairs
+ // Operator selected to use safeTree on the specific KV prefix so we will refuse to render template
+ // by marking d as missing
+ }
+
+ // b.Recall either returned an error or safeTree entered unsafe case
+ missing.Add(d)
+
+ return result, nil
+ }
+}
+
+// base64Decode decodes the given string as a base64 string, returning an error
+// if it fails.
+func base64Decode(s string) (string, error) {
+ v, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return "", errors.Wrap(err, "base64Decode")
+ }
+ return string(v), nil
+}
+
+// base64Encode encodes the given value into a string represented as base64.
+func base64Encode(s string) (string, error) {
+ return base64.StdEncoding.EncodeToString([]byte(s)), nil
+}
+
+// base64URLDecode decodes the given string as a URL-safe base64 string.
+func base64URLDecode(s string) (string, error) {
+ v, err := base64.URLEncoding.DecodeString(s)
+ if err != nil {
+ return "", errors.Wrap(err, "base64URLDecode")
+ }
+ return string(v), nil
+}
+
+// base64URLEncode encodes the given string to be URL-safe.
+func base64URLEncode(s string) (string, error) {
+ return base64.URLEncoding.EncodeToString([]byte(s)), nil
+}
+
+// byKey accepts a slice of KV pairs and returns a map of the top-level
+// key to all its subkeys. For example:
+//
+// elasticsearch/a //=> "1"
+// elasticsearch/b //=> "2"
+// redis/a/b //=> "3"
+//
+// Passing the result from Consul through byKey would yield:
+//
+// map[string]map[string]*dep.KeyPair{
+// "elasticsearch": {"a": &dep.KeyPair{Key: "a", Value: "1"}, "b": &dep.KeyPair{Key: "b", Value: "2"}},
+// "redis": {"a/b": &dep.KeyPair{Key: "a/b", Value: "3"}},
+// }
+//
+// Note that the top-most key is stripped from the Key value. Keys that have no
+// prefix after stripping are removed from the list.
+func byKey(pairs []*dep.KeyPair) (map[string]map[string]*dep.KeyPair, error) {
+ m := make(map[string]map[string]*dep.KeyPair)
+ for _, pair := range pairs {
+ parts := strings.Split(pair.Key, "/")
+ top := parts[0]
+ key := strings.Join(parts[1:], "/")
+
+ if key == "" {
+ // Do not add a key if it has no prefix after stripping.
+ continue
+ }
+
+ if _, ok := m[top]; !ok {
+ m[top] = make(map[string]*dep.KeyPair)
+ }
+
+ newPair := *pair
+ newPair.Key = key
+ m[top][key] = &newPair
+ }
+
+ return m, nil
+}
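A sketch of the pipeline usage enabled by byTag, defined next (the service name is illustrative, and Address/Port are fields of health services):

```
{{ range $tag, $instances := service "web" | byTag }}
{{ $tag }}:
{{ range $instances }}  {{ .Address }}:{{ .Port }}
{{ end }}
{{ end }}
```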
+// byTag is a template func that takes the provided services and
+// produces a map based on Service tags.
+//
+// The map key is a string representing the service tag. The map value is a
+// slice of Services which have the tag assigned.
+func byTag(in interface{}) (map[string][]interface{}, error) {
+ m := make(map[string][]interface{})
+
+ switch typed := in.(type) {
+ case nil:
+ case []*dep.CatalogSnippet:
+ for _, s := range typed {
+ for _, t := range s.Tags {
+ m[t] = append(m[t], s)
+ }
+ }
+ case []*dep.CatalogService:
+ for _, s := range typed {
+ for _, t := range s.ServiceTags {
+ m[t] = append(m[t], s)
+ }
+ }
+ case []*dep.HealthService:
+ for _, s := range typed {
+ for _, t := range s.Tags {
+ m[t] = append(m[t], s)
+ }
+ }
+ default:
+ return nil, fmt.Errorf("byTag: wrong argument type %T", in)
+ }
+
+ return m, nil
+}
+
+// contains is a function that has the arguments of "in" reversed and is
+// designed to be used as a pipe instead of a function:
+//
+// {{ l | contains "thing" }}
+//
+func contains(v, l interface{}) (bool, error) {
+ return in(l, v)
+}
+
+// containsSomeFunc returns functions to implement each of the following:
+//
+// 1. containsAll - true if (∀x ∈ v then x ∈ l); false otherwise
+// 2. containsAny - true if (∃x ∈ v such that x ∈ l); false otherwise
+// 3. containsNone - true if (∀x ∈ v then x ∉ l); false otherwise
+// 4. containsNotAll - true if (∃x ∈ v such that x ∉ l); false otherwise
+//
+// ret_true - return true at end of loop for none/all; false for any/notall
+// invert - invert block test for all/notall
+func containsSomeFunc(retTrue, invert bool) func([]interface{}, interface{}) (bool, error) {
+ return func(v []interface{}, l interface{}) (bool, error) {
+ for i := 0; i < len(v); i++ {
+ if ok, _ := in(l, v[i]); ok != invert {
+ return !retTrue, nil
+ }
+ }
+ return retTrue, nil
+ }
+}
+
+// explode is used to expand a list of keypairs into a deeply-nested hash.
+func explode(pairs []*dep.KeyPair) (map[string]interface{}, error) {
+ m := make(map[string]interface{})
+ for _, pair := range pairs {
+ if err := explodeHelper(m, pair.Key, pair.Value, pair.Key); err != nil {
+ return nil, errors.Wrap(err, "explode")
+ }
+ }
+ return m, nil
+}
+
+// explodeHelper is a recursive helper for explode and explodeMap
+func explodeHelper(m map[string]interface{}, k string, v interface{}, p string) error {
+ if strings.Contains(k, "/") {
+ parts := strings.Split(k, "/")
+ top := parts[0]
+ key := strings.Join(parts[1:], "/")
+
+ if _, ok := m[top]; !ok {
+ m[top] = make(map[string]interface{})
+ }
+ nest, ok := m[top].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("not a map: %q: %q already has value %q", p, top, m[top])
+ }
+ return explodeHelper(nest, key, v, k)
+ }
+
+ if k != "" {
+ m[k] = v
+ }
+
+ return nil
+}
+
+// explodeMap turns a single-level map into a deeply-nested hash.
+func explodeMap(mapIn map[string]interface{}) (map[string]interface{}, error) {
+ mapOut := make(map[string]interface{})
+
+ var keys []string
+ for k := range mapIn {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for i := range keys {
+ if err := explodeHelper(mapOut, keys[i], mapIn[keys[i]], keys[i]); err != nil {
+ return nil, errors.Wrap(err, "explodeMap")
+ }
+ }
+ return mapOut, nil
+}
+
+// in searches for a given value in a given interface.
+func in(l, v interface{}) (bool, error) {
+ lv := reflect.ValueOf(l)
+ vv := reflect.ValueOf(v)
+
+ switch lv.Kind() {
+ case reflect.Array, reflect.Slice:
+ // if the slice contains 'interface' elements, then the element needs to be extracted directly to examine its type,
+ // otherwise it will just resolve to 'interface'.
+ var interfaceSlice []interface{}
+ if reflect.TypeOf(l).Elem().Kind() == reflect.Interface {
+ interfaceSlice = l.([]interface{})
+ }
+
+ for i := 0; i < lv.Len(); i++ {
+ var lvv reflect.Value
+ if interfaceSlice != nil {
+ lvv = reflect.ValueOf(interfaceSlice[i])
+ } else {
+ lvv = lv.Index(i)
+ }
+
+ switch lvv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch vv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if vv.Int() == lvv.Int() {
+ return true, nil
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch vv.Kind() {
+ case reflect.Float32, reflect.Float64:
+ if vv.Float() == lvv.Float() {
+ return true, nil
+ }
+ }
+ case reflect.String:
+ if vv.Type() == lvv.Type() && vv.String() == lvv.String() {
+ return true, nil
+ }
+ }
+ }
+ case reflect.String:
+ if vv.Type() == lv.Type() && strings.Contains(lv.String(), vv.String()) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+// indent prefixes each line of a string with the specified number of spaces
+func indent(spaces int, s string) (string, error) {
+ if spaces < 0 {
+ return "", fmt.Errorf("indent value must be a non-negative integer")
+ }
+ var output, prefix []byte
+ var sp bool
+ var size int
+ prefix = []byte(strings.Repeat(" ", spaces))
+ sp = true
+ for _, c := range []byte(s) {
+ if sp && c != '\n' {
+ output = append(output, prefix...)
+ size += spaces
+ }
+ output = append(output, c)
+ sp = c == '\n'
+ size++
+ }
+ return string(output[:size]), nil
+}
+
+// loop accepts varying parameters and differs its behavior. If given one
+// parameter, loop will return a channel that begins at 0 and loops until the
+// given int, increasing the index by 1 each iteration. If given two parameters,
+// loop will return a channel that begins at the first parameter and loops
+// up to but not including the second parameter.
+//
+// // Prints 0 1 2 3 4
+// for i := range loop(5) {
+// print(i)
+// }
+//
+// // Prints 5 6 7
+// for i := range loop(5, 8) {
+// print(i)
+// }
+//
+func loop(ifaces ...interface{}) (<-chan int64, error) {
+
+ to64 := func(i interface{}) (int64, error) {
+ v := reflect.ValueOf(i)
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
+ return int64(v.Int()), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(v.Uint()), nil
+ case reflect.String:
+ return parseInt(v.String())
+ }
+ return 0, fmt.Errorf("loop: bad argument type: %T", i)
+ }
+
+ var i1, i2 interface{}
+ switch len(ifaces) {
+ case 1:
+ i1, i2 = 0, ifaces[0]
+ case 2:
+ i1, i2 = ifaces[0], ifaces[1]
+ default:
+ return nil, fmt.Errorf("loop: wrong number of arguments, expected "+
+ "1 or 2, but got %d", len(ifaces))
+ }
+
+ start, err := to64(i1)
+ if err != nil {
+ return nil, err
+ }
+ stop, err := to64(i2)
+ if err != nil {
+ return nil, err
+ }
+
+ ch := make(chan int64)
+
+ go func() {
+ for i := start; i < stop; i++ {
+ ch <- i
+ }
+ close(ch)
+ }()
+
+ return ch, nil
+}
+
+// join is a version of strings.Join that can be piped
+func join(sep string, a []string) (string, error) {
+ return strings.Join(a, sep), nil
+}
+
+// trimSpace is a version of strings.TrimSpace that can be piped
+func trimSpace(s string) (string, error) {
+ return strings.TrimSpace(s), nil
+}
+
+// parseBool parses a string into a boolean
+func parseBool(s string) (bool, error) {
+ if s == "" {
+ return false, nil
+ }
+
+ result, err := strconv.ParseBool(s)
+ if err != nil {
+ return false, errors.Wrap(err, "parseBool")
+ }
+ return result, nil
+}
+
+// parseFloat parses a string into a 64-bit float
+func parseFloat(s string) (float64, error) {
+ if s == "" {
+ return 0.0, nil
+ }
+
+ result, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return 0, errors.Wrap(err, "parseFloat")
+ }
+ return result, nil
+}
+
+// parseInt parses a string into a base 10 int
+func parseInt(s string) (int64, error) {
+ if s == "" {
+ return 0, nil
+ }
+
+ result, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, errors.Wrap(err, "parseInt")
+ }
+ return result, nil
+}
+
+// parseJSON returns a structure for valid JSON
+func parseJSON(s string) (interface{}, error) {
+ if s == "" {
+ return map[string]interface{}{}, nil
+ }
+
+ var data interface{}
+ if err := json.Unmarshal([]byte(s), &data); err != nil {
+ return nil, err
+ }
+ return data, nil
+}
+
+// parseUint parses a string into a base 10 uint
+func parseUint(s string) (uint64, error) {
+ if s == "" {
+ return 0, nil
+ }
+
+ result, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, errors.Wrap(err, "parseUint")
+ }
+ return result, nil
+}
+
+// plugin executes a subprocess as the given command string. It is assumed the
+// resulting command returns JSON which is then parsed and returned as the
+// value for use in the template.
+func plugin(name string, args ...string) (string, error) {
+ if name == "" {
+ return "", nil
+ }
+
+ stdout, stderr := new(bytes.Buffer), new(bytes.Buffer)
+
+ // Strip and trim each arg or else some plugins get confused with the newline
+ // characters
+ jsons := make([]string, 0, len(args))
+ for _, arg := range args {
+ if v := strings.TrimSpace(arg); v != "" {
+ jsons = append(jsons, v)
+ }
+ }
+
+ cmd := exec.Command(name, jsons...)
+ cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Start(); err != nil { + return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", + name, err, stdout.Bytes(), stderr.Bytes()) + } + + done := make(chan error, 1) + go func() { + done <- cmd.Wait() + }() + + select { + case <-time.After(30 * time.Second): + if cmd.Process != nil { + if err := cmd.Process.Kill(); err != nil { + return "", fmt.Errorf("exec %q: failed to kill", name) + } + } + <-done // Allow the goroutine to exit + return "", fmt.Errorf("exec %q: did not finish in 30s", name) + case err := <-done: + if err != nil { + return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", + name, err, stdout.Bytes(), stderr.Bytes()) + } + } + + return strings.TrimSpace(stdout.String()), nil +} + +// replaceAll replaces all occurrences of a value in a string with the given +// replacement value. +func replaceAll(f, t, s string) (string, error) { + return strings.Replace(s, f, t, -1), nil +} + +// regexReplaceAll replaces all occurrences of a regular expression with +// the given replacement value. +func regexReplaceAll(re, pl, s string) (string, error) { + compiled, err := regexp.Compile(re) + if err != nil { + return "", err + } + return compiled.ReplaceAllString(s, pl), nil +} + +// regexMatch returns true or false if the string matches +// the given regular expression +func regexMatch(re, s string) (bool, error) { + compiled, err := regexp.Compile(re) + if err != nil { + return false, err + } + return compiled.MatchString(s), nil +} + +// split is a version of strings.Split that can be piped +func split(sep, s string) ([]string, error) { + s = strings.TrimSpace(s) + if s == "" { + return []string{}, nil + } + return strings.Split(s, sep), nil +} + +// timestamp returns the current UNIX timestamp in UTC. If an argument is +// specified, it will be used to format the timestamp. +func timestamp(s ...string) (string, error) { + switch len(s) { + case 0: + return now().Format(time.RFC3339), nil + case 1: + if s[0] == "unix" { + return strconv.FormatInt(now().Unix(), 10), nil + } + return now().Format(s[0]), nil + default: + return "", fmt.Errorf("timestamp: wrong number of arguments, expected 0 or 1"+ + ", but got %d", len(s)) + } +} + +// toLower converts the given string (usually by a pipe) to lowercase. +func toLower(s string) (string, error) { + return strings.ToLower(s), nil +} + +// toJSON converts the given structure into a deeply nested JSON string. +func toJSON(i interface{}) (string, error) { + result, err := json.Marshal(i) + if err != nil { + return "", errors.Wrap(err, "toJSON") + } + return string(bytes.TrimSpace(result)), err +} + +// toJSONPretty converts the given structure into a deeply nested pretty JSON +// string. +func toJSONPretty(m map[string]interface{}) (string, error) { + result, err := json.MarshalIndent(m, "", " ") + if err != nil { + return "", errors.Wrap(err, "toJSONPretty") + } + return string(bytes.TrimSpace(result)), err +} + +// toTitle converts the given string (usually by a pipe) to titlecase. +func toTitle(s string) (string, error) { + return strings.Title(s), nil +} + +// toUpper converts the given string (usually by a pipe) to uppercase. +func toUpper(s string) (string, error) { + return strings.ToUpper(s), nil +} + +// toYAML converts the given structure into a deeply nested YAML string. 
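A common pairing for the encoder defined next feeds exploded KV pairs through it; a sketch (the prefix is illustrative):

```
{{ tree "config" | explode | toYAML }}
```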
+func toYAML(m map[string]interface{}) (string, error) { + result, err := yaml.Marshal(m) + if err != nil { + return "", errors.Wrap(err, "toYAML") + } + return string(bytes.TrimSpace(result)), nil +} + +// toTOML converts the given structure into a deeply nested TOML string. +func toTOML(m map[string]interface{}) (string, error) { + buf := bytes.NewBuffer([]byte{}) + enc := toml.NewEncoder(buf) + if err := enc.Encode(m); err != nil { + return "", errors.Wrap(err, "toTOML") + } + result, err := ioutil.ReadAll(buf) + if err != nil { + return "", errors.Wrap(err, "toTOML") + } + return string(bytes.TrimSpace(result)), nil +} + +// add returns the sum of a and b. +func add(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() + bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() + int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) + bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() + bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() + float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() + float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", av, a) + } +} + +// subtract returns the difference of b from a. 
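Because the second operand of each math function is the piped value, the piped value acts as the left operand; a sketch (the KV path is illustrative):

```
{{ 10 | subtract 2 }}   {{/* renders 8 */}}
{{ key "app/count" | parseInt | multiply 3 }}
```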
+func subtract(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() - bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() - int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) - bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() - bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() - float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() - float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", av, a) + } +} + +// multiply returns the product of a and b. +func multiply(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() * bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() * int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) * bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() * bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() * float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() * float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", av, a) + } +} + +// divide returns the division of b from a. 
+func divide(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() / bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() / int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) / bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() / bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() / float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() / float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", av, a) + } +} + +// modulo returns the modulo of b from a. +func modulo(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() % bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() % int64(bv.Uint()), nil + default: + return nil, fmt.Errorf("modulo: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) % bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() % bv.Uint(), nil + default: + return nil, fmt.Errorf("modulo: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("modulo: unknown type for %q (%T)", av, a) + } +} + +// blacklisted always returns an error, to be used in place of blacklisted template functions +func blacklisted(...string) (string, error) { + return "", errors.New("function is disabled") +} + +// pathInSandbox returns an error if the provided path doesn't fall within the +// sandbox or if the file can't be evaluated (missing, invalid symlink, etc.) 
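A hedged, same-package sketch of the sandbox check described above (the paths are illustrative and assumed to exist so filepath.EvalSymlinks can resolve them; fmt is assumed imported):

```go
// Rejected: after symlink resolution, /etc/app/../passwd cleans to
// /etc/passwd, which is not relative to the sandbox.
if err := pathInSandbox("/etc/app", "/etc/app/../passwd"); err != nil {
	fmt.Println(err) // "'/etc/app/../passwd' is outside of sandbox"
}

// An empty sandbox disables the check entirely.
if err := pathInSandbox("", "/anywhere"); err == nil {
	fmt.Println("no sandbox configured, path allowed")
}
```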
+func pathInSandbox(sandbox, path string) error {
+ if sandbox != "" {
+ s, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return err
+ }
+ s, err = filepath.Rel(sandbox, s)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(s, "..") {
+ return fmt.Errorf("'%s' is outside of sandbox", path)
+ }
+ }
+ return nil
+}
+
+// sockaddr wraps go-sockaddr templating
+func sockaddr(args ...string) (string, error) {
+ t := fmt.Sprintf("{{ %s }} ", strings.Join(args, " "))
+ k, err := socktmpl.Parse(t)
+ if err != nil {
+ return "", err
+ }
+ return k, nil
+}
diff --git a/vendor/github.com/hashicorp/consul-template/template/scratch.go b/vendor/github.com/hashicorp/consul-template/template/scratch.go
new file mode 100644
index 000000000000..c3d959dc87d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul-template/template/scratch.go
@@ -0,0 +1,125 @@
+package template
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// Scratch is a wrapper around a map which is used by the template.
+type Scratch struct {
+ once sync.Once
+ sync.RWMutex
+ values map[string]interface{}
+}
+
+// Key returns a boolean indicating whether the given key exists in the map.
+func (s *Scratch) Key(k string) bool {
+ s.RLock()
+ defer s.RUnlock()
+ _, ok := s.values[k]
+ return ok
+}
+
+// Get returns a value previously set by Add or Set
+func (s *Scratch) Get(k string) interface{} {
+ s.RLock()
+ defer s.RUnlock()
+ return s.values[k]
+}
+
+// Set stores the value v at the key k. It will overwrite an existing value
+// if present.
+func (s *Scratch) Set(k string, v interface{}) string {
+ s.init()
+
+ s.Lock()
+ defer s.Unlock()
+ s.values[k] = v
+ return ""
+}
+
+// SetX behaves the same as Set, except it will not overwrite existing keys if
+// already present.
+func (s *Scratch) SetX(k string, v interface{}) string {
+ s.init()
+
+ s.Lock()
+ defer s.Unlock()
+ if _, ok := s.values[k]; !ok {
+ s.values[k] = v
+ }
+ return ""
+}
+
+// MapSet stores the value v into a key mk in the map named k.
+func (s *Scratch) MapSet(k, mk string, v interface{}) (string, error) {
+ s.init()
+
+ s.Lock()
+ defer s.Unlock()
+ return s.mapSet(k, mk, v, true)
+}
+
+// MapSetX behaves the same as MapSet, except it will not overwrite the map
+// key if it already exists.
+func (s *Scratch) MapSetX(k, mk string, v interface{}) (string, error) {
+ s.init()
+
+ s.Lock()
+ defer s.Unlock()
+ return s.mapSet(k, mk, v, false)
+}
+
+// mapSet sets the value in the map, overwriting if o is true. This function
+// does not perform locking; callers should lock before invoking.
+func (s *Scratch) mapSet(k, mk string, v interface{}, o bool) (string, error) {
+ if _, ok := s.values[k]; !ok {
+ s.values[k] = make(map[string]interface{})
+ }
+
+ typed, ok := s.values[k].(map[string]interface{})
+ if !ok {
+ return "", fmt.Errorf("%q is not a map", k)
+ }
+
+ if _, ok := typed[mk]; o || !ok {
+ typed[mk] = v
+ }
+ return "", nil
+}
+
+// MapValues returns the list of values in the map sorted by key.
+func (s *Scratch) MapValues(k string) ([]interface{}, error) {
+ s.init()
+
+ s.Lock()
+ defer s.Unlock()
+ if s.values == nil {
+ return nil, nil
+ }
+
+ typed, ok := s.values[k].(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+
+ keys := make([]string, 0, len(typed))
+ for k := range typed {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ sorted := make([]interface{}, len(keys))
+ for i, k := range keys {
+ sorted[i] = typed[k]
+ }
+ return sorted, nil
+}
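Scratch is exposed to templates via the `scratch` function registered later in this patch; a usage sketch (keys and values are illustrative):

```
{{ scratch.Set "greeting" "hello" }}{{ scratch.Get "greeting" }}

{{ scratch.MapSet "ports" "web" 8080 }}{{ scratch.MapSet "ports" "api" 9090 }}
{{ range scratch.MapValues "ports" }}{{ . }} {{ end }}
```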
+// init initializes the scratch.
+func (s *Scratch) init() {
+ if s.values == nil {
+ s.values = make(map[string]interface{})
+ }
+}
diff --git a/vendor/github.com/hashicorp/consul-template/template/template.go b/vendor/github.com/hashicorp/consul-template/template/template.go
new file mode 100644
index 000000000000..36da551834a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul-template/template/template.go
@@ -0,0 +1,303 @@
+package template
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/hex"
+ "io/ioutil"
+ "text/template"
+
+ "github.com/pkg/errors"
+
+ dep "github.com/hashicorp/consul-template/dependency"
+)
+
+var (
+ // ErrTemplateContentsAndSource is the error returned when a template
+ // specifies both a "source" and "content" argument, which is not valid.
+ ErrTemplateContentsAndSource = errors.New("template: cannot specify both 'source' and 'content'")
+
+ // ErrTemplateMissingContentsAndSource is the error returned when a template
+ // does not specify either a "source" or "content" argument, which is not
+ // valid.
+ ErrTemplateMissingContentsAndSource = errors.New("template: must specify exactly one of 'source' or 'content'")
+)
+
+// Template is the internal representation of an individual template to process.
+// The template retains the relationship between its contents and is
+// responsible for its own execution.
+type Template struct {
+ // contents is the string contents for the template. It is either given
+ // during template creation or read from disk when initialized.
+ contents string
+
+ // source is the original location of the template. This may be undefined if
+ // the template was dynamically defined.
+ source string
+
+ // leftDelim and rightDelim are the template delimiters.
+ leftDelim string
+ rightDelim string
+
+ // hexMD5 stores the hex version of the MD5
+ hexMD5 string
+
+ // errMissingKey causes the template processing to exit immediately if a map
+ // is indexed with a key that does not exist.
+ errMissingKey bool
+
+ // functionBlacklist are functions not permitted to be executed
+ // when we render this template
+ functionBlacklist []string
+
+ // sandboxPath adds a prefix to any path provided to the `file` function
+ // and causes an error if a relative path tries to traverse outside that
+ // prefix.
+ sandboxPath string
+}
+
+// NewTemplateInput is used as input when creating the template.
+type NewTemplateInput struct {
+ // Source is the location on disk to the file.
+ Source string
+
+ // Contents are the raw template contents.
+ Contents string
+
+ // ErrMissingKey causes the template parser to exit immediately with an error
+ // when a map is indexed with a key that does not exist.
+ ErrMissingKey bool
+
+ // LeftDelim and RightDelim are the template delimiters.
+ LeftDelim string
+ RightDelim string
+
+ // FunctionBlacklist are functions not permitted to be executed
+ // when we render this template
+ FunctionBlacklist []string
+
+ // SandboxPath adds a prefix to any path provided to the `file` function
+ // and causes an error if a relative path tries to traverse outside that
+ // prefix.
+ SandboxPath string
+}
+
+// NewTemplate creates and parses a new Consul Template template at the given
+// path. If the template does not exist, an error is returned. During
+// initialization, the template is read and is parsed for dependencies. Any
+// errors that occur are returned.
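Before the constructor itself, a hedged end-to-end sketch tying the pieces together, using only the API introduced in this patch (inline contents, so no Consul or Vault dependencies are involved; fmt and log are assumed imported):

```go
tpl, err := NewTemplate(&NewTemplateInput{
	Contents:          `result: {{ 2 | add 3 }}`,
	FunctionBlacklist: []string{"plugin"}, // plugin would render as disabled
})
if err != nil {
	log.Fatal(err)
}

// An empty Brain suffices here because no Consul/Vault functions are used;
// otherwise res.Missing would be non-empty and the output incomplete.
res, err := tpl.Execute(&ExecuteInput{Brain: NewBrain()})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s\n", res.Output) // result: 5
```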
+func NewTemplate(i *NewTemplateInput) (*Template, error) { + if i == nil { + i = &NewTemplateInput{} + } + + // Validate that we are either given the path or the explicit contents + if i.Source != "" && i.Contents != "" { + return nil, ErrTemplateContentsAndSource + } else if i.Source == "" && i.Contents == "" { + return nil, ErrTemplateMissingContentsAndSource + } + + var t Template + t.source = i.Source + t.contents = i.Contents + t.leftDelim = i.LeftDelim + t.rightDelim = i.RightDelim + t.errMissingKey = i.ErrMissingKey + t.functionBlacklist = i.FunctionBlacklist + t.sandboxPath = i.SandboxPath + + if i.Source != "" { + contents, err := ioutil.ReadFile(i.Source) + if err != nil { + return nil, errors.Wrap(err, "failed to read template") + } + t.contents = string(contents) + } + + // Compute the MD5, encode as hex + hash := md5.Sum([]byte(t.contents)) + t.hexMD5 = hex.EncodeToString(hash[:]) + + return &t, nil +} + +// ID returns the identifier for this template. +func (t *Template) ID() string { + return t.hexMD5 +} + +// Contents returns the raw contents of the template. +func (t *Template) Contents() string { + return t.contents +} + +// Source returns the filepath source of this template. +func (t *Template) Source() string { + if t.source == "" { + return "(dynamic)" + } + return t.source +} + +// ExecuteInput is used as input to the template's execute function. +type ExecuteInput struct { + // Brain is the brain where data for the template is stored. + Brain *Brain + + // Env is a custom environment provided to the template for envvar resolution. + // Values specified here will take precedence over any values in the + // environment when using the `env` function. + Env []string +} + +// ExecuteResult is the result of the template execution. +type ExecuteResult struct { + // Used is the set of dependencies that were used. + Used *dep.Set + + // Missing is the set of dependencies that were missing. + Missing *dep.Set + + // Output is the rendered result. + Output []byte +} + +// Execute evaluates this template in the provided context. +func (t *Template) Execute(i *ExecuteInput) (*ExecuteResult, error) { + if i == nil { + i = &ExecuteInput{} + } + + var used, missing dep.Set + + tmpl := template.New("") + tmpl.Delims(t.leftDelim, t.rightDelim) + + tmpl.Funcs(funcMap(&funcMapInput{ + t: tmpl, + brain: i.Brain, + env: i.Env, + used: &used, + missing: &missing, + functionBlacklist: t.functionBlacklist, + sandboxPath: t.sandboxPath, + })) + + if t.errMissingKey { + tmpl.Option("missingkey=error") + } else { + tmpl.Option("missingkey=zero") + } + + tmpl, err := tmpl.Parse(t.contents) + if err != nil { + return nil, errors.Wrap(err, "parse") + } + + // Execute the template into the writer + var b bytes.Buffer + if err := tmpl.Execute(&b, nil); err != nil { + return nil, errors.Wrap(err, "execute") + } + + return &ExecuteResult{ + Used: &used, + Missing: &missing, + Output: b.Bytes(), + }, nil +} + +// funcMapInput is input to the funcMap, which builds the template functions. +type funcMapInput struct { + t *template.Template + brain *Brain + env []string + functionBlacklist []string + sandboxPath string + used *dep.Set + missing *dep.Set +} + +// funcMap is the map of template functions to their respective functions. 
+func funcMap(i *funcMapInput) template.FuncMap { + var scratch Scratch + + r := template.FuncMap{ + // API functions + "datacenters": datacentersFunc(i.brain, i.used, i.missing), + "file": fileFunc(i.brain, i.used, i.missing, i.sandboxPath), + "key": keyFunc(i.brain, i.used, i.missing), + "keyExists": keyExistsFunc(i.brain, i.used, i.missing), + "keyOrDefault": keyWithDefaultFunc(i.brain, i.used, i.missing), + "ls": lsFunc(i.brain, i.used, i.missing, true), + "safeLs": safeLsFunc(i.brain, i.used, i.missing), + "node": nodeFunc(i.brain, i.used, i.missing), + "nodes": nodesFunc(i.brain, i.used, i.missing), + "secret": secretFunc(i.brain, i.used, i.missing), + "secrets": secretsFunc(i.brain, i.used, i.missing), + "service": serviceFunc(i.brain, i.used, i.missing), + "services": servicesFunc(i.brain, i.used, i.missing), + "tree": treeFunc(i.brain, i.used, i.missing, true), + "safeTree": safeTreeFunc(i.brain, i.used, i.missing), + + // Scratch + "scratch": func() *Scratch { return &scratch }, + + // Helper functions + "base64Decode": base64Decode, + "base64Encode": base64Encode, + "base64URLDecode": base64URLDecode, + "base64URLEncode": base64URLEncode, + "byKey": byKey, + "byTag": byTag, + "contains": contains, + "containsAll": containsSomeFunc(true, true), + "containsAny": containsSomeFunc(false, false), + "containsNone": containsSomeFunc(true, false), + "containsNotAll": containsSomeFunc(false, true), + "env": envFunc(i.env), + "executeTemplate": executeTemplateFunc(i.t), + "explode": explode, + "explodeMap": explodeMap, + "in": in, + "indent": indent, + "loop": loop, + "join": join, + "trimSpace": trimSpace, + "parseBool": parseBool, + "parseFloat": parseFloat, + "parseInt": parseInt, + "parseJSON": parseJSON, + "parseUint": parseUint, + "plugin": plugin, + "regexReplaceAll": regexReplaceAll, + "regexMatch": regexMatch, + "replaceAll": replaceAll, + "timestamp": timestamp, + "toLower": toLower, + "toJSON": toJSON, + "toJSONPretty": toJSONPretty, + "toTitle": toTitle, + "toTOML": toTOML, + "toUpper": toUpper, + "toYAML": toYAML, + "split": split, + "byMeta": byMeta, + "sockaddr": sockaddr, + // Math functions + "add": add, + "subtract": subtract, + "multiply": multiply, + "divide": divide, + "modulo": modulo, + } + + for _, bf := range i.functionBlacklist { + if _, ok := r[bf]; ok { + r[bf] = blacklisted + } + } + + return r +} diff --git a/vendor/github.com/hashicorp/consul-template/version/version.go b/vendor/github.com/hashicorp/consul-template/version/version.go new file mode 100644 index 000000000000..c0d0a6919cef --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/version/version.go @@ -0,0 +1,12 @@ +package version + +import "fmt" + +const Version = "0.22.0" + +var ( + Name string + GitCommit string + + HumanVersion = fmt.Sprintf("%s v%s (%s)", Name, Version, GitCommit) +) diff --git a/vendor/github.com/hashicorp/consul-template/watch/view.go b/vendor/github.com/hashicorp/consul-template/watch/view.go new file mode 100644 index 000000000000..bcef6c0b2187 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/view.go @@ -0,0 +1,308 @@ +package watch + +import ( + "fmt" + "log" + "math/rand" + "reflect" + "sync" + "time" + + dep "github.com/hashicorp/consul-template/dependency" +) + +const ( + // The amount of time to do a blocking query for + defaultWaitTime = 60 * time.Second +) + +// View is a representation of a Dependency and the most recent data it has +// received from Consul. 
+type View struct {
+ // dependency is the dependency that is associated with this View
+ dependency dep.Dependency
+
+ // clients is the list of clients to communicate upstream. This is passed
+ // directly to the dependency.
+ clients *dep.ClientSet
+
+ // data is the most-recently-received data from Consul for this View. It is
+ // accompanied by a series of locks and booleans to ensure consistency.
+ dataLock sync.RWMutex
+ data interface{}
+ receivedData bool
+ lastIndex uint64
+
+ // maxStale is the maximum amount of time to allow a query to be stale.
+ maxStale time.Duration
+
+ // once determines if this view should receive data exactly once.
+ once bool
+
+ // retryFunc is the function to invoke on failure to determine if a retry
+ // should be attempted.
+ retryFunc RetryFunc
+
+ // stopCh is used to stop polling on this View
+ stopCh chan struct{}
+
+ // vaultGrace is the grace period between a lease and the max TTL for which
+ // Consul Template will generate a new secret instead of renewing an existing
+ // one.
+ vaultGrace time.Duration
+}
+
+// NewViewInput is used as input to the NewView function.
+type NewViewInput struct {
+ // Dependency is the dependency to associate with the new view.
+ Dependency dep.Dependency
+
+ // Clients is the list of clients to communicate upstream. This is passed
+ // directly to the dependency.
+ Clients *dep.ClientSet
+
+ // MaxStale is the maximum amount of time a query response is allowed to be
+ // stale before forcing a read from the leader.
+ MaxStale time.Duration
+
+ // Once indicates this view should poll for data exactly one time.
+ Once bool
+
+ // RetryFunc is a function which dictates how this view should retry on
+ // upstream errors.
+ RetryFunc RetryFunc
+
+ // VaultGrace is the grace period between a lease and the max TTL for which
+ // Consul Template will generate a new secret instead of renewing an existing
+ // one.
+ VaultGrace time.Duration
+}
+
+// NewView constructs a new view with the given inputs.
+func NewView(i *NewViewInput) (*View, error) {
+ return &View{
+ dependency: i.Dependency,
+ clients: i.Clients,
+ maxStale: i.MaxStale,
+ once: i.Once,
+ retryFunc: i.RetryFunc,
+ stopCh: make(chan struct{}, 1),
+ vaultGrace: i.VaultGrace,
+ }, nil
+}
+
+// Dependency returns the dependency attached to this View.
+func (v *View) Dependency() dep.Dependency {
+ return v.dependency
+}
+
+// Data returns the most-recently-received data from Consul for this View.
+func (v *View) Data() interface{} {
+ v.dataLock.RLock()
+ defer v.dataLock.RUnlock()
+ return v.data
+}
+
+// DataAndLastIndex returns the most-recently-received data from Consul for
+// this view, along with the last index. This is atomic so you will get the
+// index that goes with the data you are fetching.
+func (v *View) DataAndLastIndex() (interface{}, uint64) {
+ v.dataLock.RLock()
+ defer v.dataLock.RUnlock()
+ return v.data, v.lastIndex
+}
+
+// poll queries the Consul instance for data using the fetch function, but also
+// accounts for interrupts on the interrupt channel. This allows the poll
+// function to be fired in a goroutine, but then halted even if the fetch
+// function is in the middle of a blocking query.
+func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {
+ var retries int
+
+ for {
+ doneCh := make(chan struct{}, 1)
+ successCh := make(chan struct{}, 1)
+ fetchErrCh := make(chan error, 1)
+ go v.fetch(doneCh, successCh, fetchErrCh)
+
+ WAIT:
+ select {
+ case <-doneCh:
+ // Reset the retry to avoid exponentially incrementing retries when we
+ // have some successful requests
+ retries = 0
+
+ log.Printf("[TRACE] (view) %s received data", v.dependency)
+ select {
+ case <-v.stopCh:
+ return
+ case viewCh <- v:
+ }
+
+ // If we are operating in once mode, do not loop - we received data at
+ // least once which is the API promise here.
+ if v.once {
+ return
+ }
+ case <-successCh:
+ // We successfully received a non-error response from the server. This
+ // does not mean we have data (that's dataCh's job), but rather this
+ // just resets the counter indicating we communicated successfully. For
+ // example, Consul may have an outage, but when it returns, the view
+ // is unchanged. We have to reset the retries counter, but not update the
+ // actual template.
+ log.Printf("[TRACE] (view) %s successful contact, resetting retries", v.dependency)
+ retries = 0
+ goto WAIT
+ case err := <-fetchErrCh:
+ if v.retryFunc != nil {
+ retry, sleep := v.retryFunc(retries)
+ if retry {
+ log.Printf("[WARN] (view) %s (retry attempt %d after %q)",
+ err, retries+1, sleep)
+ select {
+ case <-time.After(sleep):
+ retries++
+ continue
+ case <-v.stopCh:
+ return
+ }
+ }
+ }
+
+ log.Printf("[ERR] (view) %s (exceeded maximum retries)", err)
+
+ // Push the error back up to the watcher
+ select {
+ case <-v.stopCh:
+ return
+ case errCh <- err:
+ return
+ }
+ case <-v.stopCh:
+ log.Printf("[TRACE] (view) %s stopping poll (received on view stopCh)", v.dependency)
+ return
+ }
+ }
+}
+
+// fetch queries the Consul instance for the attached dependency. This API
+// promises that either data will be written to doneCh or an error will be
+// written to errCh. It is designed to be run in a goroutine that selects the
+// result of doneCh and errCh. It is assumed that only one instance of fetch
+// is running per View and therefore no locking or mutexes are used.
+func (v *View) fetch(doneCh, successCh chan<- struct{}, errCh chan<- error) {
+ log.Printf("[TRACE] (view) %s starting fetch", v.dependency)
+
+ var allowStale bool
+ if v.maxStale != 0 {
+ allowStale = true
+ }
+
+ for {
+ // If the view was stopped, short-circuit this loop. This prevents a bug
+ // where a view can get "lost" in the event Consul Template is reloaded.
+ select {
+ case <-v.stopCh:
+ return
+ default:
+ }
+
+ start := time.Now() // for rateLimiter below
+
+ data, rm, err := v.dependency.Fetch(v.clients, &dep.QueryOptions{
+ AllowStale: allowStale,
+ WaitTime: defaultWaitTime,
+ WaitIndex: v.lastIndex,
+ VaultGrace: v.vaultGrace,
+ })
+ if err != nil {
+ if err == dep.ErrStopped {
+ log.Printf("[TRACE] (view) %s reported stop", v.dependency)
+ } else {
+ errCh <- err
+ }
+ return
+ }
+
+ if rm == nil {
+ errCh <- fmt.Errorf("received nil response metadata - this is a bug " +
+ "and should be reported")
+ return
+ }
+
+ // If we got this far, we received data successfully. That data might not
+ // trigger a data update (because we could continue below), but we need to
+ // inform the poller to reset the retry count.
+ log.Printf("[TRACE] (view) %s marking successful data response", v.dependency) + select { + case successCh <- struct{}{}: + default: + } + + if allowStale && rm.LastContact > v.maxStale { + allowStale = false + log.Printf("[TRACE] (view) %s stale data (last contact exceeded max_stale)", v.dependency) + continue + } + + if v.maxStale != 0 { + allowStale = true + } + + if dur := rateLimiter(start); dur > 1 { + time.Sleep(dur) + } + + if rm.LastIndex == v.lastIndex { + log.Printf("[TRACE] (view) %s no new data (index was the same)", v.dependency) + continue + } + + v.dataLock.Lock() + if rm.LastIndex < v.lastIndex { + log.Printf("[TRACE] (view) %s had a lower index, resetting", v.dependency) + v.lastIndex = 0 + v.dataLock.Unlock() + continue + } + v.lastIndex = rm.LastIndex + + if v.receivedData && reflect.DeepEqual(data, v.data) { + log.Printf("[TRACE] (view) %s no new data (contents were the same)", v.dependency) + v.dataLock.Unlock() + continue + } + + if data == nil && rm.Block { + log.Printf("[TRACE] (view) %s asked for blocking query", v.dependency) + v.dataLock.Unlock() + continue + } + + v.data = data + v.receivedData = true + v.dataLock.Unlock() + + close(doneCh) + return + } +} + +const minDelayBetweenUpdates = time.Millisecond * 100 + +// return a duration to sleep to limit the frequency of upstream calls +func rateLimiter(start time.Time) time.Duration { + remaining := minDelayBetweenUpdates - time.Since(start) + if remaining > 0 { + dither := time.Duration(rand.Int63n(20000000)) // 0-20ms + return remaining + dither + } + return 0 +} + +// stop halts polling of this view. +func (v *View) stop() { + v.dependency.Stop() + close(v.stopCh) +} diff --git a/vendor/github.com/hashicorp/consul-template/watch/watcher.go b/vendor/github.com/hashicorp/consul-template/watch/watcher.go new file mode 100644 index 000000000000..fcbaa35217bd --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/watcher.go @@ -0,0 +1,253 @@ +package watch + +import ( + "log" + "sync" + "time" + + dep "github.com/hashicorp/consul-template/dependency" + "github.com/pkg/errors" +) + +// dataBufferSize is the default number of views to process in a batch. +const dataBufferSize = 2048 + +type RetryFunc func(int) (bool, time.Duration) + +// Watcher is a top-level manager for views that poll Consul for data. +type Watcher struct { + sync.Mutex + + // clients is the collection of API clients to talk to upstreams. + clients *dep.ClientSet + + // dataCh is the chan where Views will be published. + dataCh chan *View + + // errCh is the chan where any errors will be published. + errCh chan error + + // depViewMap is a map of Templates to Views. Templates are keyed by + // their string. + depViewMap map[string]*View + + // maxStale specifies the maximum staleness of a query response. + maxStale time.Duration + + // once signals if this watcher should tell views to retrieve data exactly + // one time instead of polling infinitely. + once bool + + // retryFuncs specifies the different ways to retry based on the upstream. + retryFuncConsul RetryFunc + retryFuncDefault RetryFunc + retryFuncVault RetryFunc + + // vaultGrace is the grace period between a lease and the max TTL for which + // Consul Template will generate a new secret instead of renewing an existing + // one. + vaultGrace time.Duration +} + +type NewWatcherInput struct { + // Clients is the client set to communicate with upstreams. + Clients *dep.ClientSet + + // MaxStale is the maximum staleness of a query. 
+	MaxStale time.Duration
+
+	// Once specifies this watcher should tell views to poll exactly once.
+	Once bool
+
+	// RenewVault indicates if this watcher should renew Vault tokens.
+	RenewVault bool
+
+	// VaultToken is the vault token to renew.
+	VaultToken string
+
+	// VaultAgentTokenFile is the path to the Vault Agent token file.
+	VaultAgentTokenFile string
+
+	// RetryFuncs specify the different ways to retry based on the upstream.
+	RetryFuncConsul  RetryFunc
+	RetryFuncDefault RetryFunc
+	RetryFuncVault   RetryFunc
+
+	// VaultGrace is the grace period between a lease and the max TTL for which
+	// Consul Template will generate a new secret instead of renewing an existing
+	// one.
+	VaultGrace time.Duration
+}
+
+// NewWatcher creates a new watcher using the given API client.
+func NewWatcher(i *NewWatcherInput) (*Watcher, error) {
+	w := &Watcher{
+		clients:          i.Clients,
+		depViewMap:       make(map[string]*View),
+		dataCh:           make(chan *View, dataBufferSize),
+		errCh:            make(chan error),
+		maxStale:         i.MaxStale,
+		once:             i.Once,
+		retryFuncConsul:  i.RetryFuncConsul,
+		retryFuncDefault: i.RetryFuncDefault,
+		retryFuncVault:   i.RetryFuncVault,
+		vaultGrace:       i.VaultGrace,
+	}
+
+	// Start a watcher for the Vault renew if that config was specified
+	if i.RenewVault {
+		vt, err := dep.NewVaultTokenQuery(i.VaultToken)
+		if err != nil {
+			return nil, errors.Wrap(err, "watcher")
+		}
+		if _, err := w.Add(vt); err != nil {
+			return nil, errors.Wrap(err, "watcher")
+		}
+	}
+
+	if len(i.VaultAgentTokenFile) > 0 {
+		vag, err := dep.NewVaultAgentTokenQuery(i.VaultAgentTokenFile)
+		if err != nil {
+			return nil, errors.Wrap(err, "watcher")
+		}
+		if _, err := w.Add(vag); err != nil {
+			return nil, errors.Wrap(err, "watcher")
+		}
+	}
+
+	return w, nil
+}
+
+// DataCh returns a read-only channel of Views which is populated when a view
+// receives data from its upstream.
+func (w *Watcher) DataCh() <-chan *View {
+	return w.dataCh
+}
+
+// ErrCh returns a read-only channel of errors returned by the upstream.
+func (w *Watcher) ErrCh() <-chan error {
+	return w.errCh
+}
+
+// Add adds the given dependency to the list of monitored dependencies and
+// starts the associated view. If the dependency already exists, no action is
+// taken.
+//
+// If the Dependency already existed, this function will return false. If the
+// view was successfully created, it will return true. If an error occurs while
+// creating the view, it will be returned here (but future errors returned by
+// the view will happen on the channel).
+func (w *Watcher) Add(d dep.Dependency) (bool, error) {
+	w.Lock()
+	defer w.Unlock()
+
+	log.Printf("[DEBUG] (watcher) adding %s", d)
+
+	if _, ok := w.depViewMap[d.String()]; ok {
+		log.Printf("[TRACE] (watcher) %s already exists, skipping", d)
+		return false, nil
+	}
+
+	// Choose the correct retry function based off of the dependency's type.
+	var retryFunc RetryFunc
+	switch d.Type() {
+	case dep.TypeConsul:
+		retryFunc = w.retryFuncConsul
+	case dep.TypeVault:
+		retryFunc = w.retryFuncVault
+	default:
+		retryFunc = w.retryFuncDefault
+	}
+
+	v, err := NewView(&NewViewInput{
+		Dependency: d,
+		Clients:    w.clients,
+		MaxStale:   w.maxStale,
+		Once:       w.once,
+		RetryFunc:  retryFunc,
+		VaultGrace: w.vaultGrace,
+	})
+	if err != nil {
+		return false, errors.Wrap(err, "watcher")
+	}
+
+	log.Printf("[TRACE] (watcher) %s starting", d)
+
+	w.depViewMap[d.String()] = v
+	go v.poll(w.dataCh, w.errCh)
+
+	return true, nil
+}
+
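As a rough usage sketch (editor's addition, not part of the vendored file): wire a watcher to a dependency and drain its channels. `dep.NewCatalogServiceQuery`, the "web" service name, and a reachable Consul agent are assumptions for illustration; error handling is abbreviated.

```go
package main

import (
	"log"

	dep "github.com/hashicorp/consul-template/dependency"
	"github.com/hashicorp/consul-template/watch"
)

func main() {
	// Placeholder client set; real code would configure Consul/Vault clients.
	clients := dep.NewClientSet()

	w, err := watch.NewWatcher(&watch.NewWatcherInput{
		Clients: clients,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer w.Stop()

	// Watch a hypothetical "web" service in the Consul catalog.
	d, err := dep.NewCatalogServiceQuery("web")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Add(d); err != nil {
		log.Fatal(err)
	}

	// Views arrive on DataCh when upstream data changes; unrecoverable
	// errors arrive on ErrCh.
	for {
		select {
		case view := <-w.DataCh():
			log.Printf("new data for %s", view.Dependency())
		case err := <-w.ErrCh():
			log.Printf("watch error: %s", err)
			return
		}
	}
}
```

+// Watching determines if the given dependency is being watched.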
+func (w *Watcher) Watching(d dep.Dependency) bool { + w.Lock() + defer w.Unlock() + + _, ok := w.depViewMap[d.String()] + return ok +} + +// ForceWatching is used to force setting the internal state of watching +// a dependency. This is only used for unit testing purposes. +func (w *Watcher) ForceWatching(d dep.Dependency, enabled bool) { + w.Lock() + defer w.Unlock() + + if enabled { + w.depViewMap[d.String()] = nil + } else { + delete(w.depViewMap, d.String()) + } +} + +// Remove removes the given dependency from the list and stops the +// associated View. If a View for the given dependency does not exist, this +// function will return false. If the View does exist, this function will return +// true upon successful deletion. +func (w *Watcher) Remove(d dep.Dependency) bool { + w.Lock() + defer w.Unlock() + + log.Printf("[DEBUG] (watcher) removing %s", d) + + if view, ok := w.depViewMap[d.String()]; ok { + log.Printf("[TRACE] (watcher) actually removing %s", d) + view.stop() + delete(w.depViewMap, d.String()) + return true + } + + log.Printf("[TRACE] (watcher) %s did not exist, skipping", d) + return false +} + +// Size returns the number of views this watcher is watching. +func (w *Watcher) Size() int { + w.Lock() + defer w.Unlock() + return len(w.depViewMap) +} + +// Stop halts this watcher and any currently polling views immediately. If a +// view was in the middle of a poll, no data will be returned. +func (w *Watcher) Stop() { + w.Lock() + defer w.Unlock() + + log.Printf("[DEBUG] (watcher) stopping all views") + + for _, view := range w.depViewMap { + if view == nil { + continue + } + log.Printf("[TRACE] (watcher) stopping %s", view.Dependency()) + view.stop() + } + + // Reset the map to have no views + w.depViewMap = make(map[string]*View) + + // Close any idle TCP connections + w.clients.Stop() +} diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 53a052363e29..124409ff2b74 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -4,7 +4,10 @@ import ( "fmt" "io" "io/ioutil" + "net/url" "time" + + "github.com/mitchellh/mapstructure" ) const ( @@ -19,18 +22,26 @@ type ACLTokenPolicyLink struct { ID string Name string } +type ACLTokenRoleLink struct { + ID string + Name string +} // ACLToken represents an ACL Token type ACLToken struct { - CreateIndex uint64 - ModifyIndex uint64 - AccessorID string - SecretID string - Description string - Policies []*ACLTokenPolicyLink - Local bool - CreateTime time.Time `json:",omitempty"` - Hash []byte `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + SecretID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTTL time.Duration `json:",omitempty"` + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time `json:",omitempty"` + Hash []byte `json:",omitempty"` // DEPRECATED (ACL-Legacy-Compat) // Rules will only be present for legacy tokens returned via the new APIs @@ -38,15 +49,18 @@ type ACLToken struct { } type ACLTokenListEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - AccessorID string - Description string - Policies []*ACLTokenPolicyLink - Local bool - CreateTime time.Time - Hash []byte - Legacy bool + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + Description string + Policies 
[]*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time + Hash []byte + Legacy bool } // ACLEntry is used to represent a legacy ACL token @@ -67,11 +81,20 @@ type ACLReplicationStatus struct { SourceDatacenter string ReplicationType string ReplicatedIndex uint64 + ReplicatedRoleIndex uint64 ReplicatedTokenIndex uint64 LastSuccess time.Time LastError time.Time } +// ACLServiceIdentity represents a high-level grant of all necessary privileges +// to assume the identity of the named Service in the Catalog and within +// Connect. +type ACLServiceIdentity struct { + ServiceName string + Datacenters []string `json:",omitempty"` +} + // ACLPolicy represents an ACL Policy. type ACLPolicy struct { ID string @@ -94,6 +117,113 @@ type ACLPolicyListEntry struct { ModifyIndex uint64 } +type ACLRolePolicyLink struct { + ID string + Name string +} + +// ACLRole represents an ACL Role. +type ACLRole struct { + ID string + Name string + Description string + Policies []*ACLRolePolicyLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// BindingRuleBindType is the type of binding rule mechanism used. +type BindingRuleBindType string + +const ( + // BindingRuleBindTypeService binds to a service identity with the given name. + BindingRuleBindTypeService BindingRuleBindType = "service" + + // BindingRuleBindTypeRole binds to pre-existing roles with the given name. + BindingRuleBindTypeRole BindingRuleBindType = "role" +) + +type ACLBindingRule struct { + ID string + Description string + AuthMethod string + Selector string + BindType BindingRuleBindType + BindName string + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethod struct { + Name string + Type string + Description string + + // Configuration is arbitrary configuration for the auth method. This + // should only contain primitive values and containers (such as lists and + // maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethodListEntry struct { + Name string + Type string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed +// KubernetesAuthMethodConfig. +func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { + var config KubernetesAuthMethodConfig + decodeConf := &mapstructure.DecoderConfig{ + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// KubernetesAuthMethodConfig is the config for the built-in Consul auth method +// for Kubernetes. +type KubernetesAuthMethodConfig struct { + Host string `json:",omitempty"` + CACert string `json:",omitempty"` + ServiceAccountJWT string `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. 
+func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + "Host": c.Host, + "CACert": c.CACert, + "ServiceAccountJWT": c.ServiceAccountJWT, + } +} + +type ACLLoginParams struct { + AuthMethod string + BearerToken string + Meta map[string]string `json:",omitempty"` +} + // ACL can be used to query the ACL endpoints type ACL struct { c *Client @@ -266,17 +396,9 @@ func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, e return entries, qm, nil } -// TokenCreate creates a new ACL token. It requires that the AccessorID and SecretID fields -// of the ACLToken structure to be empty as these will be filled in by Consul. +// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields +// of the ACLToken structure are empty they will be filled in by Consul. func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if token.AccessorID != "" { - return nil, nil, fmt.Errorf("Cannot specify an AccessorID in Token Creation") - } - - if token.SecretID != "" { - return nil, nil, fmt.Errorf("Cannot specify a SecretID in Token Creation") - } - r := a.c.newRequest("PUT", "/v1/acl/token") r.setWriteOptions(q) r.obj = token @@ -437,7 +559,6 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri if policy.ID != "" { return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation") } - r := a.c.newRequest("PUT", "/v1/acl/policy") r.setWriteOptions(q) r.obj = policy @@ -460,7 +581,7 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri // existing policy ID func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { if policy.ID == "" { - return nil, nil, fmt.Errorf("Must specify an ID in Policy Creation") + return nil, nil, fmt.Errorf("Must specify an ID in Policy Update") } r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID) @@ -586,3 +707,410 @@ func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { return string(ruleBytes), nil } + +// RoleCreate will create a new role. It is not allowed for the role parameters +// ID field to be set as this will be generated by Consul while processing the request. +func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/role") + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleUpdate updates a role. The ID field of the role parameter must be set to an +// existing role ID +func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Role Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID) + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleDelete deletes a role given its ID. 
+func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// RoleRead retrieves the role details (by ID). Returns nil if not found. +func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/"+roleID) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleReadByName retrieves the role details (by name). Returns nil if not found. +func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleList retrieves a listing of all roles. The listing does not include some +// metadata for the role as those should be retrieved by subsequent calls to +// RoleRead. +func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/roles") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLRole + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// AuthMethodCreate will create a new auth method. +func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method") + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodUpdate updates an auth method. 
+func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name)) + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodDelete deletes an auth method given its Name. +func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) { + if methodName == "" { + return nil, fmt.Errorf("Must specify a Name in Auth Method Delete") + } + + r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// AuthMethodRead retrieves the auth method. Returns nil if not found. +func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) { + if methodName == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read") + } + + r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// AuthMethodList retrieves a listing of all auth methods. The listing does not +// include some metadata for the auth method as those should be retrieved by +// subsequent calls to AuthMethodRead. +func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/auth-methods") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLAuthMethodListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// BindingRuleCreate will create a new binding rule. It is not allowed for the +// binding rule parameter's ID field to be set as this will be generated by +// Consul while processing the request. +func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule") + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleUpdate updates a binding rule. 
The ID field of the binding
+// rule parameter must be set to an existing binding rule ID.
+func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+	if rule.ID == "" {
+		return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID)
+	r.setWriteOptions(q)
+	r.obj = rule
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// BindingRuleDelete deletes a binding rule given its ID.
+func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// BindingRuleRead retrieves the binding rule details. Returns nil if not found.
+func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID)
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// BindingRuleList retrieves a listing of all binding rules.
+func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/binding-rules")
+	if methodName != "" {
+		r.params.Set("authmethod", methodName)
+	}
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLBindingRule
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Login is used to exchange auth method credentials for a newly-minted Consul Token.
+func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+	r := a.c.newRequest("POST", "/v1/acl/login")
+	r.setWriteOptions(q)
+	r.obj = auth
+
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, wm, nil
+}
+
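As a hedged usage sketch (editor's addition): exchanging an auth method credential for a token via Login. The method name "minikube" and the bearer JWT are placeholders; the auth method must already be configured on the Consul servers.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Trade a (placeholder) Kubernetes service account JWT for a Consul token.
	tok, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  "minikube",
		BearerToken: "<service-account-jwt>",
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("minted token accessor:", tok.AccessorID)
}
```

+// Logout is used to destroy a Consul Token created via Login().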
+func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/logout") + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 412b37df52a6..04043ba842f9 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -84,11 +84,11 @@ type AgentService struct { Address string Weights AgentWeights EnableTagOverride bool - CreateIndex uint64 `json:",omitempty"` - ModifyIndex uint64 `json:",omitempty"` - ContentHash string `json:",omitempty"` + CreateIndex uint64 `json:",omitempty" bexpr:"-"` + ModifyIndex uint64 `json:",omitempty" bexpr:"-"` + ContentHash string `json:",omitempty" bexpr:"-"` // DEPRECATED (ProxyDestination) - remove this field - ProxyDestination string `json:",omitempty"` + ProxyDestination string `json:",omitempty" bexpr:"-"` Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` } @@ -103,8 +103,8 @@ type AgentServiceChecksInfo struct { // AgentServiceConnect represents the Connect configuration of a service. type AgentServiceConnect struct { Native bool `json:",omitempty"` - Proxy *AgentServiceConnectProxy `json:",omitempty"` - SidecarService *AgentServiceRegistration `json:",omitempty"` + Proxy *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"` + SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"` } // AgentServiceConnectProxy represents the Connect Proxy configuration of a @@ -112,7 +112,7 @@ type AgentServiceConnect struct { type AgentServiceConnectProxy struct { ExecMode ProxyExecMode `json:",omitempty"` Command []string `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` Upstreams []Upstream `json:",omitempty"` } @@ -123,7 +123,7 @@ type AgentServiceConnectProxyConfig struct { DestinationServiceID string `json:",omitempty"` LocalServiceAddress string `json:",omitempty"` LocalServicePort int `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` Upstreams []Upstream } @@ -278,9 +278,9 @@ type ConnectProxyConfig struct { ContentHash string // DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs // but they don't need ExecMode or Command - ExecMode ProxyExecMode `json:",omitempty"` - Command []string `json:",omitempty"` - Config map[string]interface{} + ExecMode ProxyExecMode `json:",omitempty"` + Command []string `json:",omitempty"` + Config map[string]interface{} `bexpr:"-"` Upstreams []Upstream } @@ -292,7 +292,7 @@ type Upstream struct { Datacenter string `json:",omitempty"` LocalBindAddress string `json:",omitempty"` LocalBindPort int `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` } // Agent can be used to query the Agent endpoints @@ -387,7 +387,14 @@ func (a *Agent) NodeName() (string, error) { // Checks returns the locally registered checks func (a *Agent) Checks() (map[string]*AgentCheck, error) { + return a.ChecksWithFilter("") +} + +// ChecksWithFilter returns a subset of the locally registered checks that match +// the given filter expression +func (a *Agent) ChecksWithFilter(filter 
string) (map[string]*AgentCheck, error) { r := a.c.newRequest("GET", "/v1/agent/checks") + r.filterQuery(filter) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return nil, err @@ -403,7 +410,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) { // Services returns the locally registered services func (a *Agent) Services() (map[string]*AgentService, error) { + return a.ServicesWithFilter("") +} + +// ServicesWithFilter returns a subset of the locally registered services that match +// the given filter expression +func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { r := a.c.newRequest("GET", "/v1/agent/services") + r.filterQuery(filter) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 39a0ad3e190e..4b17ff6cda22 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -30,6 +30,10 @@ const ( // the HTTP token. HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + // HTTPTokenFileEnvName defines an environment variable name which sets + // the HTTP token file. + HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE" + // HTTPAuthEnvName defines an environment variable name which sets // the HTTP authentication header. HTTPAuthEnvName = "CONSUL_HTTP_AUTH" @@ -146,6 +150,10 @@ type QueryOptions struct { // ctx is an optional context pass through to the underlying HTTP // request layer. Use Context() and WithContext() to manage this. ctx context.Context + + // Filter requests filtering data prior to it being returned. The string + // is a go-bexpr compatible expression. + Filter string } func (o *QueryOptions) Context() context.Context { @@ -276,6 +284,10 @@ type Config struct { // which overrides the agent's default token. Token string + // TokenFile is a file containing the current token to use for this client. + // If provided it is read once at startup and never again. + TokenFile string + TLSConfig TLSConfig } @@ -339,6 +351,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config { config.Address = addr } + if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" { + config.TokenFile = tokenFile + } + if token := os.Getenv(HTTPTokenEnvName); token != "" { config.Token = token } @@ -445,6 +461,7 @@ func (c *Config) GenerateEnv() []string { env = append(env, fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), + fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile), fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), @@ -537,6 +554,19 @@ func NewClient(config *Config) (*Client, error) { config.Address = parts[1] } + // If the TokenFile is set, always use that, even if a Token is configured. + // This is because when TokenFile is set it is read into the Token field. + // We want any derived clients to have to re-read the token file. 
+ if config.TokenFile != "" { + data, err := ioutil.ReadFile(config.TokenFile) + if err != nil { + return nil, fmt.Errorf("Error loading token file: %s", err) + } + + if token := strings.TrimSpace(string(data)); token != "" { + config.Token = token + } + } if config.Token == "" { config.Token = defConfig.Token } @@ -614,6 +644,9 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.Near != "" { r.params.Set("near", q.Near) } + if q.Filter != "" { + r.params.Set("filter", q.Filter) + } if len(q.NodeMeta) > 0 { for key, value := range q.NodeMeta { r.params.Add("node-meta", key+":"+value) @@ -813,6 +846,8 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (* } // parseQueryMeta is used to help parse query meta-data +// +// TODO(rb): bug? the error from this function is never handled func parseQueryMeta(resp *http.Response, q *QueryMeta) error { header := resp.Header @@ -890,10 +925,42 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h return d, nil, e } if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + return d, nil, generateUnexpectedResponseCodeError(resp) } return d, resp, nil } + +func (req *request) filterQuery(filter string) { + if filter == "" { + return + } + + req.params.Set("filter", filter) +} + +// generateUnexpectedResponseCodeError consumes the rest of the body, closes +// the body stream and generates an error indicating the status code was +// unexpected. +func generateUnexpectedResponseCodeError(resp *http.Response) error { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) +} + +func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return false, d, nil, e + } + switch resp.StatusCode { + case 200: + return true, d, resp, nil + case 404: + return false, d, resp, nil + default: + return false, d, nil, generateUnexpectedResponseCodeError(resp) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go new file mode 100644 index 000000000000..0c18963fd60f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -0,0 +1,255 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + + "github.com/mitchellh/mapstructure" +) + +const ( + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ProxyConfigGlobal string = "global" +) + +type ConfigEntry interface { + GetKind() string + GetName() string + GetCreateIndex() uint64 + GetModifyIndex() uint64 +} + +type ServiceConfigEntry struct { + Kind string + Name string + Protocol string + CreateIndex uint64 + ModifyIndex uint64 +} + +func (s *ServiceConfigEntry) GetKind() string { + return s.Kind +} + +func (s *ServiceConfigEntry) GetName() string { + return s.Name +} + +func (s *ServiceConfigEntry) GetCreateIndex() uint64 { + return s.CreateIndex +} + +func (s *ServiceConfigEntry) GetModifyIndex() uint64 { + return s.ModifyIndex +} + +type ProxyConfigEntry struct { + Kind string + Name string + Config map[string]interface{} + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *ProxyConfigEntry) GetKind() string { + return p.Kind +} 
+ +func (p *ProxyConfigEntry) GetName() string { + return p.Name +} + +func (p *ProxyConfigEntry) GetCreateIndex() uint64 { + return p.CreateIndex +} + +func (p *ProxyConfigEntry) GetModifyIndex() uint64 { + return p.ModifyIndex +} + +type rawEntryListResponse struct { + kind string + Entries []map[string]interface{} +} + +func makeConfigEntry(kind, name string) (ConfigEntry, error) { + switch kind { + case ServiceDefaults: + return &ServiceConfigEntry{Name: name}, nil + case ProxyDefaults: + return &ProxyConfigEntry{Name: name}, nil + default: + return nil, fmt.Errorf("invalid config entry kind: %s", kind) + } +} + +func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { + var entry ConfigEntry + + kindVal, ok := raw["Kind"] + if !ok { + kindVal, ok = raw["kind"] + } + if !ok { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + if kindStr, ok := kindVal.(string); ok { + newEntry, err := makeConfigEntry(kindStr, "") + if err != nil { + return nil, err + } + entry = newEntry + } else { + return nil, fmt.Errorf("Kind value in payload is not a string") + } + + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Result: &entry, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + return entry, decoder.Decode(raw) +} + +func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) { + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return nil, err + } + + return DecodeConfigEntry(raw) +} + +// Config can be used to query the Config endpoints +type ConfigEntries struct { + c *Client +} + +// Config returns a handle to the Config endpoints +func (c *Client) ConfigEntries() *ConfigEntries { + return &ConfigEntries{c} +} + +func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) { + if kind == "" || name == "" { + return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty") + } + + entry, err := makeConfigEntry(kind, name) + if err != nil { + return nil, nil, err + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, entry); err != nil { + return nil, nil, err + } + + return entry, qm, nil +} + +func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) { + if kind == "" { + return nil, nil, fmt.Errorf("The kind parameter must not be empty") + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var raw []map[string]interface{} + if err := decodeBody(resp, &raw); err != nil { + return nil, nil, err + } + + var entries []ConfigEntry + for _, rawEntry := range raw { + entry, err := DecodeConfigEntry(rawEntry) + if err != nil { + return nil, nil, err + } + entries = append(entries, entry) + } + + return entries, qm, nil +} + +func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, nil, w) +} + 
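As a brief, hedged sketch (editor's addition) of how the config entry getters and setters above compose; the "web" service name is a placeholder and a local agent is assumed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entries := client.ConfigEntries()

	// Write (or overwrite) a service-defaults entry for a placeholder service.
	ok, _, err := entries.Set(&api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web",
		Protocol: "http",
	}, nil)
	if err != nil || !ok {
		log.Fatalf("set failed: ok=%v err=%v", ok, err)
	}

	// Read it back; Get returns a value behind the ConfigEntry interface.
	entry, _, err := entries.Get(api.ServiceDefaults, "web", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s/%s\n", entry.GetKind(), entry.GetName())
}
```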
+func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
+	return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
+}
+
+func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
+	r := conf.c.newRequest("PUT", "/v1/config")
+	r.setWriteOptions(w)
+	for param, value := range params {
+		r.params.Set(param, value)
+	}
+	r.obj = entry
+	rtt, resp, err := requireOK(conf.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return res, wm, nil
+}
+
+func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
+	if kind == "" || name == "" {
+		return nil, fmt.Errorf("Both kind and name parameters must not be empty")
+	}
+
+	r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
+	r.setWriteOptions(w)
+	rtt, resp, err := requireOK(conf.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
index 25f931c55644..e19821891584 100644
--- a/vendor/github.com/hashicorp/consul/api/go.mod
+++ b/vendor/github.com/hashicorp/consul/api/go.mod
@@ -5,7 +5,7 @@ go 1.12
 replace github.com/hashicorp/consul/sdk => ../sdk
 
 require (
-	github.com/hashicorp/consul/sdk v0.1.0
+	github.com/hashicorp/consul/sdk v0.1.1
 	github.com/hashicorp/go-cleanhttp v0.5.1
 	github.com/hashicorp/go-rootcerts v1.0.0
 	github.com/hashicorp/go-uuid v1.0.1
diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile
new file mode 100644
index 000000000000..ce1e274e47a4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile
@@ -0,0 +1,2 @@
+test::
+	go test
diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/README.md b/vendor/github.com/hashicorp/go-sockaddr/template/README.md
new file mode 100644
index 000000000000..c40905af72ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/template/README.md
@@ -0,0 +1,6 @@
+# sockaddr/template
+
+sockaddr's template library. See
+the
+[sockaddr/template](https://godoc.org/github.com/hashicorp/go-sockaddr/template)
+docs for details on how to use this template library.
diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/doc.go b/vendor/github.com/hashicorp/go-sockaddr/template/doc.go
new file mode 100644
index 000000000000..8cc6730a4f44
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/template/doc.go
@@ -0,0 +1,311 @@
+/*
+
+Package sockaddr/template provides a text/template interface to the SockAddr
+helper functions. The primary entry point into the sockaddr/template package is
+through its Parse() call. For example:
+
+  import (
+    "fmt"
+
+    template "github.com/hashicorp/go-sockaddr/template"
+  )
+
+  results, err := template.Parse(`{{ GetPrivateIP }}`)
+  if err != nil {
+    fmt.Printf("Unable to find a private IP address: %v\n", err)
+  }
+  fmt.Printf("My Private IP address is: %s\n", results)
+
+Below is a list of builtin template functions and details re: their usage. It
+is possible to add additional functions by calling ParseIfAddrsTemplate
+directly.
+
+In general, the calling convention for this template library is to seed a list
+of initial interfaces via one of the Get*Interfaces() calls, then filter, sort,
+and extract the necessary attributes for use as string input. This template
+interface is primarily geared toward resolving specific values that are only
+available at runtime, but can be defined as a heuristic for execution when a
+config file is parsed.
+
+All functions, unless noted otherwise, return an array of IfAddr structs making
+it possible to `sort`, `filter`, `limit`, seek (via the `offset` function), or
+`unique` the list. To extract useful string information, the `attr` and `join`
+functions return a single string value. See below for details.
+
+Important note: see the
+https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr utility for
+more examples and for a CLI utility to experiment with the template syntax.
+
+`GetAllInterfaces` - Returns an exhaustive set of IfAddr structs available on
+the host. `GetAllInterfaces` is the initial input and accessible as the initial
+"dot" in the pipeline.
+
+Example:
+
+    {{ GetAllInterfaces }}
+
+
+`GetDefaultInterfaces` - Returns one IfAddr for every IP that is on the
+interface containing the default route for the host.
+
+Example:
+
+    {{ GetDefaultInterfaces }}
+
+`GetPrivateInterfaces` - Returns one IfAddr for every forwardable IP address
+that is included in RFC 6890 and whose interface is marked as up. NOTE: RFC
+6890 is a more exhaustive version of RFC 1918 because it spans IPv4 and IPv6;
+however, RFC 6890 does permit the inclusion of likely undesired addresses such
+as multicast, therefore our version of "private" also filters out
+non-forwardable addresses.
+
+Example:
+
+    {{ GetPrivateInterfaces | sort "default" | join "address" " " }}
+
+
+`GetPublicInterfaces` - Returns a list of IfAddr structs whose IPs are
+forwardable, do not match RFC 6890, and whose interface is marked up.
+
+Example:
+
+    {{ GetPublicInterfaces | sort "default" | join "name" " " }}
+
+
+`GetPrivateIP` - Helper function that returns a string of the first IP address
+from GetPrivateInterfaces.
+
+Example:
+
+    {{ GetPrivateIP }}
+
+
+`GetPrivateIPs` - Helper function that returns a string of all private IP
+addresses on the host.
+
+Example:
+
+    {{ GetPrivateIPs }}
+
+
+`GetPublicIP` - Helper function that returns a string of the first IP from
+GetPublicInterfaces.
+
+Example:
+
+    {{ GetPublicIP }}
+
+`GetPublicIPs` - Helper function that returns a space-delimited string of all
+public IP addresses on the host.
+
+Example:
+
+    {{ GetPublicIPs }}
+
+
+`GetInterfaceIP` - Helper function that returns a string of the first IP from
+the named interface.
+
+Example:
+
+    {{ GetInterfaceIP "en0" }}
+
+
+
+`GetInterfaceIPs` - Helper function that returns a space-delimited list of all
+IPs on a given interface.
+
+Example:
+
+    {{ GetInterfaceIPs "en0" }}
+
+
+`sort` - Sorts the IfAddrs result based on its arguments. `sort` takes one
+argument, a list of ways to sort its IfAddrs argument. The list of sort
+criteria is comma separated (`,`):
+  - `address`, `+address`: Ascending sort of IfAddrs by Address
+  - `-address`: Descending sort of IfAddrs by Address
+  - `default`, `+default`: Ascending sort of IfAddrs, IfAddr with a default route first
+  - `-default`: Descending sort of IfAddrs, IfAddr with default route last
+  - `name`, `+name`: Ascending sort of IfAddrs by lexical ordering of interface name
+  - `-name`: Descending sort of IfAddrs by lexical ordering of interface name
+  - `port`, `+port`: Ascending sort of IfAddrs by port number
+  - `-port`: Descending sort of IfAddrs by port number
+  - `private`, `+private`: Ascending sort of IfAddrs with private addresses first
+  - `-private`: Descending sort of IfAddrs with private addresses last
+  - `size`, `+size`: Ascending sort of IfAddrs by their network size as determined
+    by their netmask (larger networks first)
+  - `-size`: Descending sort of IfAddrs by their network size as determined by their
+    netmask (smaller networks first)
+  - `type`, `+type`: Ascending sort of IfAddrs by the type of the IfAddr (Unix,
+    IPv4, then IPv6)
+  - `-type`: Descending sort of IfAddrs by the type of the IfAddr (IPv6, IPv4, Unix)
+
+Example:
+
+    {{ GetPrivateInterfaces | sort "default,-type,size,+address" }}
+
+
+`exclude` and `include`: Filters IfAddrs based on the selector criteria and its
+arguments. Both `exclude` and `include` take two arguments. The list of
+available filtering criteria is:
+  - "address": Filter IfAddrs based on a regexp matching the string representation
+    of the address
+  - "flag","flags": Filter IfAddrs based on the list of flags specified. Multiple
+    flags can be passed together using the pipe character (`|`) to create an inclusive
+    bitmask of flags. The list of flags is included below.
+  - "name": Filter IfAddrs based on a regexp matching the interface name.
+  - "network": Filter IfAddrs based on whether a network is included in a given
+    CIDR. More than one CIDR can be passed in if each network is separated by
+    the pipe character (`|`).
+  - "port": Filter IfAddrs based on an exact match of the port number (number must
+    be expressed as a string)
+  - "rfc", "rfcs": Filter IfAddrs based on the matching RFC. If more than one RFC
+    is specified, the list of RFCs can be joined together using the pipe character (`|`).
+  - "size": Filter IfAddrs based on the exact match of the mask size.
+  - "type": Filter IfAddrs based on their SockAddr type. Multiple types can be
+    specified together by using the pipe character (`|`). Valid types include:
+    `ip`, `ipv4`, `ipv6`, and `unix`.
+
+Example:
+
+    {{ GetPrivateInterfaces | exclude "type" "IPv6" }}
+
+
+`unique`: Removes duplicate entries from the IfAddrs list, assuming the list has
+already been sorted. `unique` only takes one argument:
+  - "address": Removes duplicates with the same address
+  - "name": Removes duplicates with the same interface names
+
+Example:
+
+    {{ GetAllInterfaces | sort "default,-type,address" | unique "name" }}
+
+
+`limit`: Reduces the size of the list to the specified value.
+
+Example:
+
+    {{ GetPrivateInterfaces | limit 1 }}
+
+
+`offset`: Seeks into the list by the specified value. A negative value can be
+used to seek from the end of the list.
+
+Example:
+
+    {{ GetPrivateInterfaces | offset "-2" | limit 1 }}
+
+
+`math`: Perform a "math" operation on each member of the list and return new
+values. `math` takes two arguments, the attribute to operate on and the
+operation's value.
+ +Supported operations include: + + - `address`: Adds the value, a positive or negative value expressed as a + decimal string, to the address. The sign is required. This value is + allowed to over or underflow networks (e.g. 127.255.255.255 `"address" "+1"` + will return "128.0.0.0"). Addresses will wrap at IPv4 or IPv6 boundaries. + - `network`: Add the value, a positive or negative value expressed as a + decimal string, to the network address. The sign is required. Positive + values are added to the network address. Negative values are subtracted + from the network's broadcast address (e.g. 127.0.0.1 `"network" "-1"` will + return "127.255.255.255"). Values that overflow the network size will + safely wrap. + - `mask`: Applies the given network mask to the address. The network mask is + expressed as a decimal value (e.g. network mask "24" corresponds to + `255.255.255.0`). After applying the network mask, the network mask of the + resulting address will be either the applied network mask or the network mask + of the input address depending on which network is larger + (e.g. 192.168.10.20/24 `"mask" "16"` will return "192.168.0.0/16" but + 192.168.10.20/24 `"mask" "28"` will return "192.168.10.16/24"). + +Example: + + {{ GetPrivateInterfaces | include "type" "IP" | math "address" "+256" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "address" "-256" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "network" "+2" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "network" "-2" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "mask" "24" | attr "address" }} + {{ GetPrivateInterfaces | include "flags" "forwardable|up" | include "type" "IPv4" | math "network" "+2" | attr "address" }} + + +`attr`: Extracts a single attribute of the first member of the list and returns +it as a string. `attr` takes a single attribute name. The list of available +attributes is type-specific and shared between `join`. See below for a list of +supported attributes. + +Example: + + {{ GetAllInterfaces | exclude "flags" "up" | attr "address" }} + + +`Attr`: Extracts a single attribute from an `IfAttr` and in every other way +performs the same as the `attr`. + +Example: + + {{ with $ifAddrs := GetAllInterfaces | include "type" "IP" | sort "+type,+address" -}} + {{- range $ifAddrs -}} + {{- Attr "address" . }} -- {{ Attr "network" . }}/{{ Attr "size" . -}} + {{- end -}} + {{- end }} + + +`join`: Similar to `attr`, `join` extracts all matching attributes of the list +and returns them as a string joined by the separator, the second argument to +`join`. The list of available attributes is type-specific and shared between +`join`. + +Example: + + {{ GetAllInterfaces | include "flags" "forwardable" | join "address" " " }} + + +`exclude` and `include` flags: + - `broadcast` + - `down`: Is the interface down? + - `forwardable`: Is the IP forwardable? + - `global unicast` + - `interface-local multicast` + - `link-local multicast` + - `link-local unicast` + - `loopback` + - `multicast` + - `point-to-point` + - `unspecified`: Is the IfAddr the IPv6 unspecified address? + - `up`: Is the interface up? 
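(Editor's sketch, not part of the vendored file: the pipelines documented above can be exercised from Go via Parse. The output depends entirely on the host's interfaces, so treat this as illustrative only.)

```go
package main

import (
	"fmt"
	"log"

	template "github.com/hashicorp/go-sockaddr/template"
)

func main() {
	// First private address on an interface that is up, as a plain string.
	out, err := template.Parse(
		`{{ GetPrivateInterfaces | include "flags" "up" | limit 1 | attr "address" }}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```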
+
+
+Attributes for `attr`, `Attr`, and `join`:
+
+SockAddr Type:
+  - `string`
+  - `type`
+
+IPAddr Type:
+  - `address`
+  - `binary`
+  - `first_usable`
+  - `hex`
+  - `host`
+  - `last_usable`
+  - `mask_bits`
+  - `netmask`
+  - `network`
+  - `octets`: Decimal values per byte
+  - `port`
+  - `size`: Number of hosts in the network
+
+IPv4Addr Type:
+  - `broadcast`
+  - `uint32`: unsigned integer representation of the value
+
+IPv6Addr Type:
+  - `uint128`: unsigned integer representation of the value
+
+UnixSock Type:
+  - `path`
+
+*/
+package template
diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/template.go b/vendor/github.com/hashicorp/go-sockaddr/template/template.go
new file mode 100644
index 000000000000..bbed513617f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/template/template.go
@@ -0,0 +1,155 @@
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"text/template"
+
+	"github.com/hashicorp/errwrap"
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+var (
+	// SourceFuncs is a map of all top-level functions that generate
+	// sockaddr data types.
+	SourceFuncs template.FuncMap
+
+	// SortFuncs is a map of all functions used in sorting
+	SortFuncs template.FuncMap
+
+	// FilterFuncs is a map of all functions used in filtering
+	FilterFuncs template.FuncMap
+
+	// HelperFuncs is a map of all helper functions
+	HelperFuncs template.FuncMap
+)
+
+func init() {
+	SourceFuncs = template.FuncMap{
+		// GetAllInterfaces - Returns an exhaustive set of IfAddr
+		// structs available on the host. `GetAllInterfaces` is the
+		// initial input and accessible as the initial "dot" in the
+		// pipeline.
+		"GetAllInterfaces": sockaddr.GetAllInterfaces,
+
+		// GetDefaultInterfaces - Returns one IfAddr for every IP that
+		// is on the interface containing the default route for the
+		// host.
+		"GetDefaultInterfaces": sockaddr.GetDefaultInterfaces,
+
+		// GetPrivateInterfaces - Returns one IfAddr for every IP that
+		// matches RFC 6890, are attached to the interface with the
+		// default route, and are forwardable IP addresses. NOTE: RFC
+		// 6890 is a more exhaustive version of RFC 1918 because it spans
+		// IPv4 and IPv6, however it does permit the inclusion of likely
+		// undesired addresses such as multicast, therefore our
+		// definition of a "private" address also excludes
+		// non-forwardable IP addresses (as defined by the IETF).
+		"GetPrivateInterfaces": sockaddr.GetPrivateInterfaces,
+
+		// GetPublicInterfaces - Returns a list of IfAddr that do not
+		// match RFC 6890, are attached to the default route, and are
+		// forwardable.
+		"GetPublicInterfaces": sockaddr.GetPublicInterfaces,
+	}
+
+	SortFuncs = template.FuncMap{
+		"sort": sockaddr.SortIfBy,
+	}
+
+	FilterFuncs = template.FuncMap{
+		"exclude": sockaddr.ExcludeIfs,
+		"include": sockaddr.IncludeIfs,
+	}
+
+	HelperFuncs = template.FuncMap{
+		// Misc functions that operate on IfAddrs inputs
+		"attr":   Attr,
+		"join":   sockaddr.JoinIfAddrs,
+		"limit":  sockaddr.LimitIfAddrs,
+		"offset": sockaddr.OffsetIfAddrs,
+		"unique": sockaddr.UniqueIfAddrsBy,
+
+		// Misc math functions that operate on a single IfAddr input
+		"math": sockaddr.IfAddrsMath,
+
+		// Return a Private RFC 6890 IP address string that is attached
+		// to the default route and a forwardable address.
+		"GetPrivateIP": sockaddr.GetPrivateIP,
+
+		// Return all Private RFC 6890 IP addresses as a space-delimited string of
+		// IP addresses. Addresses returned do not have to be on the interface with
+		// a default route.
+		"GetPrivateIPs": sockaddr.GetPrivateIPs,
+
+		// Return a Public RFC 6890 IP address string that is attached
+		// to the default route and a forwardable address.
+		"GetPublicIP": sockaddr.GetPublicIP,
+
+		// Return all Public RFC 6890 IP addresses as a space-delimited string of IP
+		// addresses. Addresses returned do not have to be on the interface with a
+		// default route.
+		"GetPublicIPs": sockaddr.GetPublicIPs,
+
+		// Return the first IP address of the named interface, sorted by
+		// the largest network size.
+		"GetInterfaceIP": sockaddr.GetInterfaceIP,
+
+		// Return all IP addresses on the named interface, sorted by the largest
+		// network size.
+		"GetInterfaceIPs": sockaddr.GetInterfaceIPs,
+	}
+}
+
+// Attr returns the attribute from the ifAddrsRaw argument. If the argument is
+// an IfAddrs, only the first element will be evaluated for resolution.
+func Attr(selectorName string, ifAddrsRaw interface{}) (string, error) {
+	switch v := ifAddrsRaw.(type) {
+	case sockaddr.IfAddr:
+		return sockaddr.IfAttr(selectorName, v)
+	case sockaddr.IfAddrs:
+		return sockaddr.IfAttrs(selectorName, v)
+	default:
+		return "", fmt.Errorf("unable to obtain attribute %s from type %T (%v)", selectorName, ifAddrsRaw, ifAddrsRaw)
+	}
+}
+
+// Parse parses input as template input using the addresses available on the
+// host, then returns the string output if there are no errors.
+func Parse(input string) (string, error) {
+	addrs, err := sockaddr.GetAllInterfaces()
+	if err != nil {
+		return "", errwrap.Wrapf("unable to query interface addresses: {{err}}", err)
+	}
+
+	return ParseIfAddrs(input, addrs)
+}
+
+// ParseIfAddrs parses input as template input using the IfAddrs inputs, then
+// returns the string output if there are no errors.
+func ParseIfAddrs(input string, ifAddrs sockaddr.IfAddrs) (string, error) {
+	return ParseIfAddrsTemplate(input, ifAddrs, template.New("sockaddr.Parse"))
+}
+
+// ParseIfAddrsTemplate parses input as template input using the IfAddrs inputs,
+// then returns the string output if there are no errors.
+func ParseIfAddrsTemplate(input string, ifAddrs sockaddr.IfAddrs, tmplIn *template.Template) (string, error) {
+	// Create a template, add the function map, and parse the text.
+	tmpl, err := tmplIn.Option("missingkey=error").
+		Funcs(SourceFuncs).
+		Funcs(SortFuncs).
+		Funcs(FilterFuncs).
+		Funcs(HelperFuncs).
+ Parse(input) + if err != nil { + return "", errwrap.Wrapf(fmt.Sprintf("unable to parse template %+q: {{err}}", input), err) + } + + var outWriter bytes.Buffer + err = tmpl.Execute(&outWriter, ifAddrs) + if err != nil { + return "", errwrap.Wrapf(fmt.Sprintf("unable to execute sockaddr input %+q: {{err}}", input), err) + } + + return outWriter.String(), nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go b/vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go new file mode 100644 index 000000000000..73952313fe0c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go @@ -0,0 +1,28 @@ +package pointerutil + +import ( + "os" + "time" +) + +// StringPtr returns a pointer to a string value +func StringPtr(s string) *string { + return &s +} + +// BoolPtr returns a pointer to a boolean value +func BoolPtr(b bool) *bool { + return &b +} + +// TimeDurationPtr returns a pointer to a time duration value +func TimeDurationPtr(duration string) *time.Duration { + d, _ := time.ParseDuration(duration) + + return &d +} + +// FileModePtr returns a pointer to the given os.FileMode +func FileModePtr(o os.FileMode) *os.FileMode { + return &o +} diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go deleted file mode 100644 index 7c634cdc5cd6..000000000000 --- a/vendor/github.com/lib/pq/oid/gen.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build ignore - -// Generate the table of OID values -// Run with 'go run gen.go'. -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - "os/exec" - "strings" - - _ "github.com/lib/pq" -) - -// OID represent a postgres Object Identifier Type. -type OID struct { - ID int - Type string -} - -// Name returns an upper case version of the oid type. -func (o OID) Name() string { - return strings.ToUpper(o.Type) -} - -func main() { - datname := os.Getenv("PGDATABASE") - sslmode := os.Getenv("PGSSLMODE") - - if datname == "" { - os.Setenv("PGDATABASE", "pqgotest") - } - - if sslmode == "" { - os.Setenv("PGSSLMODE", "disable") - } - - db, err := sql.Open("postgres", "") - if err != nil { - log.Fatal(err) - } - rows, err := db.Query(` - SELECT typname, oid - FROM pg_type WHERE oid < 10000 - ORDER BY oid; - `) - if err != nil { - log.Fatal(err) - } - oids := make([]*OID, 0) - for rows.Next() { - var oid OID - if err = rows.Scan(&oid.Type, &oid.ID); err != nil { - log.Fatal(err) - } - oids = append(oids, &oid) - } - if err = rows.Err(); err != nil { - log.Fatal(err) - } - cmd := exec.Command("gofmt") - cmd.Stderr = os.Stderr - w, err := cmd.StdinPipe() - if err != nil { - log.Fatal(err) - } - f, err := os.Create("types.go") - if err != nil { - log.Fatal(err) - } - cmd.Stdout = f - err = cmd.Start() - if err != nil { - log.Fatal(err) - } - fmt.Fprintln(w, "// Code generated by gen.go. 
DO NOT EDIT.") - fmt.Fprintln(w, "\npackage oid") - fmt.Fprintln(w, "const (") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) - } - fmt.Fprintln(w, ")") - fmt.Fprintln(w, "var TypeName = map[Oid]string{") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) - } - fmt.Fprintln(w, "}") - w.Close() - cmd.Wait() -} diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml new file mode 100644 index 000000000000..16d1430aa220 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 2FMhp57u8LcstKL9B190fLTcEnBtAAiEL diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 000000000000..740fa931322d --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 000000000000..b1d235c78d37 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,47 @@ +# go-shellwords + +[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
+ +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.mod b/vendor/github.com/mattn/go-shellwords/go.mod new file mode 100644 index 000000000000..8d96dbd5fa35 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.mod @@ -0,0 +1 @@ +module github.com/mattn/go-shellwords diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 000000000000..41429d8f26f8 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,195 @@ +package shellwords + +import ( + "errors" + "os" + "regexp" + "strings" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + return envRe.ReplaceAllStringFunc(s, func(s string) string { + s = s[1:] + if s[0] == '{' { + s = s[1 : len(s)-1] + } + return getenv(s) + }) +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. + Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + } +} + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := false + +loop: + for i, r := range line { + if escaped { + buf += string(r) + escaped = false + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got { + if p.ParseEnv { + buf = replaceEnv(p.Getenv, buf) + } + args = append(args, buf) + buf = "" + got = false + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick) + if err != nil { + return nil, err + } + buf = out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick) + if err != nil { + return nil, err + } + if r == ')' { + buf = buf[:len(buf)-len(backtick)-2] + out + } else { + buf = buf[:len(buf)-len(backtick)-1] + out + } + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + singleQuoted = !singleQuoted + continue + } + case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= 
'9' { + i -= 1 + got = false + } + } + pos = i + break loop + } + } + + got = true + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got { + if p.ParseEnv { + buf = replaceEnv(p.Getenv, buf) + } + args = append(args, buf) + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go new file mode 100644 index 000000000000..180f00f0bd6a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_go15.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package shellwords + +import ( + "os" + "os/exec" + "runtime" + "strings" +) + +func shellRun(line string) (string, error) { + var b []byte + var err error + if runtime.GOOS == "windows" { + b, err = exec.Command(os.Getenv("COMSPEC"), "/c", line).Output() + } else { + b, err = exec.Command(os.Getenv("SHELL"), "-c", line).Output() + } + if err != nil { + return "", err + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 000000000000..eaf1011d60ec --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,22 @@ +// +build !windows,go1.6 + +package shellwords + +import ( + "errors" + "os" + "os/exec" + "strings" +) + +func shellRun(line string) (string, error) { + shell := os.Getenv("SHELL") + b, err := exec.Command(shell, "-c", line).Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", errors.New(err.Error() + ":" + string(b)) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 000000000000..e46f89a1fedd --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,22 @@ +// +build windows,go1.6 + +package shellwords + +import ( + "errors" + "os" + "os/exec" + "strings" +) + +func shellRun(line string) (string, error) { + shell := os.Getenv("COMSPEC") + b, err := exec.Command(shell, "/c", line).Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", errors.New(err.Error() + ":" + string(b)) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 000000000000..a3866a291fd1 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 000000000000..28ce45a3e181 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,65 @@ +# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + + * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + allowing effective hashing of time.Time + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). + +A quick code example is shown below: + +```go +type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} +} + +v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, +} + +hash, err := hashstructure.Hash(v, nil) +if err != nil { + panic(err) +} + +fmt.Printf("%d", hash) +// Output: +// 2307517237273902113 +``` diff --git a/vendor/github.com/mitchellh/hashstructure/go.mod b/vendor/github.com/mitchellh/hashstructure/go.mod new file mode 100644 index 000000000000..966582aa95b4 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/hashstructure diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 000000000000..ea13a1583c3b --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,358 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + +// HashOptions are options that are available for hashing. 
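+// For example (a sketch), hashing with &HashOptions{ZeroNil: true} makes a
+// nil *int produce the same hash as a pointer to int(0).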
+type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string + + // ZeroNil is flag determining if nil pointer should be treated equal + // to a zero value of pointed type. By default this is false. + ZeroNil bool +} + +// Hash returns the hash value of an arbitrary value. +// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. The same *HashOptions value cannot be used +// concurrently. None of the values within a *HashOptions struct are +// safe to read/write while hashing is being done. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" or "-" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. +// +// * "string" - The field will be hashed as a string, only works when the +// field implements fmt.Stringer +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string + zeronil bool +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + t := reflect.TypeOf(0) + + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + if w.zeronil { + t = v.Type().Elem() + } + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + v = reflect.Zero(t) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... 
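+	// (encoding/binary.Write requires fixed-size data, so the
+	// platform-sized int and uint kinds are widened to 64 bits below, and
+	// bool is normalized to an int8 so it hashes identically on every
+	// platform.)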
+ switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. + var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + parent := v.Interface() + var include Includable + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" || tag == "-" { + // Ignore this field + continue + } + + // if string is set, use the string value + if tag == "string" { + if impl, ok := innerV.Interface().(fmt.Stringer); ok { + innerV = reflect.ValueOf(impl.String()) + } else { + return 0, &ErrNotStringer{ + Field: v.Type().Field(i).Name, + } + } + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, innerV) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(innerV, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. 
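+	// ("Deterministic" here means order-independent: set elements are
+	// folded in with XOR via hashUpdateUnordered, while ordered elements
+	// are mixed through the hash function via hashUpdateOrdered.)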
+ var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. + e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 000000000000..b6289c0bee71 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go b/vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go deleted file mode 100644 index d840f5a7ddc7..000000000000 --- a/vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/ory/dockertest/docker/pkg/archive" - "github.com/sirupsen/logrus" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go b/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go deleted file mode 100644 index dd6ddc4f7286..000000000000 --- a/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build ignore -// Hand writing: _Ctype_struct___0 - -/* -Input to cgo -godefs. - -*/ - -package disk - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -// because statinfo has long double snap_time, redefine with changing long long -struct statinfo2 { - long cp_time[CPUSTATES]; - long tk_nin; - long tk_nout; - struct devinfo *dinfo; - long long snap_time; -}; -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeofLongDouble = C.sizeof_longlong - - DEVSTAT_NO_DATA = 0x00 - DEVSTAT_READ = 0x01 - DEVSTAT_WRITE = 0x02 - DEVSTAT_FREE = 0x03 - - // from sys/mount.h - MNT_RDONLY = 0x00000001 /* read only filesystem */ - MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ - MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ - MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ - MNT_UNION = 0x00000020 /* union with underlying filesystem */ - MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ - MNT_SUIDDIR = 0x00100000 /* special handling of SUID on dirs */ - MNT_SOFTDEP = 0x00200000 /* soft updates being done */ - MNT_NOSYMFOLLOW = 0x00400000 /* do not follow symlinks */ - MNT_GJOURNAL = 0x02000000 /* GEOM journal support enabled */ - MNT_MULTILABEL = 0x04000000 /* MAC support for individual objects */ - MNT_ACLS = 0x08000000 /* ACL support enabled */ - MNT_NOATIME = 0x10000000 /* disable update of file access time */ - MNT_NOCLUSTERR = 0x40000000 /* disable cluster read */ - MNT_NOCLUSTERW = 0x80000000 /* disable cluster write */ - MNT_NFS4ACLS = 0x00000010 - - MNT_WAIT = 1 /* synchronously wait for I/O to complete */ - MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ - MNT_LAZY = 3 /* push data not written by filesystem syncer */ - MNT_SUSPEND = 4 /* Suspend file system after sync */ -) - -const ( - sizeOfDevstat = C.sizeof_struct_devstat -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong - _C_long_double C.longlong -) - -type Statfs C.struct_statfs -type Fsid C.struct_fsid - -type Devstat C.struct_devstat -type Bintime C.struct_bintime diff --git a/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go b/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go deleted file mode 100644 index 1e3ddef5cfb4..000000000000 --- a/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build ignore -// Hand writing: _Ctype_struct___0 - -/* -Input to cgo -godefs. -*/ - -package disk - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeofLongDouble = C.sizeof_longlong - - DEVSTAT_NO_DATA = 0x00 - DEVSTAT_READ = 0x01 - DEVSTAT_WRITE = 0x02 - DEVSTAT_FREE = 0x03 - - // from sys/mount.h - MNT_RDONLY = 0x00000001 /* read only filesystem */ - MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ - MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ - MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ - MNT_NODEV = 0x00000010 /* don't interpret special files */ - MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ - - MNT_WAIT = 1 /* synchronously wait for I/O to complete */ - MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ - MNT_LAZY = 3 /* push data not written by filesystem syncer */ -) - -const ( - sizeOfDiskstats = C.sizeof_struct_diskstats -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong - _C_long_double C.longlong -) - -type Statfs C.struct_statfs -type Diskstats C.struct_diskstats -type Fsid C.fsid_t -type Timeval C.struct_timeval - -type Diskstat C.struct_diskstat -type Bintime C.struct_bintime diff --git a/vendor/github.com/shirou/gopsutil/host/types_darwin.go b/vendor/github.com/shirou/gopsutil/host/types_darwin.go deleted file mode 100644 index b8582278859b..000000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_darwin.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build ignore -// plus hand editing about timeval - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#include -#include -*/ -import "C" - -type Utmpx C.struct_utmpx -type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_freebsd.go b/vendor/github.com/shirou/gopsutil/host/types_freebsd.go deleted file mode 100644 index bbdce0c6a9f2..000000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_freebsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#define KERNEL -#include -#include -#include -#include "freebsd_headers/utxdb.h" - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeOfUtmpx = C.sizeof_struct_futx -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type Utmp C.struct_utmp // for FreeBSD 9.0 compatibility -type Utmpx C.struct_futx diff --git a/vendor/github.com/shirou/gopsutil/host/types_linux.go b/vendor/github.com/shirou/gopsutil/host/types_linux.go deleted file mode 100644 index 8adecb6cfa9f..000000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeOfUtmp = C.sizeof_struct_utmp -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type utmp C.struct_utmp -type exit_status C.struct_exit_status -type timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_openbsd.go b/vendor/github.com/shirou/gopsutil/host/types_openbsd.go deleted file mode 100644 index 9ebb97ce52e9..000000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_openbsd.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#define KERNEL -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeOfUtmp = C.sizeof_struct_utmp -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type Utmp C.struct_utmp -type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go b/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go deleted file mode 100644 index 83cb91a1964c..000000000000 --- a/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package mem - -/* -#include -#include -#include -#include - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - CTLVm = 2 - CTLVfs = 10 - VmUvmexp = 4 // get uvmexp - VfsGeneric = 0 - VfsBcacheStat = 3 -) - -const ( - sizeOfUvmexp = C.sizeof_struct_uvmexp - sizeOfBcachestats = C.sizeof_struct_bcachestats -) - -type Uvmexp C.struct_uvmexp -type Bcachestats C.struct_bcachestats diff --git a/vendor/github.com/shirou/gopsutil/process/types_darwin.go b/vendor/github.com/shirou/gopsutil/process/types_darwin.go deleted file mode 100644 index 21216cd09a77..000000000000 --- a/vendor/github.com/shirou/gopsutil/process/types_darwin.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Hand Writing -// - all pointer in ExternProc to uint64 - -// +build ignore - -/* -Input to cgo -godefs. 
-*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ -// +godefs map struct_ [16]byte /* in6_addr */ - -package process - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -struct ucred_queue { - struct ucred *tqe_next; - struct ucred **tqe_prev; - TRACEBUF -}; - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type UGid_t C.gid_t - -type KinfoProc C.struct_kinfo_proc - -type Eproc C.struct_eproc - -type Proc C.struct_proc - -type Session C.struct_session - -type ucred C.struct_ucred - -type Uucred C.struct__ucred - -type Upcred C.struct__pcred - -type Vmspace C.struct_vmspace - -type Sigacts C.struct_sigacts - -type ExternProc C.struct_extern_proc - -type Itimerval C.struct_itimerval - -type Vnode C.struct_vnode - -type Pgrp C.struct_pgrp - -type UserStruct C.struct_user - -type Au_session C.struct_au_session - -type Posix_cred C.struct_posix_cred - -type Label C.struct_label - -type AuditinfoAddr C.struct_auditinfo_addr -type AuMask C.struct_au_mask -type AuTidAddr C.struct_au_tid_addr - -// TAILQ(ucred) -type UcredQueue C.struct_ucred_queue diff --git a/vendor/github.com/shirou/gopsutil/process/types_freebsd.go b/vendor/github.com/shirou/gopsutil/process/types_freebsd.go deleted file mode 100644 index aa7b3462de69..000000000000 --- a/vendor/github.com/shirou/gopsutil/process/types_freebsd.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build ignore - -// We still need editing by hands. -// go tool cgo -godefs types_freebsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_freebsd_amd64.go - -/* -Input to cgo -godefs. -*/ - -// +godefs map struct_pargs int64 /* pargs */ -// +godefs map struct_proc int64 /* proc */ -// +godefs map struct_user int64 /* user */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_filedesc int64 /* filedesc */ -// +godefs map struct_vmspace int64 /* vmspace */ -// +godefs map struct_pcb int64 /* pcb */ -// +godefs map struct_thread int64 /* thread */ -// +godefs map struct___sigset [16]byte /* sigset */ - -package process - -/* -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 14 // struct: process entries - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcPathname = 12 // path to executable - KernProcArgs = 7 // get/set arguments/proctitle -) - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong -) - -const ( - sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry - sizeOfKinfoProc = C.sizeof_struct_kinfo_proc -) - -// from sys/proc.h -const ( - SIDL = 1 /* Process being created by fork. */ - SRUN = 2 /* Currently runnable. */ - SSLEEP = 3 /* Sleeping on an address. */ - SSTOP = 4 /* Process debugging or suspension. */ - SZOMB = 5 /* Awaiting collection by parent. */ - SWAIT = 6 /* Waiting for interrupt. */ - SLOCK = 7 /* Blocked on a lock. */ -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type KinfoProc C.struct_kinfo_proc - -type Priority C.struct_priority - -type KinfoVmentry C.struct_kinfo_vmentry diff --git a/vendor/github.com/shirou/gopsutil/process/types_openbsd.go b/vendor/github.com/shirou/gopsutil/process/types_openbsd.go deleted file mode 100644 index 09ac590288a4..000000000000 --- a/vendor/github.com/shirou/gopsutil/process/types_openbsd.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build ignore - -// We still need editing by hands. -// go tool cgo -godefs types_openbsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_openbsd_amd64.go - -/* -Input to cgo -godefs. -*/ - -// +godefs map struct_pargs int64 /* pargs */ -// +godefs map struct_proc int64 /* proc */ -// +godefs map struct_user int64 /* user */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_filedesc int64 /* filedesc */ -// +godefs map struct_vmspace int64 /* vmspace */ -// +godefs map struct_pcb int64 /* pcb */ -// +godefs map struct_thread int64 /* thread */ -// +godefs map struct___sigset [16]byte /* sigset */ - -package process - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 66 // struct: process entries - KernProcAll = 0 - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcPathname = 12 // path to executable - KernProcArgs = 55 // get/set arguments/proctitle - KernProcArgv = 1 - KernProcEnv = 3 -) - -const ( - ArgMax = 256 * 1024 // sys/syslimits.h:#define ARG_MAX -) - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong -) - -const ( - sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry - sizeOfKinfoProc = C.sizeof_struct_kinfo_proc -) - -// from sys/proc.h -const ( - SIDL = 1 /* Process being created by fork. */ - SRUN = 2 /* Currently runnable. */ - SSLEEP = 3 /* Sleeping on an address. */ - SSTOP = 4 /* Process debugging or suspension. */ - SZOMB = 5 /* Awaiting collection by parent. */ - SDEAD = 6 /* Thread is almost gone */ - SONPROC = 7 /* Thread is currently on a CPU. 
*/ -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type KinfoProc C.struct_kinfo_proc - -type Priority C.struct_priority - -type KinfoVmentry C.struct_kinfo_vmentry diff --git a/vendor/github.com/ugorji/go/codec/xml.go b/vendor/github.com/ugorji/go/codec/xml.go deleted file mode 100644 index 19fc36caf3cc..000000000000 --- a/vendor/github.com/ugorji/go/codec/xml.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build ignore - -package codec - -import "reflect" - -/* - -A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder. - -We are attempting this due to perceived issues with encoding/xml: - - Complicated. It tried to do too much, and is not as simple to use as json. - - Due to over-engineering, reflection is over-used AND performance suffers: - java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/ - even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html - -codec framework will offer the following benefits - - VASTLY improved performance (when using reflection-mode or codecgen) - - simplicity and consistency: with the rest of the supported formats - - all other benefits of codec framework (streaming, codegeneration, etc) - -codec is not a drop-in replacement for encoding/xml. -It is a replacement, based on the simplicity and performance of codec. -Look at it like JAXB for Go. - -Challenges: - - Need to output XML preamble, with all namespaces at the right location in the output. - - Each "end" block is dynamic, so we need to maintain a context-aware stack - - How to decide when to use an attribute VS an element - - How to handle chardata, attr, comment EXPLICITLY. - - Should it output fragments? - e.g. encoding a bool should just output true OR false, which is not well-formed XML. - -Extend the struct tag. See representative example: - type X struct { - ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"` - // format: [namespace-uri ][namespace-prefix ]local-name, ... - } - -Based on this, we encode - - fields as elements, BUT - encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string) - - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata". - -To handle namespaces: - - XMLHandle is denoted as being namespace-aware. - Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name. - - *Encoder and *Decoder know whether the Handle "prefers" namespaces. - - add *Encoder.getEncName(*structFieldInfo). - No one calls *structFieldInfo.indexForEncName directly anymore - - OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware - indexForEncName takes a parameter of the form namespace:local-name OR local-name - - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc - by being a method on *Decoder, or maybe a method on the Handle itself. - No one accesses .encName anymore - - let encode.go and decode.go use these (for consistency) - - only problem exists for gen.go, where we create a big switch on encName. 
- Now, we also have to add a switch on strings.endsWith(kName, encNsName) - - gen.go will need to have many more methods, and then double-on the 2 switch loops like: - switch k { - case "abc" : x.abc() - case "def" : x.def() - default { - switch { - case !nsAware: panic(...) - case strings.endsWith(":abc"): x.abc() - case strings.endsWith(":def"): x.def() - default: panic(...) - } - } - } - -The structure below accommodates this: - - type typeInfo struct { - sfi []*structFieldInfo // sorted by encName - sfins // sorted by namespace - sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately. - sfip // unsorted - } - type structFieldInfo struct { - encName - nsEncName - ns string - attr bool - cdata bool - } - -indexForEncName is now an internal helper function that takes a sorted array -(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...) - -There will be a separate parser from the builder. -The parser will have a method: next() xmlToken method. It has lookahead support, -so you can pop multiple tokens, make a determination, and push them back in the order popped. -This will be needed to determine whether we are "nakedly" decoding a container or not. -The stack will be implemented using a slice and push/pop happens at the [0] element. - -xmlToken has fields: - - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text - - value string - - ns string - -SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL - -The following are skipped when parsing: - - External Entities (from external file) - - Notation Declaration e.g. - - Entity Declarations & References - - XML Declaration (assume UTF-8) - - XML Directive i.e. - - Other Declarations: Notation, etc. - - Comment - - Processing Instruction - - schema / DTD for validation: - We are not a VALIDATING parser. Validation is done elsewhere. - However, some parts of the DTD internal subset are used (SEE BELOW). - For Attribute List Declarations e.g. - - We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED) - -The following XML features are supported - - Namespace - - Element - - Attribute - - cdata - - Unicode escape - -The following DTD (when as an internal sub-set) features are supported: - - Internal Entities e.g. - AND entities for the set: [<>&"'] - - Parameter entities e.g. - - -At decode time, a structure containing the following is kept - - namespace mapping - - default attribute values - - all internal entities (<>&"' and others written in the document) - -When decode starts, it parses XML namespace declarations and creates a map in the -xmlDecDriver. While parsing, that map continuously gets updated. -The only problem happens when a namespace declaration happens on the node that it defines. -e.g. -To handle this, each Element must be fully parsed at a time, -even if it amounts to multiple tokens which are returned one at a time on request. - -xmlns is a special attribute name. - - It is used to define namespaces, including the default - - It is never returned as an AttrKey or AttrVal. - *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.* - -Number, bool, null, mapKey, etc can all be decoded from any xmlToken. -This accommodates map[int]string for example. - -It should be possible to create a schema from the types, -or vice versa (generate types from schema with appropriate tags). -This is however out-of-scope from this parsing project. 
- -We should write all namespace information at the first point that it is referenced in the tree, -and use the mapping for all child nodes and attributes. This means that state is maintained -at a point in the tree. This also means that calls to Decode or MustDecode will reset some state. - -When decoding, it is important to keep track of entity references and default attribute values. -It seems these can only be stored in the DTD components. We should honor them when decoding. - -Configuration for XMLHandle will look like this: - - XMLHandle - DefaultNS string - // Encoding: - NS map[string]string // ns URI to key, used for encoding - // Decoding: in case ENTITY declared in external schema or dtd, store info needed here - Entities map[string]string // map of entity rep to character - - -During encode, if a namespace mapping is not defined for a namespace found on a struct, -then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict -with any other namespace mapping). - -Note that different fields in a struct can have different namespaces. -However, all fields will default to the namespace on the _struct field (if defined). - -An XML document is a name, a map of attributes and a list of children. -Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example). -We have to "DecodeNaked" into something that resembles XML data. - -To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types: - type Name struct { // Preferred. Less allocations due to conversions. - Local string - Space string - } - type Element struct { - Name Name - Attrs map[Name]string - Children []interface{} // each child is either *Element or string - } -Only two "supporting" types are exposed for XML: Name and Element. - -// ------------------ - -We considered 'type Name string' where Name is like "Space Local" (space-separated). -We decided against it, because each creation of a name would lead to -double allocation (first convert []byte to string, then concatenate them into a string). -The benefit is that it is faster to read Attrs from a map. But given that Element is a value -object, we want to eschew methods and have public exposed variables. - -We also considered the following, where xml types were not value objects, and we used -intelligent accessor methods to extract information and for performance. -*** WE DECIDED AGAINST THIS. *** - type Attr struct { - Name Name - Value string - } - // Element is a ValueObject: There are no accessor methods. - // Make element self-contained. - type Element struct { - Name Name - attrsMap map[string]string // where key is "Space Local" - attrs []Attr - childrenT []string - childrenE []Element - childrenI []int // each child is a index into T or E. - } - func (x *Element) child(i) interface{} // returns string or *Element - -// ------------------ - -Per XML spec and our default handling, white space is always treated as -insignificant between elements, except in a text node. The xml:space='preserve' -attribute is ignored. - -**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.** -**So treat them as just "directives" that should be interpreted to mean something**. - -On encoding, we support indenting aka prettifying markup in the same way we support it for json. - -A document or element can only be encoded/decoded from/to a struct. 
In this mode: - - struct name maps to element name (or tag-info from _struct field) - - fields are mapped to child elements or attributes - -A map is either encoded as attributes on current element, or as a set of child elements. -Maps are encoded as attributes iff their keys and values are primitives (number, bool, string). - -A list is encoded as a set of child elements. - -Primitives (number, bool, string) are encoded as an element, attribute or text -depending on the context. - -Extensions must encode themselves as a text string. - -Encoding is tough, specifically when encoding mappings, because we need to encode -as either attribute or element. To do this, we need to default to encoding as attributes, -and then let Encoder inform the Handle when to start encoding as nodes. -i.e. Encoder does something like: - - h.EncodeMapStart() - h.Encode(), h.Encode(), ... - h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal - h.Encode(), h.Encode(), ... - h.EncodeEnd() - -Only XMLHandle understands this, and will set itself to start encoding as elements. - -This support extends to maps. For example, if a struct field is a map, and it has -the struct tag signifying it should be attr, then all its fields are encoded as attributes. -e.g. - - type X struct { - M map[string]int `codec:"m,attr"` // encode keys as attributes named - } - -Question: - - if encoding a map, what if map keys have spaces in them??? - Then they cannot be attributes or child elements. Error. - -Options to consider adding later: - - For attribute values, normalize by trimming beginning and ending white space, - and converting every white space sequence to a single space. - - ATTLIST restrictions are enforced. - e.g. default value of xml:space, skipping xml:XYZ style attributes, etc. - - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing). - Some elements e.g. br, hr, etc need not close and should be auto-closed - ... (see http://www.w3.org/TR/html4/loose.dtd) - An expansive set of entities are pre-defined. - - Have easy way to create a HTML parser: - add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose, - and add HTML Entities to the list. - - Support validating element/attribute XMLName before writing it. - Keep this behind a flag, which is set to false by default (for performance). - type XMLHandle struct { - CheckName bool - } - -Misc: - -ROADMAP (1 weeks): - - build encoder (1 day) - - build decoder (based off xmlParser) (1 day) - - implement xmlParser (2 days). - Look at encoding/xml for inspiration. - - integrate and TEST (1 days) - - write article and post it (1 day) - -// ---------- MORE NOTES FROM 2017-11-30 ------------ - -when parsing -- parse the attributes first -- then parse the nodes - -basically: -- if encoding a field: we use the field name for the wrapper -- if encoding a non-field, then just use the element type name - - map[string]string ==> abcval... or - val... OR - val1val2... <- PREFERED - []string ==> v1v2... - string v1 ==> v1 - bool true ==> true - float 1.0 ==> 1.0 - ... - - F1 map[string]string ==> abcval... OR - val... OR - val... <- PREFERED - F2 []string ==> v1v2... - F3 bool ==> true - ... - -- a scalar is encoded as: - (value) of type T ==> - (value) of field F ==> -- A kv-pair is encoded as: - (key,value) ==> OR - (key,value) of field F ==> OR -- A map or struct is just a list of kv-pairs -- A list is encoded as sequences of same node e.g. 
- - - value21 - value22 -- we may have to singularize the field name, when entering into xml, - and pluralize them when encoding. -- bi-directional encode->decode->encode is not a MUST. - even encoding/xml cannot decode correctly what was encoded: - - see https://play.golang.org/p/224V_nyhMS - func main() { - fmt.Println("Hello, playground") - v := []interface{}{"hello", 1, true, nil, time.Now()} - s, err := xml.Marshal(v) - fmt.Printf("err: %v, \ns: %s\n", err, s) - var v2 []interface{} - err = xml.Unmarshal(s, &v2) - fmt.Printf("err: %v, \nv2: %v\n", err, v2) - type T struct { - V []interface{} - } - v3 := T{V: v} - s, err = xml.Marshal(v3) - fmt.Printf("err: %v, \ns: %s\n", err, s) - var v4 T - err = xml.Unmarshal(s, &v4) - fmt.Printf("err: %v, \nv4: %v\n", err, v4) - } - Output: - err: , - s: hello1true - err: , - v2: [] - err: , - s: hello1true2009-11-10T23:00:00Z - err: , - v4: {[ ]} -- -*/ - -// ----------- PARSER ------------------- - -type xmlTokenType uint8 - -const ( - _ xmlTokenType = iota << 1 - xmlTokenElemStart - xmlTokenElemEnd - xmlTokenAttrKey - xmlTokenAttrVal - xmlTokenText -) - -type xmlToken struct { - Type xmlTokenType - Value string - Namespace string // blank for AttrVal and Text -} - -type xmlParser struct { - r decReader - toks []xmlToken // list of tokens. - ptr int // ptr into the toks slice - done bool // nothing else to parse. r now returns EOF. -} - -func (x *xmlParser) next() (t *xmlToken) { - // once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish) - if !x.done && len(x.toks) == 0 { - x.nextTag() - } - // parses one element at a time (into possible many tokens) - if x.ptr < len(x.toks) { - t = &(x.toks[x.ptr]) - x.ptr++ - if x.ptr == len(x.toks) { - x.ptr = 0 - x.toks = x.toks[:0] - } - } - return -} - -// nextTag will parses the next element and fill up toks. -// It set done flag if/once EOF is reached. -func (x *xmlParser) nextTag() { - // TODO: implement. -} - -// ----------- ENCODER ------------------- - -type xmlEncDriver struct { - e *Encoder - w encWriter - h *XMLHandle - b [64]byte // scratch - bs []byte // scratch - // s jsonStack - noBuiltInTypes -} - -// ----------- DECODER ------------------- - -type xmlDecDriver struct { - d *Decoder - h *XMLHandle - r decReader // *bytesDecReader decReader - ct valueType // container type. one of unset, array or map. - bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch - - // wsSkipped bool // whitespace skipped - - // s jsonStack - - noBuiltInTypes -} - -// DecodeNaked will decode into an XMLNode - -// XMLName is a value object representing a namespace-aware NAME -type XMLName struct { - Local string - Space string -} - -// XMLNode represents a "union" of the different types of XML Nodes. -// Only one of fields (Text or *Element) is set. -type XMLNode struct { - Element *Element - Text string -} - -// XMLElement is a value object representing an fully-parsed XML element. -type XMLElement struct { - Name Name - Attrs map[XMLName]string - // Children is a list of child nodes, each being a *XMLElement or string - Children []XMLNode -} - -// ----------- HANDLE ------------------- - -type XMLHandle struct { - BasicHandle - textEncodingType - - DefaultNS string - NS map[string]string // ns URI to key, for encoding - Entities map[string]string // entity representation to string, for encoding. 
-
-func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
-	return &xmlEncDriver{e: e, w: e.w, h: h}
-}
-
-func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
-	// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
-	hd := xmlDecDriver{d: d, r: d.r, h: h}
-	hd.n.bytes = d.b[:]
-	return &hd
-}
-
-func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
-	return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
-}
-
-var _ decDriver = (*xmlDecDriver)(nil)
-var _ encDriver = (*xmlEncDriver)(nil)
diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go
deleted file mode 100644
index 855e60aee55e..000000000000
--- a/vendor/github.com/ulikunitz/xz/example.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
-	"bytes"
-	"io"
-	"log"
-	"os"
-
-	"github.com/ulikunitz/xz"
-)
-
-func main() {
-	const text = "The quick brown fox jumps over the lazy dog.\n"
-	var buf bytes.Buffer
-	// compress text
-	w, err := xz.NewWriter(&buf)
-	if err != nil {
-		log.Fatalf("xz.NewWriter error %s", err)
-	}
-	if _, err := io.WriteString(w, text); err != nil {
-		log.Fatalf("WriteString error %s", err)
-	}
-	if err := w.Close(); err != nil {
-		log.Fatalf("w.Close error %s", err)
-	}
-	// decompress buffer and write output to stdout
-	r, err := xz.NewReader(&buf)
-	if err != nil {
-		log.Fatalf("NewReader error %s", err)
-	}
-	if _, err = io.Copy(os.Stdout, r); err != nil {
-		log.Fatalf("io.Copy error %s", err)
-	}
-}
diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go
deleted file mode 100644
index 4548b993dbcc..000000000000
--- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
-// This program must be run after mksyscall.go.
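-//
-// For example (illustrative), for a trampoline declaration such as
-//	func libc_getattrlist_trampoline()
-// the generated assembly contains:
-//	TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0
-//		JMP	libc_getattrlist(SB)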
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index eb4332059aef..000000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. 
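-		// For example (illustrative): a cgo-generated field such as
-		//	Atim _Ctype_struct_st_timespec
-		// becomes
-		//	Atim Timespec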
-		sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
-		b = sttimespec.ReplaceAll(b, []byte("Timespec"))
-	}
-
-	// Intentionally export __val fields in Fsid and Sigset_t
-	valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
-	b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
-
-	// Intentionally export __fds_bits field in FdSet
-	fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
-	b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
-
-	// If we have empty Ptrace structs, we should delete them. Only s390x emits
-	// nonempty Ptrace structs.
-	ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
-	b = ptraceRexexp.ReplaceAll(b, nil)
-
-	// Replace the control_regs union with a blank identifier for now.
-	controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
-	b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
-
-	// Remove fields that are added by glibc
-	// Note that this is unstable as the identifiers are private.
-	removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
-	b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
-	// Convert [65]int8 to [65]byte in Utsname members to simplify
-	// conversion to string; see golang.org/issue/20753
-	convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
-	b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
-
-	// Convert [1024]int8 to [1024]byte in Ptmget members
-	convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
-	b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
-
-	// Remove spare fields (e.g. in Statx_t)
-	spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
-	b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
-
-	// Remove cgo padding fields
-	removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
-	b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
-
-	// Remove padding, hidden, or unused fields
-	removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
-	b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
-	// Remove the first line of warning from cgo
-	b = b[bytes.IndexByte(b, '\n')+1:]
-	// Modify the command in the header to include:
-	// mkpost, our own warning, and a build tag.
-	replacement := fmt.Sprintf(`$1 | go run mkpost.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s,%s`, goarch, goos)
-	cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
-	b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
-
-	// Rename Stat_t time fields
-	if goos == "freebsd" && goarch == "386" {
-		// Hide Stat_t.[AMCB]tim_ext fields
-		renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
-		b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
-	}
-	renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
-	b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
-
-	// gofmt
-	b, err = format.Source(b)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	os.Stdout.Write(b)
-}
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go
deleted file mode 100644
index e4af9424e978..000000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if 
err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
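-				// For example (illustrative, not from the original source), a parameter
-				// "p []byte" yields generated code like:
-				//	var _p0 unsafe.Pointer
-				//	if len(p) > 0 {
-				//		_p0 = unsafe.Pointer(&p[0])
-				//	} else {
-				//		_p0 = unsafe.Pointer(&_zero)
-				//	}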
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
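-			// For example (illustrative): a prototype like
-			//	//sys	Open(path string, mode int, perm uint32) (fd int, err error)
-			// ends up generating roughly
-			//	r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
-			//	fd = int(r0)
-			//	if e1 != 0 {
-			//		err = errnoErr(e1)
-			//	}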
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc3b6e..000000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
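-			// For example (illustrative, hypothetical names): a "*Flock_t" parameter
-			// becomes C.uintptr_t(uintptr(unsafe.Pointer(lk))), and a plain "int"
-			// parameter becomes C.int(flag), per the cases below.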
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
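-				// (illustrative: the cgo preamble above already emitted
-				// "#define c_select select", so C.c_select still resolves to select(2))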
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c960099517af..000000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) 
(r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. 
- // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. 
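-				// For example (illustrative), for sysname "open64" this emits:
-				//	//go:cgo_import_dynamic libc_open64 open64 "libc.a/shr_64.o"
-				//	//go:linkname libc_open64 libc_open64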
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b695..000000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
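-				// For example (illustrative), a parameter "p []byte" yields:
-				//	var _p0 *byte
-				//	if len(p) > 0 {
-				//		_p0 = &p[0]
-				//	}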
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b409909cc3..000000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
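For illustration, the mksyscall_solaris.go generator removed above turns the getsockopt prototype cited in its own header comment into a dynamic-import directive, a linkname directive, a proc address variable, and a wrapper function. The sketch below is reconstructed from the generator logic visible in the hunk, not copied from a generated file, so formatting details may differ:

	//go:cgo_import_dynamic libc_getsockopt getsockopt "libsocket.so"
	//go:linkname procgetsockopt libc_getsockopt

	var procgetsockopt syscallFunc

	// Wrapper generated for:
	//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
	func getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) {
		_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
		if e1 != 0 {
			err = e1
		}
		return
	}

The cgo_import_dynamic/linkname pair resolves the libc symbol at runtime, which is why the generated wrappers can call into libsocket.so without pulling in cgo.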
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. -// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. -func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." 
- } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - `netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := 
make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. - -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index baa6ecd85065..000000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). 
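To make the mksysnum.go flow described above concrete: for GOOS=openbsd, a syscalls.master entry along the lines of

	1	STD	{ void sys_exit(int rval); }

matches the openbsd case's regular expression, format() collapses the resulting SYS_SYS_EXIT name to SYS_EXIT, and the emitted table contains roughly:

	const (
		SYS_EXIT = 1; // { void sys_exit(int rval); }
	)

(The sample line only mirrors the usual OpenBSD master-file layout; the exact whitespace, numbering, and prototypes come from the system's own syscalls.master.)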
-package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { 
- name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beede556..000000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - 
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692b45..000000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - 
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d082..000000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index a121dc3368f9..000000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_ATTACH = C.PT_ATTACH - PTRACE_CONT = C.PT_CONTINUE - PTRACE_DETACH = C.PT_DETACH - PTRACE_GETFPREGS = C.PT_GETFPREGS - PTRACE_GETFSBASE = C.PT_GETFSBASE - PTRACE_GETLWPLIST = C.PT_GETLWPLIST - PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS - PTRACE_GETREGS = C.PT_GETREGS - PTRACE_GETXSTATE = C.PT_GETXSTATE - PTRACE_IO = C.PT_IO - PTRACE_KILL = C.PT_KILL - PTRACE_LWPEVENTS = C.PT_LWP_EVENTS - PTRACE_LWPINFO = C.PT_LWPINFO - PTRACE_SETFPREGS = C.PT_SETFPREGS - PTRACE_SETREGS = C.PT_SETREGS - PTRACE_SINGLESTEP = C.PT_STEP - PTRACE_TRACEME = C.PT_TRACE_ME -) - -const ( - PIOD_READ_D = C.PIOD_READ_D - PIOD_WRITE_D = C.PIOD_WRITE_D - PIOD_READ_I = C.PIOD_READ_I - PIOD_WRITE_I = C.PIOD_WRITE_I -) - -const ( - PL_FLAG_BORN = C.PL_FLAG_BORN - PL_FLAG_EXITED = C.PL_FLAG_EXITED - 
PL_FLAG_SI = C.PL_FLAG_SI -) - -const ( - TRAP_BRKPT = C.TRAP_BRKPT - TRAP_TRACE = C.TRAP_TRACE -) - -type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo - -type __Siginfo C.struct___siginfo - -type Sigset_t C.sigset_t - -type Reg C.struct_reg - -type FpReg C.struct_fpreg - -type PtraceIoDesc C.struct_ptrace_io_desc - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data - -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 4a96d72c37d1..000000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 775cb57dc8a8..000000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f93481d..000000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go deleted file mode 100644 index 26cfef9c6b71..000000000000 --- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type registry struct { - XMLName xml.Name `xml:"registry"` - Updated string `xml:"updated"` - Registry []struct { - ID string `xml:"id,attr"` - Record []struct { - Name string `xml:"name"` - Xref []struct { - Type string `xml:"type,attr"` - Data string `xml:"data,attr"` - } `xml:"xref"` - Desc struct { - Data string `xml:",innerxml"` - // Any []struct { - // Data string `xml:",chardata"` - // } `xml:",any"` - // Data string `xml:",chardata"` - } `xml:"description,"` - MIB string `xml:"value"` - Alias []string `xml:"alias"` - MIME string `xml:"preferred_alias"` - } `xml:"record"` - } `xml:"registry"` -} - -func main() { - r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") - reg := ®istry{} - if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { - log.Fatalf("Error decoding charset registry: %v", err) - } - if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { - log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) - } - - w := &bytes.Buffer{} - fmt.Fprintf(w, "const (\n") - for _, rec := range reg.Registry[0].Record { - constName := "" - for _, a := range rec.Alias { - if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { - // Some of the constant definitions have comments in them. Strip those. - constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) - } - } - if constName == "" { - switch rec.MIB { - case "2085": - constName = "HZGB2312" // Not listed as alias for some reason. 
- default: - log.Fatalf("No cs alias defined for %s.", rec.MIB) - } - } - if rec.MIME != "" { - rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) - } - fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) - if len(rec.Desc.Data) > 0 { - fmt.Fprint(w, "// ") - d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) - inElem := true - attr := "" - for { - t, err := d.Token() - if err != nil { - if err != io.EOF { - log.Fatal(err) - } - break - } - switch x := t.(type) { - case xml.CharData: - attr = "" // Don't need attribute info. - a := bytes.Split([]byte(x), []byte("\n")) - for i, b := range a { - if b = bytes.TrimSpace(b); len(b) != 0 { - if !inElem && i > 0 { - fmt.Fprint(w, "\n// ") - } - inElem = false - fmt.Fprintf(w, "%s ", string(b)) - } - } - case xml.StartElement: - if x.Name.Local == "xref" { - inElem = true - use := false - for _, a := range x.Attr { - if a.Name.Local == "type" { - use = use || a.Value != "person" - } - if a.Name.Local == "data" && use { - // Patch up URLs to use https. From some links, the - // https version is different from the http one. - s := a.Value - s = strings.Replace(s, "http://", "https://", -1) - s = strings.Replace(s, "/unicode/", "/", -1) - attr = s + " " - } - } - } - case xml.EndElement: - inElem = false - fmt.Fprint(w, attr) - } - } - fmt.Fprint(w, "\n") - } - for _, x := range rec.Xref { - switch x.Type { - case "rfc": - fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) - case "uri": - fmt.Fprintf(w, "// Reference: %s\n", x.Data) - } - } - fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) - fmt.Fprintln(w) - } - fmt.Fprintln(w, ")") - - gen.WriteGoFile("mib.go", "identifier", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169cc04..000000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. - - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d640..000000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb994289492..000000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. 
-type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa93343d..000000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). 
-type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. 
- decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... -// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. 
-const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. 
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... -// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900d14..000000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. 
- -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ce1977b507f..65a405a72385 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,43 +1,45 @@ # cloud.google.com/go v0.39.0 -cloud.google.com/go/monitoring/apiv3 -cloud.google.com/go/storage -cloud.google.com/go/spanner -cloud.google.com/go/kms/apiv1 cloud.google.com/go/civil cloud.google.com/go/compute/metadata cloud.google.com/go/iam cloud.google.com/go/internal +cloud.google.com/go/internal/fields cloud.google.com/go/internal/optional +cloud.google.com/go/internal/protostruct cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -cloud.google.com/go/internal/fields -cloud.google.com/go/internal/protostruct +cloud.google.com/go/kms/apiv1 +cloud.google.com/go/monitoring/apiv3 +cloud.google.com/go/spanner cloud.google.com/go/spanner/internal/backoff +cloud.google.com/go/storage # code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f code.cloudfoundry.org/gofileutils/fileutils # contrib.go.opencensus.io/exporter/ocagent v0.4.12 contrib.go.opencensus.io/exporter/ocagent # github.com/Azure/azure-sdk-for-go v29.0.0+incompatible -github.com/Azure/azure-sdk-for-go/storage -github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault 
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac +github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization +github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 -github.com/Azure/go-ansiterm/winterm github.com/Azure/go-ansiterm +github.com/Azure/go-ansiterm/winterm # github.com/Azure/go-autorest v11.7.1+incompatible -github.com/Azure/go-autorest/autorest/azure github.com/Azure/go-autorest/autorest +github.com/Azure/go-autorest/autorest/adal +github.com/Azure/go-autorest/autorest/azure github.com/Azure/go-autorest/autorest/azure/auth -github.com/Azure/go-autorest/autorest/to +github.com/Azure/go-autorest/autorest/azure/cli github.com/Azure/go-autorest/autorest/date -github.com/Azure/go-autorest/tracing +github.com/Azure/go-autorest/autorest/to github.com/Azure/go-autorest/autorest/validation -github.com/Azure/go-autorest/autorest/adal github.com/Azure/go-autorest/logger -github.com/Azure/go-autorest/autorest/azure/cli +github.com/Azure/go-autorest/tracing +# github.com/BurntSushi/toml v0.3.1 +github.com/BurntSushi/toml # github.com/DataDog/datadog-go v2.2.0+incompatible github.com/DataDog/datadog-go/statsd # github.com/Jeffail/gabs v1.1.1 @@ -54,27 +56,27 @@ github.com/Nvveen/Gotty # github.com/SAP/go-hdb v0.14.1 github.com/SAP/go-hdb/driver github.com/SAP/go-hdb/driver/sqltrace -github.com/SAP/go-hdb/internal/protocol github.com/SAP/go-hdb/internal/bufio +github.com/SAP/go-hdb/internal/protocol github.com/SAP/go-hdb/internal/unicode github.com/SAP/go-hdb/internal/unicode/cesu8 # github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 github.com/StackExchange/wmi # github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f -github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth -github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers github.com/aliyun/alibaba-cloud-sdk-go/sdk -github.com/aliyun/alibaba-cloud-sdk-go/services/kms -github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints -github.com/aliyun/alibaba-cloud-sdk-go/services/sts +github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials +github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider +github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers +github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils -github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider +github.com/aliyun/alibaba-cloud-sdk-go/services/kms github.com/aliyun/alibaba-cloud-sdk-go/services/ram +github.com/aliyun/alibaba-cloud-sdk-go/services/sts # github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/aliyun/aliyun-oss-go-sdk/oss # github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 @@ -95,52 +97,52 @@ github.com/armon/go-radix github.com/asaskevich/govalidator # github.com/aws/aws-sdk-go v1.19.39 github.com/aws/aws-sdk-go/aws -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/credentials/stscreds 
-github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/service/ec2 -github.com/aws/aws-sdk-go/service/iam -github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/service/iam/iamiface -github.com/aws/aws-sdk-go/service/sts/stsiface -github.com/aws/aws-sdk-go/aws/defaults -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/service/dynamodb -github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute -github.com/aws/aws-sdk-go/service/s3 -github.com/aws/aws-sdk-go/service/kms -github.com/aws/aws-sdk-go/service/kms/kmsiface -github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/aws/client/metadata github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/crr github.com/aws/aws-sdk-go/aws/csm -github.com/aws/aws-sdk-go/aws/awsutil -github.com/aws/aws-sdk-go/aws/client/metadata +github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/s3err +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/ec2query -github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/internal/sdkuri -github.com/aws/aws-sdk-go/aws/crr -github.com/aws/aws-sdk-go/private/protocol/jsonrpc -github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/private/protocol/query/queryutil -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/service/dynamodb +github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute +github.com/aws/aws-sdk-go/service/ec2 +github.com/aws/aws-sdk-go/service/iam +github.com/aws/aws-sdk-go/service/iam/iamiface +github.com/aws/aws-sdk-go/service/kms +github.com/aws/aws-sdk-go/service/kms/kmsiface +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 
github.com/beorn7/perks/quantile # github.com/bgentry/speakeasy v0.1.0 @@ -150,10 +152,10 @@ github.com/boombuler/barcode github.com/boombuler/barcode/qr github.com/boombuler/barcode/utils # github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f +github.com/briankassouf/jose github.com/briankassouf/jose/crypto github.com/briankassouf/jose/jws github.com/briankassouf/jose/jwt -github.com/briankassouf/jose # github.com/cenkalti/backoff v2.2.1+incompatible github.com/cenkalti/backoff # github.com/census-instrumentation/opencensus-proto v0.2.0 @@ -171,8 +173,8 @@ github.com/chrismalek/oktasdk-go/okta # github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible github.com/circonus-labs/circonus-gometrics github.com/circonus-labs/circonus-gometrics/api -github.com/circonus-labs/circonus-gometrics/checkmgr github.com/circonus-labs/circonus-gometrics/api/config +github.com/circonus-labs/circonus-gometrics/checkmgr # github.com/circonus-labs/circonusllhist v0.1.3 github.com/circonus-labs/circonusllhist # github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 @@ -203,12 +205,12 @@ github.com/docker/go-connections/nat # github.com/docker/go-units v0.4.0 github.com/docker/go-units # github.com/dsnet/compress v0.0.1 +github.com/dsnet/compress github.com/dsnet/compress/bzip2 github.com/dsnet/compress/bzip2/internal/sais github.com/dsnet/compress/internal github.com/dsnet/compress/internal/errors github.com/dsnet/compress/internal/prefix -github.com/dsnet/compress # github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 github.com/duosecurity/duo_api_golang github.com/duosecurity/duo_api_golang/authapi @@ -243,25 +245,25 @@ github.com/gocql/gocql/internal/lru github.com/gocql/gocql/internal/murmur github.com/gocql/gocql/internal/streams # github.com/gogo/protobuf v1.2.1 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/io github.com/gogo/protobuf/proto -github.com/gogo/protobuf/sortkeys -github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/protoc-gen-gogo/descriptor +github.com/gogo/protobuf/sortkeys # github.com/golang/protobuf v1.3.2 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/protoc-gen-go/generator +github.com/golang/protobuf/protoc-gen-go/generator/internal/remap +github.com/golang/protobuf/protoc-gen-go/plugin github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/ptypes/empty +github.com/golang/protobuf/ptypes/struct +github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -github.com/golang/protobuf/protoc-gen-go/descriptor -github.com/golang/protobuf/jsonpb -github.com/golang/protobuf/protoc-gen-go/generator -github.com/golang/protobuf/protoc-gen-go/generator/internal/remap -github.com/golang/protobuf/protoc-gen-go/plugin # github.com/golang/snappy v0.0.1 github.com/golang/snappy # github.com/google/go-github v17.0.0+incompatible @@ -279,12 +281,22 @@ github.com/googleapis/gax-go/v2 # github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/gorhill/cronexpr # github.com/grpc-ecosystem/grpc-gateway v1.8.5 +github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities -github.com/grpc-ecosystem/grpc-gateway/internal # 
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool -# github.com/hashicorp/consul/api v1.0.1 +# github.com/hashicorp/consul-template v0.22.0 +github.com/hashicorp/consul-template/child +github.com/hashicorp/consul-template/config +github.com/hashicorp/consul-template/dependency +github.com/hashicorp/consul-template/manager +github.com/hashicorp/consul-template/renderer +github.com/hashicorp/consul-template/signals +github.com/hashicorp/consul-template/template +github.com/hashicorp/consul-template/version +github.com/hashicorp/consul-template/watch +# github.com/hashicorp/consul/api v1.1.0 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap @@ -314,6 +326,7 @@ github.com/hashicorp/go-retryablehttp github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-sockaddr +github.com/hashicorp/go-sockaddr/template # github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-syslog # github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 @@ -326,12 +339,12 @@ github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl github.com/hashicorp/hcl/hcl/ast -github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/scanner github.com/hashicorp/hcl/hcl/strconv +github.com/hashicorp/hcl/hcl/token +github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token # github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf @@ -341,7 +354,7 @@ github.com/hashicorp/nomad/api/contexts github.com/hashicorp/raft # github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab github.com/hashicorp/raft-snapshot -# github.com/hashicorp/serf v0.8.2 +# github.com/hashicorp/serf v0.8.3 github.com/hashicorp/serf/coordinate # github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec github.com/hashicorp/vault-plugin-auth-alicloud @@ -352,11 +365,11 @@ github.com/hashicorp/vault-plugin-auth-azure github.com/hashicorp/vault-plugin-auth-centrify # github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee github.com/hashicorp/vault-plugin-auth-cf -github.com/hashicorp/vault-plugin-auth-cf/signatures github.com/hashicorp/vault-plugin-auth-cf/models -github.com/hashicorp/vault-plugin-auth-cf/util +github.com/hashicorp/vault-plugin-auth-cf/signatures github.com/hashicorp/vault-plugin-auth-cf/testing/certificates github.com/hashicorp/vault-plugin-auth-cf/testing/cf +github.com/hashicorp/vault-plugin-auth-cf/util # github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190814210049-1ccb3dc10102 github.com/hashicorp/vault-plugin-auth-gcp/plugin github.com/hashicorp/vault-plugin-auth-gcp/plugin/cache @@ -388,49 +401,50 @@ github.com/hashicorp/vault-plugin-secrets-kv # github.com/hashicorp/vault/api v1.0.5-0.20191017194845-99f7184d3326 => ./api github.com/hashicorp/vault/api # github.com/hashicorp/vault/sdk v0.1.14-0.20191017211055-9bd3a27a36c4 => ./sdk -github.com/hashicorp/vault/sdk/helper/salt -github.com/hashicorp/vault/sdk/helper/strutil -github.com/hashicorp/vault/sdk/helper/wrapping -github.com/hashicorp/vault/sdk/logical -github.com/hashicorp/vault/sdk/helper/parseutil +github.com/hashicorp/vault/sdk/database/dbplugin +github.com/hashicorp/vault/sdk/database/helper/connutil 
+github.com/hashicorp/vault/sdk/database/helper/credsutil +github.com/hashicorp/vault/sdk/database/helper/dbutil github.com/hashicorp/vault/sdk/framework -github.com/hashicorp/vault/sdk/helper/policyutil -github.com/hashicorp/vault/sdk/plugin +github.com/hashicorp/vault/sdk/helper/base62 +github.com/hashicorp/vault/sdk/helper/certutil github.com/hashicorp/vault/sdk/helper/cidrutil +github.com/hashicorp/vault/sdk/helper/compressutil github.com/hashicorp/vault/sdk/helper/consts -github.com/hashicorp/vault/sdk/helper/locksutil -github.com/hashicorp/vault/sdk/helper/tokenutil -github.com/hashicorp/vault/sdk/helper/jsonutil -github.com/hashicorp/vault/sdk/helper/certutil -github.com/hashicorp/vault/sdk/helper/password -github.com/hashicorp/vault/sdk/helper/ldaputil -github.com/hashicorp/vault/sdk/helper/tlsutil -github.com/hashicorp/vault/sdk/database/dbplugin -github.com/hashicorp/vault/sdk/database/helper/dbutil -github.com/hashicorp/vault/sdk/queue +github.com/hashicorp/vault/sdk/helper/cryptoutil github.com/hashicorp/vault/sdk/helper/dbtxn +github.com/hashicorp/vault/sdk/helper/entropy github.com/hashicorp/vault/sdk/helper/errutil +github.com/hashicorp/vault/sdk/helper/hclutil +github.com/hashicorp/vault/sdk/helper/jsonutil +github.com/hashicorp/vault/sdk/helper/kdf github.com/hashicorp/vault/sdk/helper/keysutil -github.com/hashicorp/vault/sdk/helper/base62 +github.com/hashicorp/vault/sdk/helper/ldaputil +github.com/hashicorp/vault/sdk/helper/license +github.com/hashicorp/vault/sdk/helper/locksutil github.com/hashicorp/vault/sdk/helper/logging github.com/hashicorp/vault/sdk/helper/mlock +github.com/hashicorp/vault/sdk/helper/parseutil +github.com/hashicorp/vault/sdk/helper/password +github.com/hashicorp/vault/sdk/helper/pathmanager +github.com/hashicorp/vault/sdk/helper/pluginutil +github.com/hashicorp/vault/sdk/helper/pointerutil +github.com/hashicorp/vault/sdk/helper/policyutil +github.com/hashicorp/vault/sdk/helper/salt +github.com/hashicorp/vault/sdk/helper/strutil +github.com/hashicorp/vault/sdk/helper/tlsutil +github.com/hashicorp/vault/sdk/helper/tokenutil github.com/hashicorp/vault/sdk/helper/useragent +github.com/hashicorp/vault/sdk/helper/wrapping +github.com/hashicorp/vault/sdk/logical github.com/hashicorp/vault/sdk/physical github.com/hashicorp/vault/sdk/physical/file github.com/hashicorp/vault/sdk/physical/inmem -github.com/hashicorp/vault/sdk/version -github.com/hashicorp/vault/sdk/helper/cryptoutil -github.com/hashicorp/vault/sdk/helper/hclutil -github.com/hashicorp/vault/sdk/database/helper/credsutil -github.com/hashicorp/vault/sdk/helper/compressutil -github.com/hashicorp/vault/sdk/helper/pathmanager -github.com/hashicorp/vault/sdk/plugin/pb -github.com/hashicorp/vault/sdk/database/helper/connutil -github.com/hashicorp/vault/sdk/helper/license -github.com/hashicorp/vault/sdk/helper/pluginutil -github.com/hashicorp/vault/sdk/helper/entropy -github.com/hashicorp/vault/sdk/helper/kdf +github.com/hashicorp/vault/sdk/plugin github.com/hashicorp/vault/sdk/plugin/mock +github.com/hashicorp/vault/sdk/plugin/pb +github.com/hashicorp/vault/sdk/queue +github.com/hashicorp/vault/sdk/version # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux # github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 @@ -439,11 +453,11 @@ github.com/influxdata/influxdb/models github.com/influxdata/influxdb/pkg/escape # github.com/jackc/pgx v3.3.0+incompatible github.com/jackc/pgx +github.com/jackc/pgx/chunkreader github.com/jackc/pgx/internal/sanitize 
github.com/jackc/pgx/pgio github.com/jackc/pgx/pgproto3 github.com/jackc/pgx/pgtype -github.com/jackc/pgx/chunkreader # github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 github.com/jeffchao/backoff # github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f @@ -455,25 +469,25 @@ github.com/jmespath/go-jmespath # github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 github.com/joyent/triton-go github.com/joyent/triton-go/authentication +github.com/joyent/triton-go/client github.com/joyent/triton-go/errors github.com/joyent/triton-go/storage -github.com/joyent/triton-go/client # github.com/json-iterator/go v1.1.6 github.com/json-iterator/go # github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f -github.com/keybase/go-crypto/openpgp -github.com/keybase/go-crypto/openpgp/packet -github.com/keybase/go-crypto/openpgp/armor -github.com/keybase/go-crypto/openpgp/errors -github.com/keybase/go-crypto/openpgp/s2k -github.com/keybase/go-crypto/rsa github.com/keybase/go-crypto/brainpool github.com/keybase/go-crypto/cast5 github.com/keybase/go-crypto/curve25519 github.com/keybase/go-crypto/ed25519 +github.com/keybase/go-crypto/ed25519/internal/edwards25519 +github.com/keybase/go-crypto/openpgp +github.com/keybase/go-crypto/openpgp/armor github.com/keybase/go-crypto/openpgp/ecdh github.com/keybase/go-crypto/openpgp/elgamal -github.com/keybase/go-crypto/ed25519/internal/edwards25519 +github.com/keybase/go-crypto/openpgp/errors +github.com/keybase/go-crypto/openpgp/packet +github.com/keybase/go-crypto/openpgp/s2k +github.com/keybase/go-crypto/rsa # github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/konsorten/go-windows-terminal-sequences # github.com/kr/pretty v0.1.0 @@ -488,6 +502,8 @@ github.com/lib/pq/scram github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.8 github.com/mattn/go-isatty +# github.com/mattn/go-shellwords v1.0.5 +github.com/mattn/go-shellwords # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mholt/archiver v3.1.1+incompatible @@ -502,6 +518,8 @@ github.com/mitchellh/copystructure github.com/mitchellh/go-homedir # github.com/mitchellh/go-testing-interface v1.0.0 github.com/mitchellh/go-testing-interface +# github.com/mitchellh/hashstructure v1.0.0 +github.com/mitchellh/hashstructure # github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure # github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8 @@ -521,15 +539,15 @@ github.com/oklog/run # github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 -github.com/opencontainers/image-spec/specs-go/v1 github.com/opencontainers/image-spec/specs-go +github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v0.1.1 github.com/opencontainers/runc/libcontainer/user # github.com/oracle/oci-go-sdk v7.0.0+incompatible github.com/oracle/oci-go-sdk/common github.com/oracle/oci-go-sdk/common/auth -github.com/oracle/oci-go-sdk/objectstorage github.com/oracle/oci-go-sdk/keymanagement +github.com/oracle/oci-go-sdk/objectstorage # github.com/ory/dockertest v3.3.4+incompatible github.com/ory/dockertest github.com/ory/dockertest/docker @@ -537,23 +555,23 @@ github.com/ory/dockertest/docker/opts github.com/ory/dockertest/docker/pkg/archive github.com/ory/dockertest/docker/pkg/fileutils github.com/ory/dockertest/docker/pkg/homedir -github.com/ory/dockertest/docker/pkg/jsonmessage 
-github.com/ory/dockertest/docker/pkg/stdcopy -github.com/ory/dockertest/docker/types/registry -github.com/ory/dockertest/docker/types github.com/ory/dockertest/docker/pkg/idtools github.com/ory/dockertest/docker/pkg/ioutils +github.com/ory/dockertest/docker/pkg/jsonmessage github.com/ory/dockertest/docker/pkg/longpath +github.com/ory/dockertest/docker/pkg/mount github.com/ory/dockertest/docker/pkg/pools +github.com/ory/dockertest/docker/pkg/stdcopy github.com/ory/dockertest/docker/pkg/system github.com/ory/dockertest/docker/pkg/term +github.com/ory/dockertest/docker/pkg/term/windows +github.com/ory/dockertest/docker/types +github.com/ory/dockertest/docker/types/blkiodev github.com/ory/dockertest/docker/types/container github.com/ory/dockertest/docker/types/filters github.com/ory/dockertest/docker/types/mount github.com/ory/dockertest/docker/types/network -github.com/ory/dockertest/docker/pkg/mount -github.com/ory/dockertest/docker/pkg/term/windows -github.com/ory/dockertest/docker/types/blkiodev +github.com/ory/dockertest/docker/types/registry github.com/ory/dockertest/docker/types/strslice github.com/ory/dockertest/docker/types/versions # github.com/patrickmn/go-cache v2.1.0+incompatible @@ -567,16 +585,16 @@ github.com/pkg/errors github.com/pmezard/go-difflib/difflib # github.com/posener/complete v1.2.1 github.com/posener/complete -github.com/posener/complete/cmd/install github.com/posener/complete/cmd +github.com/posener/complete/cmd/install github.com/posener/complete/match # github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 github.com/pquerna/cachecontrol github.com/pquerna/cachecontrol/cacheobject # github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d github.com/pquerna/otp -github.com/pquerna/otp/totp github.com/pquerna/otp/hotp +github.com/pquerna/otp/totp # github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -584,13 +602,13 @@ github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_model/go # github.com/prometheus/common v0.2.0 github.com/prometheus/common/expfmt -github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model # github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 github.com/prometheus/procfs +github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs -github.com/prometheus/procfs/internal/util # github.com/ryanuber/columnize v2.1.0+incompatible github.com/ryanuber/columnize # github.com/ryanuber/go-glob v1.0.0 @@ -603,10 +621,10 @@ github.com/satori/go.uuid github.com/shirou/gopsutil/cpu github.com/shirou/gopsutil/disk github.com/shirou/gopsutil/host -github.com/shirou/gopsutil/mem github.com/shirou/gopsutil/internal/common -github.com/shirou/gopsutil/process +github.com/shirou/gopsutil/mem github.com/shirou/gopsutil/net +github.com/shirou/gopsutil/process # github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 github.com/shirou/w32 # github.com/sirupsen/logrus v1.4.2 @@ -620,183 +638,186 @@ github.com/tv42/httpunix github.com/ugorji/go/codec # github.com/ulikunitz/xz v0.5.6 github.com/ulikunitz/xz +github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -github.com/ulikunitz/xz/internal/hash # github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 github.com/xi2/xz # go.etcd.io/bbolt v1.3.2 
go.etcd.io/bbolt # go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971 +go.etcd.io/etcd/auth/authpb go.etcd.io/etcd/client go.etcd.io/etcd/clientv3 -go.etcd.io/etcd/clientv3/concurrency -go.etcd.io/etcd/pkg/transport -go.etcd.io/etcd/pkg/pathutil -go.etcd.io/etcd/pkg/srv -go.etcd.io/etcd/pkg/types -go.etcd.io/etcd/version -go.etcd.io/etcd/auth/authpb go.etcd.io/etcd/clientv3/balancer go.etcd.io/etcd/clientv3/balancer/picker go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/clientv3/concurrency go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes go.etcd.io/etcd/etcdserver/etcdserverpb go.etcd.io/etcd/mvcc/mvccpb go.etcd.io/etcd/pkg/logutil -go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/pathutil +go.etcd.io/etcd/pkg/srv go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types go.etcd.io/etcd/raft go.etcd.io/etcd/raft/raftpb +go.etcd.io/etcd/version # go.opencensus.io v0.21.0 -go.opencensus.io/stats -go.opencensus.io/stats/view -go.opencensus.io/plugin/ochttp -go.opencensus.io/plugin/ochttp/propagation/tracecontext -go.opencensus.io/trace -go.opencensus.io/metric/metricdata -go.opencensus.io/stats/internal -go.opencensus.io/tag +go.opencensus.io +go.opencensus.io/internal go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata go.opencensus.io/metric/metricproducer go.opencensus.io/plugin/ocgrpc +go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io/trace/propagation -go.opencensus.io +go.opencensus.io/plugin/ochttp/propagation/tracecontext go.opencensus.io/resource -go.opencensus.io/trace/tracestate -go.opencensus.io/internal +go.opencensus.io/stats +go.opencensus.io/stats/internal +go.opencensus.io/stats/view +go.opencensus.io/tag +go.opencensus.io/trace go.opencensus.io/trace/internal +go.opencensus.io/trace/propagation +go.opencensus.io/trace/tracestate # go.uber.org/atomic v1.4.0 go.uber.org/atomic # go.uber.org/multierr v1.1.0 go.uber.org/multierr # go.uber.org/zap v1.9.1 go.uber.org/zap -go.uber.org/zap/zapcore -go.uber.org/zap/internal/bufferpool go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/crypto/bcrypt -golang.org/x/crypto/ed25519 -golang.org/x/crypto/ssh -golang.org/x/crypto/ssh/agent -golang.org/x/crypto/curve25519 +golang.org/x/crypto/blake2b +golang.org/x/crypto/blowfish +golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 -golang.org/x/crypto/ssh/terminal -golang.org/x/crypto/blowfish -golang.org/x/crypto/md4 +golang.org/x/crypto/curve25519 +golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 -golang.org/x/crypto/internal/chacha20 -golang.org/x/crypto/poly1305 -golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/hkdf +golang.org/x/crypto/internal/chacha20 +golang.org/x/crypto/internal/subtle +golang.org/x/crypto/md4 golang.org/x/crypto/pbkdf2 -golang.org/x/crypto/blake2b golang.org/x/crypto/pkcs12 -golang.org/x/crypto/internal/subtle golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/poly1305 +golang.org/x/crypto/ssh +golang.org/x/crypto/ssh/agent +golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 -golang.org/x/net/idna -golang.org/x/net/http/httpproxy -golang.org/x/net/http2 golang.org/x/net/context +golang.org/x/net/context/ctxhttp 
golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy +golang.org/x/net/http2 golang.org/x/net/http2/hpack -golang.org/x/net/trace -golang.org/x/net/context/ctxhttp +golang.org/x/net/idna golang.org/x/net/internal/timeseries +golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a golang.org/x/oauth2 -golang.org/x/oauth2/internal +golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/google -golang.org/x/oauth2/jwt +golang.org/x/oauth2/internal golang.org/x/oauth2/jws -golang.org/x/oauth2/clientcredentials +golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a +golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -golang.org/x/sys/cpu # golang.org/x/text v0.3.2 -golang.org/x/text/secure/bidirule -golang.org/x/text/unicode/bidi -golang.org/x/text/unicode/norm -golang.org/x/text/transform -golang.org/x/text/encoding/unicode golang.org/x/text/encoding golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier +golang.org/x/text/encoding/unicode golang.org/x/text/internal/utf8internal golang.org/x/text/runes +golang.org/x/text/secure/bidirule +golang.org/x/text/transform +golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate # google.golang.org/api v0.5.0 -google.golang.org/api/option -google.golang.org/api/iam/v1 -google.golang.org/api/googleapi -google.golang.org/api/iterator -google.golang.org/api/transport google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 +google.golang.org/api/gensupport +google.golang.org/api/googleapi +google.golang.org/api/googleapi/internal/uritemplates +google.golang.org/api/googleapi/transport +google.golang.org/api/iam/v1 google.golang.org/api/internal +google.golang.org/api/iterator google.golang.org/api/oauth2/v2 -google.golang.org/api/gensupport -google.golang.org/api/transport/http +google.golang.org/api/option google.golang.org/api/storage/v1 -google.golang.org/api/googleapi/internal/uritemplates +google.golang.org/api/support/bundler +google.golang.org/api/transport google.golang.org/api/transport/grpc -google.golang.org/api/googleapi/transport +google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/support/bundler # google.golang.org/appengine v1.6.0 -google.golang.org/appengine/cloudsql -google.golang.org/appengine/urlfetch google.golang.org/appengine -google.golang.org/appengine/socket +google.golang.org/appengine/cloudsql google.golang.org/appengine/internal -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/socket +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket +google.golang.org/appengine/urlfetch # google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 -google.golang.org/genproto/googleapis/cloud/kms/v1 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/distribution 
+google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -google.golang.org/genproto/googleapis/monitoring/v3 -google.golang.org/genproto/googleapis/api/distribution -google.golang.org/genproto/protobuf/field_mask +google.golang.org/genproto/googleapis/cloud/kms/v1 google.golang.org/genproto/googleapis/iam/v1 +google.golang.org/genproto/googleapis/monitoring/v3 +google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails -google.golang.org/genproto/googleapis/spanner/v1 google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/googleapis/api/annotations -google.golang.org/genproto/googleapis/api -google.golang.org/genproto/googleapis/api/label -google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/spanner/v1 google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.22.0 -google.golang.org/grpc/grpclog -google.golang.org/grpc/codes google.golang.org/grpc -google.golang.org/grpc/keepalive -google.golang.org/grpc/status -google.golang.org/grpc/metadata -google.golang.org/grpc/credentials google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/health +google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancerload @@ -805,7 +826,10 @@ google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver @@ -813,14 +837,8 @@ google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats +google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/health -google.golang.org/grpc/health/grpc_health_v1 -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/credentials/oauth -google.golang.org/grpc/balancer/base -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/internal/syscall # gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d gopkg.in/asn1-ber.v1 # gopkg.in/inf.v0 v0.9.1 @@ -830,43 +848,43 @@ gopkg.in/ini.v1 # gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/mgo.v2 gopkg.in/mgo.v2/bson +gopkg.in/mgo.v2/internal/json gopkg.in/mgo.v2/internal/sasl gopkg.in/mgo.v2/internal/scram -gopkg.in/mgo.v2/internal/json # gopkg.in/ory-am/dockertest.v3 v3.3.4 gopkg.in/ory-am/dockertest.v3 # gopkg.in/square/go-jose.v2 v2.3.1 -gopkg.in/square/go-jose.v2/jwt gopkg.in/square/go-jose.v2 -gopkg.in/square/go-jose.v2/json gopkg.in/square/go-jose.v2/cipher 
+gopkg.in/square/go-jose.v2/json
+gopkg.in/square/go-jose.v2/jwt
# gopkg.in/yaml.v2 v2.2.2
gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190409092523-d687e77c8ae9
k8s.io/api/authentication/v1
# k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b
k8s.io/apimachinery/pkg/api/errors
-k8s.io/apimachinery/pkg/apis/meta/v1
-k8s.io/apimachinery/pkg/runtime
-k8s.io/apimachinery/pkg/runtime/schema
-k8s.io/apimachinery/pkg/types
-k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/api/resource
+k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/conversion
+k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
+k8s.io/apimachinery/pkg/runtime
+k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/selection
-k8s.io/apimachinery/pkg/util/intstr
-k8s.io/apimachinery/pkg/util/runtime
-k8s.io/apimachinery/pkg/watch
-k8s.io/apimachinery/pkg/conversion/queryparams
+k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/util/errors
+k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/naming
+k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
-k8s.io/apimachinery/third_party/forked/golang/reflect
k8s.io/apimachinery/pkg/util/validation
-k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/util/validation/field
+k8s.io/apimachinery/pkg/watch
+k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/klog v0.0.0-20190306015804-8e90cee79f82
k8s.io/klog
# layeh.com/radius v0.0.0-20190322222518-890bc1058917

From 97c8a92201a5a265a8df7693f988f24f5b63d59d Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 25 Oct 2019 09:33:52 -0400
Subject: [PATCH 11/90] changelog++

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a8bd68cc3c6e..2bcdac93c428 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,7 +14,8 @@ FEATURES:
 store. Bad storage state can be caused by bugs. However, this is usually
 observed when known (and fixed) bugs are hit by older versions of Vault.
 * **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from
-  external source for critical security parameters. The
+  external source for critical security parameters. Currently an HSM that
+  supports PKCS#11 is the only supported source.
 * **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets
 engine, users or applications can check out a service account for use, and its
 password will be rotated when it's checked back in.

From d9153a5872501f377d556131cccfbcde5ae87e61 Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 25 Oct 2019 09:40:21 -0400
Subject: [PATCH 12/90] changelog++

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2bcdac93c428..8eacdddc6686 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,8 @@ FEATURES:
 * **Transit Key Type Support**: Signing and verification is now supported with the
 P-384 (secp384r1) and P-521 (secp521r1) ECDSA curves [GH-7551] and
 encryption and decryption is now supported via AES128-GCM96 [GH-7555]
+ * **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to
+ require a specific header before allowing requests.
 * **New UI Features**: The UI now supports managing users and groups for the
 Userpass, Cert, Okta, and Radius auth methods.
 * **Shamir with Stored Master Key**: The on disk format for Shamir seals has changed,

From e4045334f44367f8365ac88e17289c6809cd7c6a Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 25 Oct 2019 09:45:27 -0400
Subject: [PATCH 13/90] changelog++

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8eacdddc6686..b5e59ea4569e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ IMPROVEMENTS:
 thereby improving the performance and storage capacity.
 * replication (enterprise): added more replication metrics
 * secrets/aws: The root config can now be read [GH-7245]
+ * secrets/database/cassandra: Add ability to skip verification of connection [GH-7614]
 * storage/azure: Add config parameter to Azure storage backend to allow
 specifying the ARM endpoint [GH-7567]
 * storage/cassandra: Improve storage efficiency by eliminating unnecessary

From 6a07c56f833c1fa7cd80938ebefed5f4911f7f13 Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 25 Oct 2019 09:50:17 -0400
Subject: [PATCH 14/90] changelog++

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b5e59ea4569e..14a132f765c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -70,6 +70,8 @@ IMPROVEMENTS:
 specifying the ARM endpoint [GH-7567]
 * storage/cassandra: Improve storage efficiency by eliminating unnecessary
 copies of value data [GH-7199]
+ * storage/raft: Improve raft write performance by utilizing FSM Batching [GH-7527]
+ * storage/raft: Add support for non-voter nodes [GH-7634]
 * sys: Add a new `sys/host-info` endpoint for querying information about
 the host [GH-7330]
 * sys: Add a new set of endpoints under `sys/pprof/` that allows profiling

From 05d239cffb5603ad64b958ef36edc2c44a63790b Mon Sep 17 00:00:00 2001
From: Mike Jarmy
Date: Fri, 25 Oct 2019 10:55:38 -0400
Subject: [PATCH 15/90] fix token counter test so the token won't time out (#7737)

---
 vault/counters_test.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/vault/counters_test.go b/vault/counters_test.go
index be9249dff1e8..164d58cda0d9 100644
--- a/vault/counters_test.go
+++ b/vault/counters_test.go
@@ -124,6 +124,8 @@ func TestRequestCounterSaveCurrent(t *testing.T) {
 }
 
 func testCountActiveTokens(t *testing.T, c *Core, root string, expectedServiceTokens int) {
+	t.Helper()
+
 	rootCtx := namespace.RootContext(nil)
 	resp, err := c.HandleRequest(rootCtx, &logical.Request{
 		ClientToken: root,
@@ -157,6 +159,9 @@ func TestTokenStore_CountActiveTokens(t *testing.T) {
 		ClientToken: root,
 		Operation:   logical.UpdateOperation,
 		Path:        "create",
+		Data: map[string]interface{}{
+			"ttl": "1h",
+		},
 	}
 	tokens := make([]string, 10)
 	for i := 0; i < 10; i++ {
@@ -184,6 +189,8 @@ func TestTokenStore_CountActiveTokens(t *testing.T) {
 }
 
 func testCountActiveEntities(t *testing.T, c *Core, root string, expectedEntities int) {
+	t.Helper()
+
 	rootCtx := namespace.RootContext(nil)
 	resp, err := c.HandleRequest(rootCtx, &logical.Request{
 		ClientToken: root,

From a988e51fdb8f117a5a1e5a681c72bfdf1f58dad4 Mon Sep 17 00:00:00 2001
From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com>
Date: Fri, 25 Oct 2019 11:41:25 -0400
Subject: [PATCH 16/90] changelog++

---
 CHANGELOG.md | 1553 +------------------------------------------------
 1 file changed, 3 insertions(+), 1550 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 14a132f765c9..09927bd03df3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,8 @@ FEATURES:
 decryption is now supported via AES128-GCM96 [GH-7555]
 * **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to
 require a specific header before allowing requests.
+ * **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can
+ now be rotated, to ensure that only Vault knows the credentials it is using [GH-7131]
 * **New UI Features**: The UI now supports managing users and groups for the
 Userpass, Cert, Okta, and Radius auth methods.
@@ -2941,1553 +2943,4 @@ DEPRECATIONS/CHANGES:
 to hundreds of thousands, or millions), this will significantly improve
 Vault startup time since leases associated with these certificates will not
 have to be loaded; however note that it also means that revocation of a
- token used to issue certificates will no longer add these certificates to a
- CRL. If this behavior is desired or needed, consider keeping leases enabled
- and ensuring lifetimes are reasonable, and issue long-lived certificates via
- a different role with leases disabled.
-
-FEATURES:
-
- * **Replication (Enterprise)**: Vault Enterprise now has support for creating
- a multi-datacenter replication set between clusters. The current replication
- offering is based on an asynchronous primary/secondary (1:N) model that
- replicates static data while keeping dynamic data (leases, tokens)
- cluster-local, focusing on horizontal scaling for high-throughput and
- high-fanout deployments.
- * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault
- Enterprise UI now supports looking up and rotating response wrapping tokens,
- as well as creating tokens with arbitrary values inside. It also now
- supports replication functionality, enabling the configuration of a
- replication set in the UI.
- * **Expanded Access Control Policies**: Access control policies can now
- specify allowed and denied parameters -- and, optionally, their values -- to
- control what a client can and cannot submit during an API call. Policies can
- also specify minimum/maximum response wrapping TTLs to both enforce the use
- of response wrapping and control the duration of resultant wrapping tokens.
- See the [policies concepts
- page](https://www.vaultproject.io/docs/concepts/policies.html) for more
- information.
- * **SSH Backend As Certificate Authority**: The SSH backend can now be
- configured to sign host and user certificates. Each mount of the backend
- acts as an independent signing authority. The CA key pair can be configured
- for each mount and the public key is accessible via an unauthenticated API
- call; additionally, the backend can generate a public/private key pair for
- you. We recommend using separate mounts for signing host and user
- certificates.
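As a rough illustration of the signing workflow in the SSH CA entry above, here is a minimal sketch using the Go API client. The mount path `ssh-user-ca`, the role name `example-role`, and the sample public key are placeholders, and the role is assumed to already exist; this is not the backend's only usage pattern.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask this mount to generate its own CA key pair. The mount path
	// "ssh-user-ca" is illustrative; the entry above recommends separate
	// mounts for user and host signing.
	if _, err := client.Logical().Write("ssh-user-ca/config/ca", map[string]interface{}{
		"generate_signing_key": true,
	}); err != nil {
		log.Fatal(err)
	}

	// Sign a user's public key against a pre-existing role; the returned
	// data contains the signed certificate.
	secret, err := client.Logical().Write("ssh-user-ca/sign/example-role", map[string]interface{}{
		"public_key": "ssh-rsa AAAAB3NzaC1yc2E... user@example.com",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["signed_key"])
}
```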
- -IMPROVEMENTS: - - * api/request: Passing username and password information in API request - [GH-2469] - * audit: Logging the token's use count with authentication response and - logging the remaining uses of the client token with request [GH-2437] - * auth/approle: Support for restricting the number of uses on the tokens - issued [GH-2435] - * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID, - Subnet ID and Region [GH-2407] - * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the - username if not explicitly set on the command line when authenticating - [GH-2154] - * audit: Support adding a configurable prefix (such as `@cee`) before each - line [GH-2359] - * core: Canonicalize list operations to use a trailing slash [GH-2390] - * core: Add option to disable caching on a per-mount level [GH-2455] - * core: Add ability to require valid client certs in listener config [GH-2457] - * physical/dynamodb: Implement a session timeout to avoid having to use - recovery mode in the case of an unclean shutdown, which makes HA much safer - [GH-2141] - * secret/pki: O (Organization) values can now be set to role-defined values - for issued/signed certificates [GH-2369] - * secret/pki: Certificates issued/signed from PKI backend do not generate - leases by default [GH-2403] - * secret/pki: When using DER format, still return the private key type - [GH-2405] - * secret/pki: Add an intermediate to the CA chain even if it lacks an - authority key ID [GH-2465] - * secret/pki: Add role option to use CSR SANs [GH-2489] - * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208] - * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint - and also return it when CA key pair is generated [GH-2483] - -BUG FIXES: - - * audit: When auditing headers use case-insensitive comparisons [GH-2362] - * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374] - * auth/okta: Fix panic if user had no local groups and/or policies set - [GH-2367] - * command/server: Fix parsing of redirect address when port is not mentioned - [GH-2354] - * physical/postgresql: Fix listing returning incorrect results if there were - multiple levels of children [GH-2393] - -## 0.6.5 (February 7th, 2017) - -FEATURES: - - * **Okta Authentication**: A new Okta authentication backend allows you to use - Okta usernames and passwords to authenticate to Vault. If provided with an - appropriate Okta API token, group membership can be queried to assign - policies; users and groups can be defined locally as well. - * **RADIUS Authentication**: A new RADIUS authentication backend allows using - a RADIUS server to authenticate to Vault. Policies can be configured for - specific users or for any authenticated user. - * **Exportable Transit Keys**: Keys in `transit` can now be marked as - `exportable` at creation time. This allows a properly ACL'd user to retrieve - the associated signing key, encryption key, or HMAC key. The `exportable` - value is returned on a key policy read and cannot be changed, so if a key is - marked `exportable` it will always be exportable, and if it is not it will - never be exportable. - * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations - in the transit backend now support processing multiple input items in one - call, returning the output of each item in the response. 
- * **Configurable Audited HTTP Headers**: You can now specify headers that you
- want to have included in each audit entry, along with whether each header
- should be HMAC'd or kept plaintext. This can be useful for adding additional
- client or network metadata to the audit logs.
- * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
- backend, allowing creation, viewing and editing of named keys as well as using
- those keys to perform supported transit operations directly in the UI.
- * **Socket Audit Backend**: A new socket audit backend allows audit logs to be sent
- through TCP, UDP, or UNIX Sockets.
-
-IMPROVEMENTS:
-
- * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148]
- * auth/aws-ec2: Support issuing periodic tokens [GH-2324]
- * auth/github: Support listing teams and users [GH-2261]
- * auth/ldap: Support adding policies to local users directly, in addition to
- local groups [GH-2152]
- * command/server: Add ability to select and prefer server cipher suites
- [GH-2293]
- * core: Add a nonce to unseal operations as a check (useful mostly for
- support, not as a security principle) [GH-2276]
- * duo: Added ability to supply extra context to Duo pushes [GH-2118]
- * physical/consul: Add option for setting consistency mode on Consul gets
- [GH-2282]
- * physical/etcd: Full v3 API support; code will autodetect which API version
- to use. The v3 code path is significantly less complicated and may be much
- more stable. [GH-2168]
- * secret/pki: Allow specifying OU entries in generated certificate subjects
- [GH-2251]
- * secret mount ui (Enterprise): the secret mount list now shows all mounted
- backends even if the UI cannot browse them. Additional backends can now be
- mounted from the UI as well.
-
-BUG FIXES:
-
- * auth/token: Fix regression in 0.6.4 where using token store roles as a
- blacklist (with only `disallowed_policies` set) would not work in most
- circumstances [GH-2286]
- * physical/s3: Page responses in client so list doesn't truncate [GH-2224]
- * secret/cassandra: Stop a connection leak that could occur on active node
- failover [GH-2313]
- * secret/pki: When using `sign-verbatim`, don't require a role and use the
- CSR's common name [GH-2243]
-
-## 0.6.4 (December 16, 2016)
-
-SECURITY:
-
-Further details about these security issues can be found in the 0.6.4 upgrade
-guide.
-
- * `default` Policy Privilege Escalation: If a parent token did not have the
- `default` policy attached to its token, it could still create children with
- the `default` policy. This is no longer allowed (unless the parent has
- `sudo` capability for the creation path). In most cases this is low severity
- since the access grants in the `default` policy are meant to be access
- grants that are acceptable for all tokens to have.
- * Leases Not Expired When Limited Use Token Runs Out of Uses: When using
- limited-use tokens to create leased secrets, if the limited-use token was
- revoked due to running out of uses (rather than due to TTL expiration or
- explicit revocation) it would fail to revoke the leased secrets. These
- secrets would still be revoked when their TTL expired, limiting the severity
- of this issue. An endpoint has been added (`auth/token/tidy`) that can
- perform housekeeping tasks on the token store; one of its tasks can detect
- this situation and revoke the associated leases.
-
-FEATURES:
-
- * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing,
- creating, and editing policies.
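To make the batch transit operations from the 0.6.5 notes above concrete, here is a minimal sketch using the Go API client. The key name `my-key` is a placeholder for an existing transit key, and plaintext values are base64-encoded as the transit API expects; treat this as an illustration under those assumptions rather than the only way to drive the endpoint.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Encrypt two plaintexts in a single call to transit/encrypt/my-key
	// instead of making one request per item.
	secret, err := client.Logical().Write("transit/encrypt/my-key", map[string]interface{}{
		"batch_input": []map[string]interface{}{
			{"plaintext": base64.StdEncoding.EncodeToString([]byte("first secret"))},
			{"plaintext": base64.StdEncoding.EncodeToString([]byte("second secret"))},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each input item yields one result, returned in order.
	fmt.Println(secret.Data["batch_results"])
}
```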
-
-IMPROVEMENTS:
-
- * http: Vault now sets a `no-store` cache control header to make it more
- secure in setups that are not end-to-end encrypted [GH-2183]
-
-BUG FIXES:
-
- * auth/ldap: Don't panic if dialing returns an error and starttls is enabled;
- instead, return the error [GH-2188]
- * ui (Enterprise): Submitting an unseal key now properly resets the
- form so a browser refresh isn't required to continue.
-
-## 0.6.3 (December 6, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * Request size limitation: A maximum request size of 32MB is imposed to
- prevent a denial of service attack with arbitrarily large requests [GH-2108]
- * LDAP denies passwordless binds by default: In new LDAP mounts, or when
- existing LDAP mounts are rewritten, passwordless binds will be denied by
- default. The new `deny_null_bind` parameter can be set to `false` to allow
- these. [GH-2103]
- * Any audit backend activated satisfies conditions: Previously, when a new
- Vault node was taking over service in an HA cluster, all audit backends were
- required to be loaded successfully to take over active duty. This behavior
- now matches the behavior of the audit logging system itself: at least one
- audit backend must successfully be loaded. The server log contains an error
- when this occurs. This helps keep a Vault HA cluster working when there is a
- misconfiguration on a standby node. [GH-2083]
-
-FEATURES:
-
- * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI
- that offers access to a number of features, including init/unsealing/sealing,
- authentication via userpass or LDAP, and K/V reading/writing. The capability
- set of the UI will be expanding rapidly in further releases. To enable it,
- set `ui = true` in the top level of Vault's configuration file and point a
- web browser at your Vault address.
- * **Google Cloud Storage Physical Backend**: You can now use GCS for storing
- Vault data [GH-2099]
-
-IMPROVEMENTS:
-
- * auth/github: Policies can now be assigned to users as well as to teams
- [GH-2079]
- * cli: Set the number of retries on 500 down to 0 by default (no retrying). It
- can be very confusing to users when there is a pause while the retries
- happen if they haven't explicitly set it. With request forwarding the need
- for this is lessened anyway. [GH-2093]
- * core: Response wrapping is now allowed to be specified by backend responses
- (requires backends gaining support) [GH-2088]
- * physical/consul: When announcing service, use the scheme of the Vault server
- rather than the Consul client [GH-2146]
- * secret/consul: Added listing functionality to roles [GH-2065]
- * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to
- enable customization of user revocation SQL statements [GH-2033]
- * secret/transit: Add listing of keys [GH-1987]
-
-BUG FIXES:
-
- * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with
- Vault 0.6.1 and older [GH-2014]
- * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077]
- * auth/approle: Creating the index for the role_id properly [GH-2004]
- * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the
- instance-profile ARN [GH-2035]
- * auth/ldap: Avoid leaking connections on login [GH-2130]
- * command/path-help: Use the actual error generated by Vault rather than
- always using 500 when there is a path help error [GH-2153]
- * command/ssh: Use temporary file for identity and ensure its deletion before
- the command returns [GH-2016]
- * cli: Fix error printing values with `-field` if the values contained
- formatting directives [GH-2109]
- * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120]
- * core: Fix bug where a failure to come up as active node (e.g. if an audit
- backend failed) could lead to deadlock [GH-2083]
- * physical/mysql: Fix potential crash during setup due to a query failure
- [GH-2105]
- * secret/consul: Fix panic on user error [GH-2145]
-
-## 0.6.2 (October 5, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * Convergent Encryption v2: New keys in `transit` using convergent mode will
- use a new nonce derivation mechanism rather than require the user to supply
- a nonce. While not explicitly increasing security, it minimizes the
- likelihood that a user will use the mode improperly and impact the security
- of their keys. Keys in convergent mode that were created in v0.6.1 will
- continue to work with the same mechanism (user-supplied nonce).
- * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the
- `etcd` storage backend now requires that `ha_enabled` be explicitly
- specified in the configuration file. The backend currently has known broken
- HA behavior, so this flag discourages use by default without explicitly
- enabling it. If you are using this functionality, when upgrading, you should
- set `ha_enabled` to `"true"` *before* starting the new versions of Vault.
- * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault
- the default was 30 days, but moving it to 32 days allows some operations
- (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron
- job.
- * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are
- no longer part of request URLs. The GET and DELETE operations are now moved
- to new endpoints (`/lookup` and `/destroy`) which consume the input from
- the body and not the URL.
- * AppRole requires at least one constraint: previously it was sufficient to
- turn off all AppRole authentication constraints (secret ID, CIDR block) and
- use the role ID only. It is now required that at least one additional
- constraint is enabled. Existing roles are unaffected, but any new roles or
- updated roles will require this.
- * Reading wrapped responses from `cubbyhole/response` is deprecated. The
- `sys/wrapping/unwrap` endpoint should be used instead as it provides
- additional security, auditing, and other benefits. The ability to read
- directly will be removed in a future release.
- * Request Forwarding is now on by default: in 0.6.1 this required toggling on,
- but is now enabled by default. This can be disabled via the
- `"disable_clustering"` parameter in Vault's
- [config](https://www.vaultproject.io/docs/config/index.html), or per-request
- with the `X-Vault-No-Request-Forwarding` header.
- * In prior versions a bug caused the `bound_iam_role_arn` value in the
- `aws-ec2` authentication backend to actually use the instance profile ARN.
- This has been corrected, but as a result there is a behavior change. To
- match using the instance profile ARN, a new parameter
- `bound_iam_instance_profile_arn` has been added. Existing roles will
- automatically transfer the value over to the correct parameter, but the next
- time the role is updated, the new meanings will take effect.
-
-FEATURES:
-
- * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an
- approle can now specify a list of CIDR blocks from which the requests to
- generate secret IDs must originate. If an approle already has CIDR
- restrictions specified, the CIDR restrictions on the secret ID should be a
- subset of those specified on the role [GH-1910]
- * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root
- token created at initialization time can now be PGP encrypted [GH-1883]
- * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows,
- when a CA cert is being supplied as a signed root or intermediate, a trust
- chain of arbitrary length. The chain is returned as a parameter at
- certificate issue/sign time and is retrievable independently as well.
- [GH-1694]
- * **Response Wrapping Enhancements**: There are new endpoints to look up
- response wrapped token parameters; wrap arbitrary values; rotate wrapping
- tokens; and unwrap with enhanced validation. In addition, list operations
- can now be response-wrapped. [GH-1927]
- * **Transit Features**: The `transit` backend now supports generating random
- bytes and SHA sums; HMACs; and signing and verification functionality using
- EC keys (P-256 curve)
-
-IMPROVEMENTS:
-
- * api: Return error when an invalid (as opposed to incorrect) unseal key is
- submitted, rather than ignoring it [GH-1782]
- * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834]
- * api: Rekey operation now redirects from standbys to master [GH-1862]
- * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and
- re-open the log file, making it easier to rotate audit logs [GH-1953]
- * auth/aws-ec2: EC2 instances can get authenticated by presenting the identity
- document and its SHA256 RSA digest [GH-1961]
- * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a
- prefix match instead of exact match [GH-1943]
- * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to
- refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn`
- to refer to IAM role ARN instead of the instance profile ARN [GH-1913]
- * auth/aws-ec2: Backend generates the nonce by default and clients can
- explicitly disable reauthentication by setting an empty nonce [GH-1889]
- * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806]
- * command/format: The `format` flag on select CLI commands takes `yml` as an
- alias for `yaml` [GH-1899]
- * core: Allow the size of the read cache to be set via the config file, and
- change the default value to 1MB (from 32KB) [GH-1784]
- * core: Allow single and two-character path parameters for most places
- [GH-1811]
- * core: Allow list operations to be response-wrapped [GH-1814]
- * core: Provide better protection against timing attacks in Shamir code
- [GH-1877]
- * core: Unmounting/disabling backends no longer returns an error if the mount
- didn't exist. This is in line with elsewhere in Vault's API where `DELETE` is
- an idempotent operation. [GH-1903]
- * credential/approle: At least one constraint is required to be enabled while
- creating and updating a role [GH-1882]
- * secret/cassandra: Added consistency level for use with roles [GH-1931]
- * secret/mysql: SQL for revoking user can be configured on the role [GH-1914]
- * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new
- keys [GH-1812]
- * secret/transit: Empty plaintext values are now allowed [GH-1874]
-
-BUG FIXES:
-
- * audit: Fix panic being caused by some values logging as underlying Go types
- instead of formatted strings [GH-1912]
- * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920]
- * auth/approle: Prevent secret IDs and secret ID accessors from being logged
- in plaintext in audit logs [GH-1947]
- * auth/aws-ec2: Allow authentication if the underlying host is in a bad state
- but the instance is running [GH-1884]
- * auth/token: Fixed metadata missing from the token lookup response by
- gracefully handling token entry upgrade [GH-1924]
- * cli: Don't error on newline in token file [GH-1774]
- * core: Pass back content-type header for forwarded requests [GH-1791]
- * core: Fix panic if the same key was given twice to `generate-root` [GH-1827]
- * core: Fix potential deadlock on unmount/remount [GH-1793]
- * physical/file: Remove empty directories from the `file` storage backend [GH-1821]
- * physical/zookeeper: Remove empty directories from the `zookeeper` storage
- backend and add a fix to the `file` storage backend's logic [GH-1964]
- * secret/aws: Added update operation to `aws/sts` path to consider `ttl`
- parameter [39b75c6]
- * secret/aws: Mark STS secrets as non-renewable [GH-1804]
- * secret/cassandra: Properly store session for re-use [GH-1802]
- * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781]
-
-## 0.6.1 (August 22, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to
- connect to the HA cluster. We recommend following our [general upgrade
- instructions](https://www.vaultproject.io/docs/install/upgrade.html) in
- addition to 0.6.1-specific upgrade instructions to ensure that this is not
- an issue.
- * Status codes for sealed/uninitialized Vaults have changed to `503`/`501`
- respectively. See the [version-specific upgrade
- guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for
- more details.
- * Root tokens (tokens with the `root` policy) can no longer be created except
- by another root token or the `generate-root` endpoint.
- * Issued certificates from the `pki` backend against new roles created or
- modified after upgrading will contain a set of default key usages.
- * The `dynamodb` physical data store no longer supports HA by default. It has
- some non-ideal behavior around failover that was causing confusion. See the
- [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled)
- for information on enabling HA mode. It is very important that this
- configuration is added _before upgrading_.
- * The `ldap` backend no longer searches for `memberOf` groups as part of its
- normal flow. Instead, the desired group filter must be specified. This fixes
- some errors and increases speed for directories with different structures,
- but if this behavior has been relied upon, ensure that you see the upgrade
- notes _before upgrading_.
- * `app-id` is now deprecated with the addition of the new AppRole backend.
- There are no plans to remove it, but we encourage using AppRole whenever - possible, as it offers enhanced functionality and can accommodate many more - types of authentication paradigms. - -FEATURES: - - * **AppRole Authentication Backend**: The `approle` backend is a - machine-oriented authentication backend that provides a similar concept to - App-ID while adding many missing features, including a pull model that - allows for the backend to generate authentication credentials rather than - requiring operators or other systems to push credentials in. It should be - useful in many more situations than App-ID. The inclusion of this backend - deprecates App-ID. [GH-1426] - * **Request Forwarding**: Vault servers can now forward requests to each other - rather than redirecting clients. This feature is off by default in 0.6.1 but - will be on by default in the next release. See the [HA concepts - page](https://www.vaultproject.io/docs/concepts/ha.html) for information on - enabling and configuring it. [GH-443] - * **Convergent Encryption in `Transit`**: The `transit` backend now supports a - convergent encryption mode where the same plaintext will produce the same - ciphertext. Although very useful in some situations, this has potential - security implications, which are mostly mitigated by requiring the use of - key derivation when convergent encryption is enabled. See [the `transit` - backend - documentation](https://www.vaultproject.io/docs/secrets/transit/index.html) - for more details. [GH-1537] - * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates - to define group filters, providing the capability to support some - directories that could not easily be supported before (especially specific - Active Directory setups with nested groups). [GH-1388] - * **Key Usage Control in `PKI`**: Issued certificates from roles created or - modified after upgrading contain a set of default key usages for increased - compatibility with OpenVPN and some other software. This set can be changed - when writing a role definition. Existing roles are unaffected. [GH-1552] - * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx` - error code will now retry after a backoff. The maximum total number of - retries (including disabling this functionality) can be set with an - environment variable. See the [environment variable - documentation](https://www.vaultproject.io/docs/commands/environment.html) - for more details. [GH-1594] - * **Service Discovery in `vault init`**: The new `-auto` option on `vault init` - will perform service discovery using Consul. When only one node is discovered, - it will be initialized and when more than one node is discovered, they will - be output for easy selection. See `vault init --help` for more details. [GH-1642] - * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database - credentials based on configured roles. Sponsored by - [CommerceHub](http://www.commercehub.com/). [GH-1414] - * **Circonus Metrics Integration**: Vault can now send metrics to - [Circonus](http://www.circonus.com/). See the [configuration - documentation](https://www.vaultproject.io/docs/config/index.html) for - details. [GH-1646] - -IMPROVEMENTS: - - * audit: Added a unique identifier to each request which will also be found in - the request portion of the response. 
[GH-1650] - * auth/aws-ec2: Added a new constraint `bound_account_id` to the role - [GH-1523] - * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role - [GH-1522] - * auth/aws-ec2: Added `ttl` field for the role [GH-1703] - * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config` - have the minimum TLS version set to 1.2 by default. This is configurable. - * auth/token: Added endpoint to list accessors [GH-1676] - * auth/token: Added `disallowed_policies` option to token store roles [GH-1681] - * auth/token: `root` or `sudo` tokens can now create periodic tokens via - `auth/token/create`; additionally, the same token can now be periodic and - have an explicit max TTL [GH-1725] - * build: Add support for building on Solaris/Illumos [GH-1726] - * cli: Output formatting in the presence of warnings in the response object - [GH-1533] - * cli: `vault auth` command supports a `-path` option to take in the path at - which the auth backend is enabled, thereby allowing authenticating against - different paths using the command options [GH-1532] - * cli: `vault auth -methods` will now display the config settings of the mount - [GH-1531] - * cli: `vault read/write/unwrap -field` now allows selecting token response - fields [GH-1567] - * cli: `vault write -field` now allows selecting wrapped response fields - [GH-1567] - * command/status: Version information and cluster details added to the output - of `vault status` command [GH-1671] - * core: Response wrapping is now enabled for login endpoints [GH-1588] - * core: The duration of leadership is now exported via events through - telemetry [GH-1625] - * core: `sys/capabilities-self` is now accessible as part of the `default` - policy [GH-1695] - * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701] - * core: Unseal keys will now be returned in both hex and base64 forms, and - either can be used [GH-1734] - * core: Responses from most `/sys` endpoints now return normal `api.Secret` - structs in addition to the values they carried before. 
This means that - response wrapping can now be used with most authenticated `/sys` operations - [GH-1699] - * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576] - * physical/consul: Allowing additional tags to be added to Consul service - registration via `service_tags` option [GH-1643] - * secret/aws: Listing of roles is supported now [GH-1546] - * secret/cassandra: Add `connect_timeout` value for Cassandra connection - configuration [GH-1581] - * secret/mssql,mysql,postgresql: Reading of connection settings is supported - in all the sql backends [GH-1515] - * secret/mysql: Added optional maximum idle connections value to MySQL - connection configuration [GH-1635] - * secret/mysql: Use a combination of the role name and token display name in - generated user names and allow the length to be controlled [GH-1604] - * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed - in via one of four ways: a semicolon-delimited string, a base64-delimited - string, a serialized JSON string array, or a base64-encoded serialized JSON - string array [GH-1686] - * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and returning - role name as part of response of `verify` API - * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680] - * sys/health: Added version information to the response of health status - endpoint [GH-1647] - * sys/health: Cluster information isbe returned as part of health status when - Vault is unsealed [GH-1671] - * sys/mounts: MountTable data is compressed before serializing to accommodate - thousands of mounts [GH-1693] - * website: The [token - concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has - been completely rewritten [GH-1725] - -BUG FIXES: - - * auth/aws-ec2: Added a nil check for stored whitelist identity object - during renewal [GH-1542] - * auth/cert: Fix panic if no client certificate is supplied [GH-1637] - * auth/token: Don't report that a non-expiring root token is renewable, as - attempting to renew it results in an error [GH-1692] - * cli: Don't retry a command when a redirection is received [GH-1724] - * core: Fix regression causing status codes to be `400` in most non-5xx error - cases [GH-1553] - * core: Fix panic that could occur during a leadership transition [GH-1627] - * physical/postgres: Remove use of prepared statements as this causes - connection multiplexing software to break [GH-1548] - * physical/consul: Multiple Vault nodes on the same machine leading to check ID - collisions were resulting in incorrect health check responses [GH-1628] - * physical/consul: Fix deregistration of health checks on exit [GH-1678] - * secret/postgresql: Check for existence of role before attempting deletion - [GH-1575] - * secret/postgresql: Handle revoking roles that have privileges on sequences - [GH-1573] - * secret/postgresql(,mysql,mssql): Fix incorrect use of database over - transaction object which could lead to connection exhaustion [GH-1572] - * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634] - * secret/pki: Fix adding email addresses as SANs [GH-1688] - * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727] - * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715] - -## 0.6.0 (June 14th, 2016) - -SECURITY: - - * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via - lease IDs, which incorporate path information) and - `auth/token/revoke-prefix` was intended to revoke 
prefixes of tokens (using - the tokens' paths and, since 0.5.2, role information), in implementation - they both behaved exactly the same way since a single component in Vault is - responsible for managing lifetimes of both, and the type of the tracked - lifetime was not being checked. The end result was that either endpoint - could revoke both secret leases and tokens. We consider this a very minor - security issue as there are a number of mitigating factors: both endpoints - require `sudo` capability in addition to write capability, preventing - blanket ACL path globs from providing access; both work by using the prefix - to revoke as a part of the endpoint path, allowing them to be properly - ACL'd; and both are intended for emergency scenarios and users should - already not generally have access to either one. In order to prevent - confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and - `sys/revoke-prefix` will be meant for both leases and tokens instead. - -DEPRECATIONS/CHANGES: - - * `auth/token/revoke-prefix` has been removed. See the security notice for - details. [GH-1280] - * Vault will now automatically register itself as the `vault` service when - using the `consul` backend and will perform its own health checks. See - the Consul backend documentation for information on how to disable - auto-registration and service checks. - * List operations that do not find any keys now return a `404` status code - rather than an empty response object [GH-1365] - * CA certificates issued from the `pki` backend no longer have associated - leases, and any CA certs already issued will ignore revocation requests from - the lease manager. This is to prevent CA certificates from being revoked - when the token used to issue the certificate expires; it was not be obvious - to users that they need to ensure that the token lifetime needed to be at - least as long as a potentially very long-lived CA cert. - -FEATURES: - - * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS - EC2 instances allowing automated retrieval of Vault tokens. Unlike most - Vault authentication backends, this backend does not require first deploying - or provisioning security-sensitive credentials (tokens, username/password, - client certificates, etc). Instead, it treats AWS as a Trusted Third Party - and uses the cryptographically signed dynamic metadata information that - uniquely represents each EC2 instance. [Vault - Enterprise](https://www.hashicorp.com/vault.html) customers have access to a - turnkey client that speaks the backend API and makes access to a Vault token - easy. - * **Response Wrapping**: Nearly any response within Vault can now be wrapped - inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole - Authentication - Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) - mechanism to its logical conclusion. Retrieving the original response is as - simple as a single API command or the new `vault unwrap` command. This makes - secret distribution easier and more secure, including secure introduction. - * **Azure Physical Backend**: You can now use Azure blob object storage as - your Vault physical data store [GH-1266] - * **Swift Physical Backend**: You can now use Swift blob object storage as - your Vault physical data store [GH-1425] - * **Consul Backend Health Checks**: The Consul backend will automatically - register a `vault` service and perform its own health checking. 
By default - the active node can be found at `active.vault.service.consul` and standby - nodes at `standby.vault.service.consul`. Sealed vaults are marked - critical and are not listed by default in Consul's service discovery. See - the documentation for details. [GH-1349] - * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on - tokens that do not honor changes in the system- or mount-set values. This is - useful, for instance, when the max TTL of the system or the `auth/token` - mount must be set high to accommodate certain needs but you want more - granular restrictions on tokens being issued directly from the Token - authentication backend at `auth/token`. [GH-1399] - * **Non-Renewable Tokens**: When creating tokens directly through the token - authentication backend, you can now specify in both token store roles and - the API whether or not a token should be renewable, defaulting to `true`. - * **RabbitMQ Secret Backend**: Vault can now generate credentials for - RabbitMQ. Vhosts and tags can be defined within roles. [GH-788] - -IMPROVEMENTS: - - * audit: Add the DisplayName value to the copy of the Request object embedded - in the associated Response, to match the original Request object [GH-1387] - * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435] - * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms. - Particular exceptions are any current MFA paths. A few paths in `token` and - `sys` also require `root` or `sudo`. [GH-1478] - * command/auth: Restore the previous authenticated token if the `auth` command - fails to authenticate the provided token [GH-1233] - * command/write: `-format` and `-field` can now be used with the `write` - command [GH-1228] - * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297] - * core: Don't keep lease timers around when tokens are revoked [GH-1277] - * core: If using the `disable_cache` option, caches for the policy store and - the `transit` backend are now disabled as well [GH-1346] - * credential/cert: Renewal requests are rejected if the set of policies has - changed since the token was issued [GH-477] - * credential/cert: Check CRLs for specific non-CA certs configured in the - backend [GH-1404] - * credential/ldap: If `groupdn` is not configured, skip searching LDAP and - only return policies for local groups, plus a warning [GH-1283] - * credential/ldap: `vault list` support for users and groups [GH-1270] - * credential/ldap: Support for the `memberOf` attribute for group membership - searching [GH-1245] - * credential/userpass: Add list support for users [GH-911] - * credential/userpass: Remove user configuration paths from requiring sudo, in - favor of normal ACL mechanisms [GH-1312] - * credential/token: Sanitize policies and add `default` policies in appropriate - places [GH-1235] - * credential/token: Setting the renewable status of a token is now possible - via `vault token-create` and the API. The default is true, but tokens can be - specified as non-renewable. [GH-1499] - * secret/aws: Use chain credentials to allow environment/EC2 instance/shared - providers [GH-307] - * secret/aws: Support for STS AssumeRole functionality [GH-1318] - * secret/consul: Reading consul access configuration supported.
The response - will contain non-sensitive information only [GH-1445] - * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to - DNS or Email Subject Alternate Names [GH-1220] - * secret/pki: Added list support for certificates [GH-1466] - * sys/capabilities: Enforce ACL checks for requests that query the capabilities - of a token on a given path [GH-1221] - * sys/health: Status information can now be retrieved with `HEAD` [GH-1509] - -BUG FIXES: - - * command/read: Fix panic when using `-field` with a non-string value [GH-1308] - * command/token-lookup: Fix TTL showing as 0 depending on how a token was - created. This only affected the value shown at lookup, not the token - behavior itself. [GH-1306] - * command/various: Tell the JSON decoder to not convert all numbers to floats; - fixes various places where numbers were showing up in scientific - notation - * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags - over their respective env vars [GH-1480] - * command/ssh: Provided option to disable host key checking. The automated - variant of `vault ssh` command uses `sshpass` which was failing to handle - host key checking presented by the `ssh` binary. [GH-1473] - * core: Properly persist mount-tuned TTLs for auth backends [GH-1371] - * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372] - * credential/github: Make organization comparison case-insensitive during - login [GH-1359] - * credential/github: Fix panic when renewing a token created with some earlier - versions of Vault [GH-1510] - * credential/github: The token used to log in via `vault auth` can now be - specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511] - * credential/ldap: Fix problem where certain error conditions when configuring - or opening LDAP connections would cause a panic instead of returning a useful - error message [GH-1262] - * credential/token: Fall back to normal parent-token semantics if - `allowed_policies` is empty for a role. Using `allowed_policies` of - `default` resulted in the same behavior anyways. [GH-1276] - * credential/token: Fix issues renewing tokens when using the "suffix" - capability of token roles [GH-1331] - * credential/token: Fix lookup via POST showing the request token instead of - the desired token [GH-1354] - * credential/various: Fix renewal conditions when `default` policy is not - contained in the backend config [GH-1256] - * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353] - * secret/consul: Use non-pooled Consul API client to avoid leaving files open - [GH-1428] - * secret/pki: Don't check whether a certificate is destined to be a CA - certificate if sign-verbatim endpoint is used [GH-1250] - -## 0.5.3 (May 27th, 2016) - -SECURITY: - - * Consul ACL Token Revocation: An issue was reported to us indicating that - generated Consul ACL tokens were not being properly revoked. Upon - investigation, we found that this behavior was reproducible in a specific - scenario: when a generated lease for a Consul ACL token had been renewed - prior to revocation. In this case, the generated token was not being - properly persisted internally through the renewal function, leading to an - error during revocation due to the missing token. Unfortunately, this was - coded as a user error rather than an internal error, and the revocation - logic was expecting internal errors if revocation failed.
As a result, the - revocation logic believed the revocation to have succeeded when it in fact - failed, causing the lease to be dropped while the token was still valid - within Consul. In this release, the Consul backend properly persists the - token through renewals, and the revocation logic has been changed to - consider any error type to have been a failure to revoke, causing the lease - to persist and attempt to be revoked later. - -We have written an example shell script that searches through Consul's ACL -tokens and looks for those generated by Vault, which can be used as a template -for a revocation script as deemed necessary for any particular security -response. The script is available at -https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0 - -Please note that any outstanding leases for Consul tokens produced prior to -0.5.3 that have been renewed will continue to exhibit this behavior. As a -result, we recommend either revoking all tokens produced by the backend and -issuing new ones, or if needed, a more advanced variant of the provided example -could use the timestamp embedded in each generated token's name to decide which -tokens are too old and should be deleted. This could then be run periodically -up until the maximum lease time for any outstanding pre-0.5.3 tokens has -expired. - -This is a security-only release. There are no other code changes since 0.5.2. -The binaries have one additional change: they are built against Go 1.6.1 rather -than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming -language itself. - -## 0.5.2 (March 16th, 2016) - -FEATURES: - - * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based - on configured roles [GH-998] - * **Token Accessors**: Vault now provides an accessor with each issued token. - This accessor is an identifier that can be used for a limited set of - actions, notably for token revocation. This value can be logged in - plaintext to audit logs, and, in combination with the plaintext metadata - logged to audit logs, it provides a searchable and straightforward way to - revoke particular users' or services' tokens in many cases. To enable - plaintext audit logging of these accessors, set `hmac_accessor=false` when - enabling an audit backend. (See the sketch after this feature list.) - * **Token Credential Backend Roles**: Roles can now be created in the `token` - credential backend that allow modifying token behavior in ways that are not - otherwise exposed or easily delegated. This allows creating tokens with a - fixed set (or subset) of policies (rather than a subset of the calling - token's), periodic tokens with a fixed TTL but no expiration, specified - prefixes, and orphans. - * **Listener Certificate Reloading**: Vault's configured listeners now reload - their TLS certificate and private key when the Vault process receives a - SIGHUP.
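The Token Accessors feature above can be exercised without ever handling the token itself, via the accessor endpoints introduced in this release. A minimal sketch using Vault's Go API client, assuming `VAULT_ADDR` and `VAULT_TOKEN` are set in the environment; the accessor value is hypothetical:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR (among others) from the environment;
	// NewClient additionally picks up VAULT_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical accessor; in practice it comes from a token's creation
	// response or from (optionally plaintext) audit logs.
	accessor := "2b0c6d5c-3a2e-4c5f-8e6a-9d1f2a3b4c5d"

	// Revoke the token identified by this accessor without ever
	// possessing the token itself.
	_, err = client.Logical().Write("auth/token/revoke-accessor", map[string]interface{}{
		"accessor": accessor,
	})
	if err != nil {
		log.Fatalf("revoke via accessor failed: %v", err)
	}
	log.Println("token revoked via its accessor")
}
```

Because an accessor cannot be used to authenticate, logging it in plaintext carries far less risk than logging the token itself, which is the design rationale behind the feature.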
- -IMPROVEMENTS: - - * auth/token: Endpoints optionally accept tokens from the HTTP body rather - than just from the URLs [GH-1211] - * auth/token,sys/capabilities: Added new endpoints - `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and - `sys/capabilities-accessor`, which enable performing the respective actions - with just the accessor of the tokens, without having access to the actual - token [GH-1188] - * core: Ignore leading `/` in policy paths [GH-1170] - * core: Ignore leading `/` in mount paths [GH-1172] - * command/policy-write: Provided HCL is now validated for format violations - and provides helpful information around where the violation occurred - [GH-1200] - * command/server: The initial root token ID when running in `-dev` mode can - now be specified via `-dev-root-token-id` or the environment variable - `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162] - * command/server: The listen address when running in `-dev` mode can now be - specified via `-dev-listen-address` or the environment variable - `VAULT_DEV_LISTEN_ADDRESS` [GH-1169] - * command/server: The configured listeners now reload their TLS - certificates/keys when Vault is SIGHUP'd [GH-1196] - * command/step-down: New `vault step-down` command and API endpoint to force - the targeted node to give up active status, but without sealing. The node - will wait ten seconds before attempting to grab the lock again. [GH-1146] - * command/token-renew: Allow no token to be passed in; use `renew-self` in - this case. Change the behavior for any token being passed in to use `renew`. - [GH-1150] - * credential/app-id: Allow `app-id` parameter to be given in the login path; - this causes the `app-id` to be part of the token path, making it easier to - use with `revoke-prefix` [GH-424] - * credential/cert: Non-CA certificates can be used for authentication. They - must be matched exactly (issuer and serial number) for authentication, and - the certificate must carry the client authentication or 'any' extended usage - attributes. [GH-1153] - * credential/cert: Subject and Authority key IDs are output in metadata; this - allows more flexible searching/revocation in the audit logs [GH-1183] - * credential/cert: Support listing configured certs [GH-1212] - * credential/userpass: Add support for `create`/`update` capability - distinction in user path, and add user-specific endpoints to allow changing - the password and policies [GH-1216] - * credential/token: Add roles [GH-1155] - * secret/mssql: Add MSSQL backend [GH-998] - * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL` - endpoint [GH-1180] - * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some - other formats [GH-1187] - * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint.
[GH-1154] - * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to - fetch the capabilities of a token on a given path [GH-1171] - * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors - when revoking a lease, necessary in some emergency/failure scenarios - [GH-1168] - * sys: The return codes from `sys/health` can now be user-specified via query - parameters [GH-1199] - -BUG FIXES: - - * logical/cassandra: Apply hyphen/underscore replacement to the entire - generated username, not just the UUID, in order to handle token display name - hyphens [GH-1140] - * physical/etcd: Output actual error when cluster sync fails [GH-1141] - * vault/expiration: Don't skip error responses from the backends - during renewals [GH-1176] - -## 0.5.1 (February 25th, 2016) - -DEPRECATIONS/CHANGES: - - * RSA keys less than 2048 bits are no longer supported in the PKI backend. - 1024-bit keys are considered unsafe and are disallowed in the Internet PKI. - The `pki` backend has enforced SHA256 hashes in signatures from the - beginning, and software that can handle these hashes should be able to - handle larger key sizes. [GH-1095] - * The PKI backend no longer automatically deletes expired certificates, - including from the CRL. Doing so could lead to a situation where a time - mismatch between the Vault server and clients could result in a certificate - that would not be considered expired by a client being removed from the CRL. - The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129] - * The `cert` backend now performs a variant of channel binding at renewal time - for increased security. In order to not overly burden clients, a notion of - identity is used. This functionality can be disabled. See the 0.5.1 upgrade - guide for more specific information [GH-1127] - -FEATURES: - - * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of - the audit contract do not allow us to make the results public.) [GH-220] - -IMPROVEMENTS: - - * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control - the SNI header during TLS connections [GH-1131] - * api/health: Add the server's time in UTC to health responses [GH-1117] - * command/rekey and command/generate-root: These now return the status at - attempt initialization time, rather than requiring a separate fetch for the - nonce [GH-1054] - * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/` - paths; use normal ACL behavior instead [GH-468] - * credential/github: The validity of the token used for login will be checked - at renewal time [GH-1047] - * credential/github: The `config` endpoint no longer requires a root token; - normal ACL path matching applies - * deps: Use the standardized Go 1.6 vendoring system - * secret/aws: Inform users of AWS-imposed policy restrictions around STS - tokens if they attempt to use an invalid policy [GH-1113] - * secret/mysql: The MySQL backend now allows disabling verification of the - `connection_url` [GH-1096] - * secret/pki: Submitted CSRs are now verified to have the correct key type and - minimum number of bits according to the role. The exception is intermediate - CA signing and the `sign-verbatim` path [GH-1104] - * secret/pki: New `tidy` endpoint to allow expunging expired certificates.
[GH-1129] - * secret/postgresql: The PostgreSQL backend now allows disabling verification - of the `connection_url` [GH-1096] - * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of - 204 [GH-1086] - * credential/app-id: App ID backend will check the validity of app-id and user-id - during renewal time [GH-1039] - * credential/cert: TLS Certificates backend, during renewal, will now match the - client identity with the client identity used during login [GH-1127] - -BUG FIXES: - - * credential/ldap: Properly escape values being provided to search filters - [GH-1100] - * secret/aws: Capping on length of usernames for both IAM and STS types - [GH-1102] - * secret/pki: If a cert is not found during lookup of a serial number, - respond with a 400 rather than a 500 [GH-1085] - * secret/postgresql: Add extra revocation statements to better handle more - permission scenarios [GH-1053] - * secret/postgresql: Make connection_url work properly [GH-1112] - -## 0.5.0 (February 10, 2016) - -SECURITY: - - * Previous versions of Vault could allow a malicious user to hijack the rekey - operation by canceling an operation in progress and starting a new one. The - practical application of this is very small. If the user was an unseal key - owner, they could attempt to do this in order to either receive unencrypted - reseal keys or to replace the PGP keys used for encryption with ones under - their control. However, since this would invalidate any rekey progress, they - would need other unseal key holders to resubmit, which would be rather - suspicious during this manual operation if they were not also the original - initiator of the rekey attempt. If the user was not an unseal key holder, - there is no benefit to be gained; the only outcome that could be attempted - would be a denial of service against a legitimate rekey operation by sending - cancel requests over and over. Thanks to Josh Snyder for the report! - -DEPRECATIONS/CHANGES: - - * `s3` physical backend: Environment variables are now preferred over - configuration values. This makes it behave similarly to the rest of Vault, - which, in increasing order of preference, uses values from the configuration - file, environment variables, and CLI flags. [GH-871] - * `etcd` physical backend: `sync` functionality is now supported and turned on - by default. This can be disabled. [GH-921] - * `transit`: If a client attempts to encrypt a value with a key that does not - yet exist, what happens now depends on the capabilities set in the client's - ACL policies. If the client has `create` (or `create` and `update`) - capability, the key will upsert as in the past. If the client has `update` - capability, they will receive an error. [GH-1012] - * `token-renew` CLI command: If the token given for renewal is the same as the - client token, the `renew-self` endpoint will be used in the API. Given that - the `default` policy (by default) allows all clients access to the - `renew-self` endpoint, this makes it much more likely that the intended - operation will be successful. [GH-894] - * Token `lookup`: the `ttl` value in the response now reflects the actual - remaining TTL rather than the original TTL specified when the token was - created; the original value is now located in `creation_ttl` [GH-986] - * Vault no longer uses grace periods on leases or token TTLs. Uncertainty - about the length of the grace period for any given backend could cause - confusion. [GH-1002] - * `rekey`: Rekey now requires a nonce to be supplied with key shares.
This - nonce is generated at the start of a rekey attempt and is unique for that - attempt. - * `status`: The exit code for the `status` CLI command is now `2` for an - uninitialized Vault instead of `1`. `1` is returned for errors. This better - matches the rest of the CLI. - -FEATURES: - - * **Split Data/High Availability Physical Backends**: You can now configure - two separate physical backends: one to be used for High Availability - coordination and another to be used for encrypted data storage. See the - [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-395] - * **Fine-Grained Access Control**: Policies can now use the `capabilities` set - to specify fine-grained control over operations allowed on a path, including - separation of `sudo` privileges from other privileges. These can be mixed - and matched in any way desired. The `policy` value is kept for backwards - compatibility. See the [updated policy - documentation](https://vaultproject.io/docs/concepts/policies.html) for - details. [GH-914] (A policy sketch follows this feature list.) - * **List Support**: Listing is now supported via the API and the new `vault - list` command. This currently supports listing keys in the `generic` and - `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS - section below). Different parts of the API and backends will need to - implement list capabilities in ways that make sense to particular endpoints, - so further support will appear over time. [GH-617] - * **Root Token Generation via Unseal Keys**: You can now use the - `generate-root` CLI command to generate new orphaned, non-expiring root - tokens in case the original is lost or revoked (accidentally or - purposefully). This requires a quorum of unseal key holders. The output - value is protected via any PGP key of the initiator's choosing or a one-time - pad known only to the initiator (a suitable pad can be generated via the - `-genotp` flag to the command). [GH-915] - * **Unseal Key Archiving**: You can now optionally have Vault store your - unseal keys in your chosen physical store for disaster recovery purposes. - This option is only available when the keys are encrypted with PGP. [GH-907] - * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase - users when passing in PGP keys to the `init`, `rekey`, and `generate-root` - CLI commands. Public keys for these users will be fetched automatically. - [GH-901] - * **DynamoDB HA Physical Backend**: There is now a new, community-supported - HA-enabled physical backend using Amazon DynamoDB. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-878] - * **PostgreSQL Physical Backend**: There is now a new, community-supported - physical backend using PostgreSQL. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-945] - * **STS Support in AWS Secret Backend**: You can now use the AWS secret - backend to fetch STS tokens rather than IAM users. [GH-927] - * **Speedups in the transit backend**: The `transit` backend has gained a - cache, and now loads only the working set of keys (e.g. from the - `min_decryption_version` to the current key version) into memory. - This provides large speedups and potential memory savings when the `rotate` - feature of the backend is used heavily.
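A hedged sketch of the fine-grained `capabilities` policy form referenced above, written through the Go API client's `sys/policy` wrapper. The policy name, paths, and capability choices are illustrative only, and the client is assumed to be configured from the standard `VAULT_ADDR`/`VAULT_TOKEN` environment variables:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative policy using the fine-grained `capabilities` set;
	// `sudo` is granted on one path separately from normal CRUD access.
	rules := `
path "secret/ops/*" {
  capabilities = ["create", "read", "update", "delete", "list"]
}

path "sys/seal" {
  capabilities = ["update", "sudo"]
}
`

	// PutPolicy writes the rules under the given policy name.
	if err := client.Sys().PutPolicy("ops", rules); err != nil {
		log.Fatal(err)
	}
	log.Println("policy written")
}
```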
- -IMPROVEMENTS: - - * cli: Output secrets sorted by key name [GH-830] - * cli: Support YAML as an output format [GH-832] - * cli: Show an error if the output format is incorrect, rather than falling - back to an empty table [GH-849] - * cli: Allow setting the `advertise_addr` for HA via the - `VAULT_ADVERTISE_ADDR` environment variable [GH-581] - * cli/generate-root: Add generate-root and associated functionality [GH-915] - * cli/init: Add `-check` flag that returns whether Vault is initialized - [GH-949] - * cli/server: Use internal functions for the token-helper rather than shelling - out, which fixes some problems with using a static binary in Docker or paths - with multiple spaces when launching in `-dev` mode [GH-850] - * cli/token-lookup: Add token-lookup command [GH-892] - * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for - `-pgp-keys` [GH-940] - * conf: Use normal bool values rather than empty/non-empty for the - `tls_disable` option [GH-802] - * credential/ldap: Add support for binding, both anonymously (to discover a - user DN) and via a username and password [GH-975] - * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] - * credential/token: Change `ttl` to reflect the current remaining TTL; the - original value is in `creation_ttl` [GH-1007] - * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] - * logical/aws: You can now get STS tokens instead of IAM users [GH-927] - * logical/cassandra: Add `protocol_version` parameter to set the CQL proto - version [GH-1005] - * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] - * logical/mysql: Add list support for roles path [GH-984] - * logical/pki: Fix up key usages being specified for CAs [GH-989] - * logical/pki: Add list support for roles path [GH-985] - * logical/pki: Allow `pem_bundle` to be specified as the format, which - provides a concatenated PEM bundle of returned values [GH-1008] - * logical/pki: Add 30 seconds of slack to the validity start period to - accommodate some clock skew in machines [GH-1036] - * logical/postgres: Add `max_idle_connections` parameter [GH-950] - * logical/postgres: Add list support for roles path - * logical/ssh: Add list support for roles path [GH-983] - * logical/transit: Keys are archived and only keys between the latest version - and `min_decryption_version` are loaded into the working set. This can - provide a very large speed increase when rotating keys very often. 
[GH-977] - * logical/transit: Keys are now cached, which should provide a large speedup - in most cases [GH-979] - * physical/cache: Use 2Q cache instead of straight LRU [GH-908] - * physical/etcd: Support basic auth [GH-859] - * physical/etcd: Support sync functionality and enable by default [GH-921] - -BUG FIXES: - - * api: Correct the HTTP verb used in the LookupSelf method [GH-887] - * api: Fix the output of `Sys().MountConfig(...)` to return proper values - [GH-1017] - * command/read: Fix panic when an empty argument was given [GH-923] - * command/ssh: Fix panic when username lookup fails [GH-886] - * core: When running in standalone mode, don't advertise that we are active - until post-unseal setup completes [GH-872] - * core: Update go-cleanhttp dependency to ensure idle connections aren't - leaked [GH-867] - * core: Don't allow tokens to have duplicate policies [GH-897] - * core: Fix regression in `sys/renew` that caused information stored in the - Secret part of the response to be lost [GH-912] - * physical: Use square brackets when setting an IPv6-based advertise address - as the auto-detected advertise address [GH-883] - * physical/s3: Use an initialized client when using IAM roles to fix a - regression introduced against newer versions of the AWS Go SDK [GH-836] - * secret/pki: Fix a condition where unmounting could fail if the CA - certificate was not properly loaded [GH-946] - * secret/ssh: Fix a problem where SSH connections were not always closed - properly [GH-942] - -MISC: - - * Clarified our stance on support for community-derived physical backends. - See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - * Add `vault-java` to libraries [GH-851] - * Various minor documentation fixes and improvements [GH-839] [GH-854] - [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] - [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] - -BUILD NOTE: - - * The HashiCorp-provided binary release of Vault 0.5.0 is built against a - patched version of Go 1.5.3 containing two specific bug fixes affecting TLS - certificate handling. These fixes are in the Go 1.6 tree and were - cherry-picked on top of stock Go 1.5.3. If you want to examine the way in - which the releases were built, please look at our [cross-compilation - Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). - -## 0.4.1 (January 13, 2016) - -SECURITY: - - * Build against Go 1.5.3 to mitigate a security vulnerability introduced in - Go 1.5. For more information, please see - https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 - -This is a security-only release; other than the version number and building -against Go 1.5.3, there are no changes from 0.4.0. - -## 0.4.0 (December 10, 2015) - -DEPRECATIONS/CHANGES: - - * Policy Name Casing: Policy names are now normalized to lower-case on write, - helping prevent accidental case mismatches. For backwards compatibility, - policy names are not currently normalized when reading or deleting. [GH-676] - * Default etcd port number: the default connection string for the `etcd` - physical store uses port 2379 instead of port 4001, which is the port used - by the supported version 2.x of etcd. [GH-753] - * As noted below in the FEATURES section, if your Vault installation contains - a policy called `default`, new tokens created will inherit this policy - automatically. 
- * In the PKI backend there have been a few minor breaking changes: - * The token display name is no longer a valid option for providing a base - domain for issuance. Since this name is prepended with the name of the - authentication backend that issued it, it provided a faulty use-case at best - and a confusing experience at worst. We hope to figure out a better - per-token value in a future release. - * The `allowed_base_domain` parameter has been changed to `allowed_domains`, - which accepts a comma-separated list of domains. This allows issuing - certificates with DNS subjects across multiple domains. If you had a - configured `allowed_base_domain` parameter, it will be migrated - automatically when the role is read (either via a normal read, or via - issuing a certificate). - -FEATURES: - - * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate - and sign root CA certificates and intermediate CA CSRs. It can also now sign - submitted client CSRs, as well as a significant number of other - enhancements. See the updated documentation for the full API. [GH-666] - * **CRL Checking for Certificate Authentication**: The `cert` backend now - supports pushing CRLs into the mount and using the contained serial numbers - for revocation checking. See the documentation for the `cert` backend for - more info. [GH-330] - * **Default Policy**: Vault now ensures that a policy named `default` is added - to every token. This policy cannot be deleted, but it can be modified - (including to an empty policy). There are three endpoints allowed in the - default `default` policy, related to token self-management: `lookup-self`, - which allows a token to retrieve its own information, and `revoke-self` and - `renew-self`, which are self-explanatory. If your existing Vault - installation contains a policy called `default`, it will not be overridden, - but it will be added to each new token created. You can override this - behavior when using manual token creation (i.e. not via an authentication - backend) by setting the "no_default_policy" flag to true. [GH-732] - -IMPROVEMENTS: - - * api: API client now uses a 60 second timeout instead of indefinite [GH-681] - * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth - tokens [GH-739] - * api: Standardize environment variable reading logic inside the API; the CLI - now uses this but can still override via command-line parameters [GH-618] - * audit: HMAC-SHA256'd client tokens are now stored with each request entry. - Previously they were only displayed at creation time; this allows much - better traceability of client actions. 
[GH-713] - * audit: There is now a `sys/audit-hash` endpoint that can be used to generate - an HMAC-SHA256'd value from provided data using the given audit backend's - salt [GH-784] - * core: The physical storage read cache can now be disabled via - "disable_cache" [GH-674] - * core: The unsealing process can now be reset midway through (this feature - was documented before, but not enabled) [GH-695] - * core: Tokens can now renew themselves [GH-455] - * core: Base64-encoded PGP keys can be used with the CLI for `init` and - `rekey` operations [GH-653] - * core: Print version on startup [GH-765] - * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system - instead of requiring a root token [GH-769] - * credential/token: Display whether or not a token is an orphan in the output - of a lookup call [GH-766] - * logical: Allow `.` in path-based variables in many more locations [GH-244] - * logical: Responses now contain a "warnings" key containing a list of - warnings returned from the server. These are conditions that did not require - failing an operation, but of which the client should be aware. [GH-676] - * physical/(consul,etcd): Consul and etcd now use a connection pool to limit - the number of outstanding operations, improving behavior when a lot of - operations must happen at once [GH-677] [GH-780] - * physical/consul: The `datacenter` parameter was removed; it could not be - effective unless the Vault node (or the Consul node it was connecting to) - was in the datacenter specified, in which case it wasn't needed [GH-816] - * physical/etcd: Support TLS-encrypted connections and use a connection pool - to limit the number of outstanding operations [GH-780] - * physical/s3: The S3 endpoint can now be configured, allowing using - S3-API-compatible storage solutions [GH-750] - * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET` - environment variable [GH-758] - * secret/consul: Management tokens can now be created [GH-714] - -BUG FIXES: - - * api: API client now checks for a 301 response for redirects. Vault doesn't - generate these, but in certain conditions Go's internal HTTP handler can - generate them, leading to client errors. - * cli: `token-create` now supports the `ttl` parameter in addition to the - deprecated `lease` parameter. [GH-688] - * core: Return data from `generic` backends on the last use of a limited-use - token [GH-615] - * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673] - * core: Stale leader entries will now be reaped [GH-679] - * core: Using `mount-tune` on the auth/token path did not take effect.
[GH-688] - * core: Fix a potential race condition when (un)sealing the vault with metrics - enabled [GH-694] - * core: Fix an error that could happen in some failure scenarios where Vault - could fail to revert to a clean state [GH-733] - * core: Ensure secondary indexes are removed when a lease is expired [GH-749] - * core: Ensure rollback manager uses an up-to-date mounts table [GH-771] - * everywhere: Don't use http.DefaultClient, as it shares state implicitly and - is a source of hard-to-track-down bugs [GH-700] - * credential/token: Allow creating orphan tokens via an API path [GH-748] - * secret/generic: Validate given duration at write time, not just read time; - if stored durations are not parseable, return a warning and the default - duration rather than an error [GH-718] - * secret/generic: Return 400 instead of 500 when `generic` backend is written - to with no data fields [GH-825] - * secret/postgresql: Revoke permissions before dropping a user or revocation - may fail [GH-699] - -MISC: - - * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697] - [GH-710] [GH-715] [GH-831] - -## 0.3.1 (October 6, 2015) - -SECURITY: - - * core: In certain failure scenarios, the full values of requests and - responses would be logged [GH-665] - -FEATURES: - - * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends - now allow setting the number of maximum open connections to the database, - which was previously capped to 2. [GH-661] - * **Renewable Tokens for GitHub**: The `github` backend now supports - specifying a TTL, enabling renewable tokens. [GH-664] - -BUG FIXES: - - * dist: linux-amd64 distribution was dynamically linked [GH-656] - * credential/github: Fix acceptance tests [GH-651] - -MISC: - - * Various minor documentation fixes and improvements [GH-649] [GH-650] - [GH-654] [GH-663] - -## 0.3.0 (September 28, 2015) - -DEPRECATIONS/CHANGES: - -Note: deprecations and breaking changes in upcoming releases are announced -ahead of time on the "vault-tool" mailing list. - - * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is - via the X-Vault-Token header. Cookie authentication was hard to properly - test, could result in browsers/tools/applications saving tokens in plaintext - on disk, and had other issues. [GH-564] - * **Terminology/Field Names**: Vault is transitioning from overloading the - term "lease" to mean both "a set of metadata" and "the amount of time the - metadata is valid". The latter is now being referred to as TTL (or - "lease_duration" for backwards-compatibility); some parts of Vault have - already switched to using "ttl" and others will follow in upcoming releases. - In particular, the "token", "generic", and "pki" backends accept both "ttl" - and "lease" but in 0.4 only "ttl" will be accepted. [GH-528] - * **Downgrade Not Supported**: Due to enhancements in the storage subsystem, - values written by Vault 0.3+ cannot be read by prior versions - of Vault. There are no expected upgrade issues, however, as with all - critical infrastructure it is recommended to back up Vault's physical - storage before upgrading. - -FEATURES: - - * **SSH Backend**: Vault can now be used to delegate SSH access to machines, - via a (recommended) One-Time Password approach or by issuing dynamic keys. - [GH-385] - * **Cubbyhole Backend**: This backend works similarly to the "generic" backend - but provides a per-token workspace.
This enables some additional - authentication workflows (especially for containers) and can be useful to - applications to e.g. store local credentials while being restarted or - upgraded, rather than persisting to disk. [GH-612] - * **Transit Backend Improvements**: The transit backend now allows key - rotation and datakey generation. For rotation, data encrypted with previous - versions of the keys can still be decrypted, down to a (configurable) - minimum previous version; there is a rewrap function for manual upgrades of - ciphertext to newer versions. Additionally, the backend now allows - generating and returning high-entropy keys of a configurable bitsize - suitable for AES and other functions; this is returned wrapped by a named - key, or optionally both wrapped and plaintext for immediate use. [GH-626] - * **Global and Per-Mount Default/Max TTL Support**: You can now set the - default and maximum Time To Live for leases both globally and per-mount. - Per-mount settings override global settings. Not all backends honor these - settings yet, but the maximum is a hard limit enforced outside the backend. - See the documentation for "/sys/mounts/" for details on configuring - per-mount TTLs. [GH-469] - * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's - master key, PGP/GPG public keys can now be provided. The output keys will be - encrypted with the given keys, in order. [GH-570] - * **Duo Multifactor Authentication Support**: Backends that support MFA can - now use Duo as the mechanism. [GH-464] - * **Performance Improvements**: Users of the "generic" backend will see a - significant performance improvement as the backend no longer creates leases, - although it does return TTLs (global/mount default, or set per-item) as - before. [GH-631] - * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the - audit contract do not allow us to make the results public.) [GH-220] - -IMPROVEMENTS: - - * audit: Log entries now contain a time field [GH-495] - * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627] - * backends: Add ability for a cleanup function to be called on backend unmount - [GH-608] - * config: Allow specifying minimum acceptable TLS version [GH-447] - * core: If trying to mount in a location that is already mounted, be more - helpful about the error [GH-510] - * core: Be more explicit on failure if the issue is invalid JSON [GH-553] - * core: Tokens can now revoke themselves [GH-620] - * credential/app-id: Give a more specific error when sending a duplicate POST - to sys/auth/app-id [GH-392] - * credential/github: Support custom API endpoints (e.g. for Github Enterprise) - [GH-572] - * credential/ldap: Add per-user policies and option to login with - userPrincipalName [GH-420] - * credential/token: Allow root tokens to specify the ID of a token being - created from CLI [GH-502] - * credential/userpass: Enable renewals for login tokens [GH-623] - * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446] - * scripts: Use godep for build scripts to use same environment as tests - [GH-404] - * secret/mysql: Allow reading configuration data [GH-529] - * secret/pki: Split "allow_any_name" logic into that and a new "enforce_hostnames" option, to - allow for non-hostname values (e.g.
for client certificates) [GH-555] - * storage/consul: Allow specifying certificates used to talk to Consul - [GH-384] - * storage/mysql: Allow SSL encrypted connections [GH-439] - * storage/s3: Allow using temporary security credentials [GH-433] - * telemetry: Put telemetry object in configuration to allow more flexibility - [GH-419] - * testing: Disable mlock for testing of logical backends so as not to require - root [GH-479] - -BUG FIXES: - - * audit/file: Do not enable auditing if file permissions are invalid [GH-550] - * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] - * cli: Fixed missing setup of client TLS certificates if no custom CA was - provided - * cli/read: Do not include a carriage return when using raw field output - [GH-624] - * core: Bad input data could lead to a panic for that session, rather than - returning an error [GH-503] - * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] - * core: Do not return a Secret if there are no uses left on a token (since it - will be unable to be used) [GH-615] - * core: Code paths that called lookup-self would decrement num_uses and - potentially immediately revoke a token [GH-552] - * core: Some /sys/ paths would not properly redirect from a standby to the - leader [GH-499] [GH-551] - * credential/aws: Translate spaces in a token's display name to avoid making - IAM unhappy [GH-567] - * credential/github: Integration failed if more than ten organizations or - teams [GH-489] - * credential/token: Tokens with sudo access to "auth/token/create" can now use - root-only options [GH-629] - * secret/cassandra: Work around backwards-incompatible change made in - Cassandra 2.2 preventing Vault from properly setting/revoking leases - [GH-549] - * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues - [GH-522] - * secret/postgres: Explicitly set timezone in connections [GH-597] - * storage/etcd: Renew semaphore periodically to prevent leadership flapping - [GH-606] - * storage/zk: Fix collisions in storage that could lead to data unavailability - [GH-411] - -MISC: - - * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] - [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] - [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] - * Less "armon" in stack traces [GH-453] - * Sourcegraph integration [GH-456] - -## 0.2.0 (July 13, 2015) - -FEATURES: - - * **Key Rotation Support**: The `rotate` command can be used to rotate the - master encryption key used to write data to the storage (physical) backend. - [GH-277] - * **Rekey Support**: Rekey can be used to rotate the master key and change the - configuration of the unseal keys (number of shares, threshold required). - [GH-277] - * **New secret backend: `pki`**: Enable Vault to be a certificate authority - and generate signed TLS certificates. [GH-310] - * **New secret backend: `cassandra`**: Generate dynamic credentials for - Cassandra [GH-363] - * **New storage backend: `etcd`**: store physical data in etcd [GH-259] - [GH-297] - * **New storage backend: `s3`**: store physical data in S3. Does not support - HA. [GH-242] - * **New storage backend: `MySQL`**: store physical data in MySQL. Does not - support HA. 
[GH-324] - * `transit` secret backend supports derived keys for per-transaction unique - keys [GH-399] - -IMPROVEMENTS: - - * cli/auth: Enable `cert` method [GH-380] - * cli/auth: read input from stdin [GH-250] - * cli/read: Ability to read a single field from a secret [GH-257] - * cli/write: Adding a force flag when no input required - * core: allow time duration format in place of seconds for some inputs - * core: audit log provides more useful information [GH-360] - * core: graceful shutdown for faster HA failover - * core: **change policy format** to use explicit globbing [GH-400]. Any - existing policy in Vault is automatically upgraded to avoid issues. All - policy files must be updated for future writes. Adding the explicit glob - character `*` to the path specification is all that is required. - * core: policy merging to give deny highest precedence [GH-400] - * credential/app-id: Protect against timing attack on app-id - * credential/cert: Record the common name in the metadata [GH-342] - * credential/ldap: Allow TLS verification to be disabled [GH-372] - * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367] - * credential/userpass: Protect against timing attack on password - * credential/userpass: Use bcrypt for password matching - * http: response codes improved to reflect error [GH-366] - * http: the `sys/health` endpoint supports `?standbyok` to return 200 on - standby [GH-389] - * secret/app-id: Support deleting AppID and UserIDs [GH-200] - * secret/consul: Fine grained lease control [GH-261] - * secret/transit: Decouple raw key from key management endpoint [GH-355] - * secret/transit: Upsert named key when encrypt is used [GH-355] - * storage/zk: Support for HA configuration [GH-252] - * storage/zk: Changing node representation. **Backwards incompatible**. - [GH-416] - -BUG FIXES: - - * audit/file: fix removing TLS connection state - * audit/syslog: fix removing TLS connection state - * command/*: commands accepting `k=v` allow blank values - * core: Allow building on FreeBSD [GH-365] - * core: Fixed various panics when audit logging enabled - * core: Lease renewal does not create redundant lease - * core: fixed leases with negative duration [GH-354] - * core: token renewal does not create child token - * core: fixing panic when lease increment is null [GH-408] - * credential/app-id: Salt the paths in storage backend to avoid information - leak - * credential/cert: Fixing client certificate not being requested - * credential/cert: Fixing panic when no certificate match found [GH-361] - * http: Accept PUT as POST for sys/auth - * http: Accept PUT as POST for sys/mounts [GH-349] - * http: Return 503 when sealed [GH-225] - * secret/postgres: Username length is capped to avoid exceeding the limit - * server: Do not panic if backend not configured [GH-222] - * server: Explicitly check value of tls_disable [GH-201] - * storage/zk: Fixed issues with version conflicts [GH-190] - -MISC: - - * cli/path-help: renamed from `help` to avoid confusion - -## 0.1.2 (May 11, 2015) - -FEATURES: - - * **New physical backend: `zookeeper`**: store physical data in Zookeeper. - HA not supported yet. - * **New credential backend: `ldap`**: authenticate using LDAP credentials.
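The LDAP credential backend introduced above is driven through its login endpoint. A minimal sketch with the Go API client, assuming the backend is mounted at the default `auth/ldap` path; the username and password are placeholders:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder credentials; "auth/ldap" is the default mount path for
	// the LDAP credential backend.
	secret, err := client.Logical().Write("auth/ldap/login/alice", map[string]interface{}{
		"password": "placeholder-password",
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil || secret.Auth == nil {
		log.Fatal("login returned no auth data")
	}

	// Use the returned client token for subsequent requests.
	client.SetToken(secret.Auth.ClientToken)
	log.Printf("logged in with policies: %v", secret.Auth.Policies)
}
```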
- -IMPROVEMENTS: - - * core: Auth backends can store internal data about auth creds - * audit: display name for auth is shown in logs [GH-176] - * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130] - * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162] - * command/server: environment variables are copy-pastable - * credential/app-id: hash of app and user ID are in metadata [GH-176] - * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124] - * logical/*: Generate help output even if no synopsis specified - -BUG FIXES: - - * core: login endpoints should never return secrets - * core: Internal data should never be returned from core endpoints - * core: defer barrier initialization to as late as possible to avoid error - cases during init that corrupt data (no data loss) - * core: guard against invalid init config earlier - * audit/file: create file if it doesn't exist [GH-148] - * command/*: ignore directories when traversing CA paths [GH-181] - * credential/*: all policy mapping keys are case insensitive [GH-163] - * physical/consul: Fixing path for locking so HA works in every case - -## 0.1.1 (May 2, 2015) - -SECURITY CHANGES: - - * physical/file: create the storage with 0600 permissions [GH-102] - * token/disk: write the token to disk with 0600 perms - -IMPROVEMENTS: - - * core: Very verbose error if mlock fails [GH-59] - * command/*: On error with TLS oversized record, show more human-friendly - error message. [GH-123] - * command/read: `lease_renewable` is now outputted along with the secret to - show whether it is renewable or not - * command/server: Add configuration option to disable mlock - * command/server: Disable mlock for dev mode so it works on more systems - -BUG FIXES: - - * core: if token helper isn't absolute, prepend with path to Vault - executable, not "vault" (which requires PATH) [GH-60] - * core: Any "mapping" routes allow hyphens in keys [GH-119] - * core: Validate `advertise_addr` is a valid URL with scheme [GH-106] - * command/auth: Using an invalid token won't crash [GH-75] - * credential/app-id: app and user IDs can have hyphens in keys [GH-119] - * helper/password: import proper DLL for Windows to ask password [GH-83] - -## 0.1.0 (April 28, 2015) - - * Initial release + token used to issue From 319fe8ea37ec9b89eb3c529d4bdb236f3eb7fdb1 Mon Sep 17 00:00:00 2001 From: Chris Hoffman <99742+chrishoffman@users.noreply.github.com> Date: Fri, 25 Oct 2019 11:45:32 -0400 Subject: [PATCH 17/90] changelog++ --- CHANGELOG.md | 1670 +------------------------------------------------- 1 file changed, 4 insertions(+), 1666 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09927bd03df3..c6b5e752fc6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,7 @@ FEATURES: (secp384r1) and P-521 (secp521r1) ECDSA curves [GH-7551] and encryption and decryption is now supported via AES128-GCM96 [GH-7555] * **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to - require a specific header before allowing requests. + require a specific header before allowing requests [GH-7627] * **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can now be rotated, to ensure that only Vault knows the credentials it is using [GH-7131] * **New UI Features** The UI now supports managing users and groups for the @@ -38,18 +38,6 @@ FEATURES: documentation](https://www.vaultproject.io/docs/config/index.html) for details.
[GH-6957] -CHANGES: - - * auth/aws: If a custom `sts_endpoint` is configured, Vault Agent and the CLI - should provide the corresponding region via the `region` parameter (which - already existed as a CLI parameter, and has now been added to Agent). The - automatic region detection added to the CLI and Agent in 1.2 has been removed. - * sys/seal-status now has a `storage_type` field denoting what type of storage - the cluster is configured to use - * Vault Agent now has a new optional `require_request_header` option per - listener. If the option is set, each incoming request must have a - `X-Vault-Request: true` header entry. [GH-7627] - IMPROVEMENTS: * auth/jwt: The redirect callback host may now be specified for CLI logins @@ -79,6 +67,8 @@ IMPROVEMENTS: * sys: Add a new set of endpoints under `sys/pprof/` that allows profiling information to be extracted [GH-7473] * sys: Add endpoint that counts the total number of active identity entities [GH-7541] + * sys: `sys/seal-status` now has a `storage_type` field denoting what type of storage + the cluster is configured to use * sys/config: Add a new endpoint under `sys/config/state/sanitized` that returns the configuration state of the server. It excludes config values from `storage`, `ha_storage`, and `seal` stanzas and some values @@ -1291,1656 +1281,4 @@ FEATURES: * **ACL Templating**: ACL policies can now be templated using identity Entity, Groups, and Metadata. * **UI Onboarding wizards**: The Vault UI can provide contextual help and - guidance, linking out to relevant links or guides on vaultproject.io for - various workflows in Vault. - -IMPROVEMENTS: - - * agent: Add `exit_after_auth` to be able to use the Agent for a single - authentication [GH-5013] - * auth/approle: Add ability to set token bound CIDRs on individual Secret IDs - [GH-5034] - * cli: Add support for passing parameters to `vault read` operations [GH-5093] - * secrets/aws: Make credential types more explicit [GH-4360] - * secrets/nomad: Support for longer token names [GH-5117] - * secrets/pki: Allow disabling CRL generation [GH-5134] - * storage/azure: Add support for different Azure environments [GH-4997] - * storage/file: Sort keys in list responses [GH-5141] - * storage/mysql: Support special characters in database and table names. - -BUG FIXES: - - * auth/jwt: Always validate `aud` claim even if `bound_audiences` isn't set - (IOW, error in this case) - * core: Prevent Go's HTTP library from interspersing logs in a different - format and/or interleaved [GH-5135] - * identity: Properly populate `mount_path` and `mount_type` on group lookup - [GH-5074] - * identity: Fix persisting alias metadata [GH-5188] - * identity: Fix carryover issue from previously fixed race condition that - could cause Vault not to start up due to two entities referencing the same - alias. These entities are now merged. [GH-5000] - * replication: Fix issue causing some pages not to flush to storage - * secrets/database: Fix inability to update custom SQL statements on - database roles. [GH-5080] - * secrets/pki: Disallow putting the CA's serial on its CRL. While technically - legal, doing so inherently means the CRL can't be trusted anyways, so it's - not useful and easy to footgun. [GH-5134] - * storage/gcp,spanner: Fix data races [GH-5081] - -## 0.10.4 (July 25th, 2018) - -SECURITY: - - * Control Groups: The associated Identity entity with a request was not being - properly persisted. As a result, the same authorizer could provide more than - one authorization. 
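For context on the control group issue above: an authorizer approves a pending request through the Enterprise `sys/control-group/authorize` endpoint, identified by the request's wrapping accessor, and each approval is tied to the authorizer's identity entity (the value that was not being persisted). A minimal sketch of the authorizer's side with the Go API client; the accessor is a made-up value, and the `approved` response field is an assumption about the endpoint's response shape:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical wrapping accessor of the pending control group request;
	// the original requester receives it with the wrapped response.
	resp, err := client.Logical().Write("sys/control-group/authorize", map[string]interface{}{
		"accessor": "0ad21b78-e9bb-64fa-88b8-1e38db217bde",
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp != nil && resp.Data != nil {
		// Assumed response shape: a boolean reporting whether enough
		// authorizations have been collected for the request to proceed.
		log.Printf("approved: %v", resp.Data["approved"])
	}
}
```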
- -DEPRECATIONS/CHANGES: - - * Revocations of dynamic secrets leases are now queued/asynchronous rather - than synchronous. This allows Vault to take responsibility for revocation - even if the initial attempt fails. The previous synchronous behavior can be - attained via the `-sync` CLI flag or `sync` API parameter. When in - synchronous mode, if the operation results in failure it is up to the user - to retry. - * CLI Retries: The CLI will no longer retry commands on 5xx errors. This was a - source of confusion to users as to why Vault would "hang" before returning a - 5xx error. The Go API client still defaults to two retries. - * Identity Entity Alias metadata: You can no longer manually set metadata on - entity aliases. All alias data (except the canonical entity ID it refers to) - is intended to be managed by the plugin providing the alias information, so - allowing it to be set manually didn't make sense. - -FEATURES: - - * **JWT/OIDC Auth Method**: The new `jwt` auth method accepts JWTs and either - validates signatures locally or uses OIDC Discovery to fetch the current set - of keys for signature validation. Various claims can be specified for - validation (in addition to the cryptographic signature) and a user and - optional groups claim can be used to provide Identity information. - * **FoundationDB Storage**: You can now use FoundationDB for storing Vault - data. - * **UI Control Group Workflow (enterprise)**: The UI will now detect control - group responses and provides a workflow to view the status of the request - and to authorize requests. - * **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically - authenticate for you across a variety of authentication methods, provide - tokens to clients, and keep the tokens renewed, reauthenticating as - necessary. - -IMPROVEMENTS: - - * auth/azure: Add support for virtual machine scale sets - * auth/gcp: Support multiple bindings for region, zone, and instance group - * cli: Add subcommands for interacting with the plugin catalog [GH-4911] - * cli: Add a `-description` flag to secrets and auth tune subcommands to allow - updating an existing secret engine's or auth method's description. This - change also allows the description to be unset by providing an empty string. 
- * core: Add config flag to disable non-printable character check [GH-4917] - * core: A `max_request_size` parameter can now be set per-listener to adjust - the maximum allowed size per request [GH-4824] - * core: Add control group request endpoint to default policy [GH-4904] - * identity: Identity metadata is now passed through to plugins [GH-4967] - * replication: Add additional safety checks and logging when replication is - in a bad state - * secrets/kv: Add support for using `-field=data` to KVv2 when using `vault - kv` [GH-4895] - * secrets/pki: Add the ability to tidy revoked but unexpired certificates - [GH-4916] - * secrets/ssh: Allow Vault to work with single-argument SSH flags [GH-4825] - * secrets/ssh: SSH executable path can now be configured in the CLI [GH-4937] - * storage/swift: Add additional configuration options [GH-4901] - * ui: Choose which auth methods to show to unauthenticated users via - `listing_visibility` in the auth method edit forms [GH-4854] - * ui: Authenticate users automatically by passing a wrapped token to the UI via - the new `wrapped_token` query parameter [GH-4854] - -BUG FIXES: - - * api: Fix response body being cleared too early [GH-4987] - * auth/approle: Fix issue with tidy endpoint that would unnecessarily remove - secret accessors [GH-4981] - * auth/aws: Fix updating `max_retries` [GH-4980] - * auth/kubernetes: Trim trailing whitespace when sending JWT - * cli: Fix parsing of environment variables for integer flags [GH-4925] - * core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is - sealed [GH-4874] - * core: Fix issue releasing the leader lock in some circumstances [GH-4915] - * core: Fix a panic that could happen if the server was shut down while still - starting up - * core: Fix deadlock that would occur if a leadership loss occurs at the same - time as a seal operation [GH-4932] - * core: Fix issue with auth mounts failing to renew tokens due to policies - changing [GH-4960] - * auth/radius: Fix issue where some radius logins were being canceled too early - [GH-4941] - * core: Fix accidental seal of vault if we lose leadership during startup - [GH-4924] - * core: Fix standby not being able to forward requests larger than 4MB - [GH-4844] - * core: Avoid panic while processing group memberships [GH-4841] - * identity: Fix a race condition creating aliases [GH-4965] - * plugins: Fix being unable to send very large payloads to or from plugins - [GH-4958] - * physical/azure: Long list responses would sometimes be truncated [GH-4983] - * replication: Allow replication status requests to be processed while in - merkle sync - * replication: Ensure merkle reindex flushes all changes to storage immediately - * replication: Fix a case where a network interruption could cause a secondary - to be unable to reconnect to a primary - * secrets/pki: Fix permitted DNS domains performing improper validation - [GH-4863] - * secrets/database: Fix panic during DB creds revocation [GH-4846] - * ui: Fix usage of cubbyhole backend in the UI [GH-4851] - * ui: Fix toggle state when a secret is JSON-formatted [GH-4913] - * ui: Fix coercion of falsey values to empty string when editing secrets as - JSON [GH-4977] - -## 0.10.3 (June 20th, 2018) - -DEPRECATIONS/CHANGES: - - * In the audit log and in client responses, policies are now split into three - parameters: policies that came only from tokens, policies that came only - from Identity, and the combined set. Any previous location of policies via - the API now contains the full, combined set.
- * When a token is tied to an Identity entity and the entity is deleted, the - token will no longer be usable, regardless of the validity of the token - itself. - * When authentication succeeds but no policies were defined for that specific - user, most auth methods would allow a token to be generated but a few would - reject the authentication, namely `ldap`, `okta`, and `radius`. Since the - `default` policy is added by Vault's core, this would incorrectly reject - valid authentications before they would in fact be granted policies. This - inconsistency has been addressed; valid authentications for these methods - now succeed even if no policy was specifically defined in that method for - that user. - -FEATURES: - - * Root Rotation for Active Directory: You can now command Vault to rotate the - configured root credentials used in the AD secrets engine, to ensure that - only Vault knows the credentials it's using. - * URI SANs in PKI: You can now configure URI Subject Alternate Names in the - `pki` backend. Roles can limit which SANs are allowed via globbing. - * `kv rollback` Command: You can now use `vault kv rollback` to roll a KVv2 - path back to a previous non-deleted/non-destroyed version. The previous - version becomes the next/newest version for the path. - * Token Bound CIDRs in AppRole: You can now add CIDRs to which a token - generated from AppRole will be bound. - -IMPROVEMENTS: - - * approle: Return 404 instead of 202 on invalid role names during POST - operations [GH-4778] - * core: Add idle and initial header read/TLS handshake timeouts to connections - to ensure server resources are cleaned up [GH-4760] - * core: Report policies in token, identity, and full sets [GH-4747] - * secrets/databases: Add `create`/`update` distinction for connection - configurations [GH-3544] - * secrets/databases: Add `create`/`update` distinction for role configurations - [GH-3544] - * secrets/databases: Add best-effort revocation logic for use when a role has - been deleted [GH-4782] - * secrets/kv: Add `kv rollback` [GH-4774] - * secrets/pki: Add URI SANs support [GH-4675] - * secrets/ssh: Allow standard SSH command arguments to be used, without - requiring username@hostname syntax [GH-4710] - * storage/consul: Add context support so that requests are cancelable - [GH-4739] - * sys: Added `hidden` option to `listing_visibility` field on `sys/mounts` - API [GH-4827] - * ui: Secret values are obfuscated by default and visibility is toggleable [GH-4422] - -BUG FIXES: - - * auth/approle: Fix panic due to metadata being nil [GH-4719] - * auth/aws: Fix delete path for tidy operations [GH-4799] - * core: Optimizations to remove some speed regressions due to the - security-related changes in 0.10.2 - * storage/dynamodb: Fix errors seen when reading existing DynamoDB data [GH-4721] - * secrets/database: Fix default MySQL root rotation statement [GH-4748] - * secrets/gcp: Fix renewal for GCP account keys - * secrets/kv: Fix writing to the root of a KVv2 mount from `vault kv` commands - incorrectly operating on a root+mount path instead of being an error - [GH-4726] - * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC - keys, fixing lookup on some Thales devices - * replication: Fix issue enabling replication when a non-auth mount and auth - mount have the same name - * auth/kubernetes: Fix issue verifying ECDSA signed JWTs - * ui: add missing edit mode for auth method configs [GH-4770] - -## 0.10.2 (June 6th, 2018) - -SECURITY: - - * Tokens: A race condition was identified that 
could occur if a token's
-   lease expired while Vault was not running. In this case, when Vault came
-   back online, sometimes it would properly revoke the lease but other times it
-   would not, leading to a Vault token that no longer had an expiration and had
-   essentially unlimited lifetime. This race was per-token, not all-or-nothing
-   for all tokens that may have expired during Vault's downtime. We have fixed
-   the behavior and put extra checks in place to help prevent any similar
-   future issues. In addition, the logic we have put in place ensures that such
-   lease-less tokens can no longer be used (unless they are root tokens that
-   never had an expiration to begin with).
- * Convergent Encryption: The version 2 algorithm used in `transit`'s
-   convergent encryption feature is susceptible to offline
-   plaintext-confirmation attacks. As a result, we are introducing a version 3
-   algorithm that mitigates this. If you are currently using convergent
-   encryption, we recommend upgrading, rotating your encryption key (the new
-   key version will use the new algorithm), and rewrapping your data (the
-   `rewrap` endpoint can be used to allow a relatively non-privileged user to
-   perform the rewrapping while never divulging the plaintext). A sketch of
-   this rotate-and-rewrap flow follows the feature list below.
- * AppRole case-sensitive role name secret-id leaking: When using a mixed-case
-   role name via AppRole, deleting a secret-id via accessor or other operations
-   could end up leaving the secret-id behind and valid but without an accessor.
-   This has now been fixed, and we have put checks in place to prevent these
-   secret-ids from being used.
-
-DEPRECATIONS/CHANGES:
-
- * PKI duration return types: The PKI backend now returns durations (e.g. when
-   reading a role) as an integer number of seconds instead of a Go-style
-   string, in line with how the rest of Vault's API returns durations.
-
-FEATURES:
-
- * Active Directory Secrets Engine: A new `ad` secrets engine has been created
-   which allows Vault to rotate and provide credentials for configured AD
-   accounts.
- * Rekey Verification: Rekey operations can now require verification. This
-   turns on a two-phase process where the existing key shares authorize
-   generating a new master key, and a threshold of the new, returned key shares
-   must be provided to verify that they have been successfully received in
-   order for the actual master key to be rotated.
- * CIDR restrictions for `cert`, `userpass`, and `kubernetes` auth methods:
-   You can now limit authentication to specific CIDRs; these will also be
-   encoded in resultant tokens to limit their use.
- * Vault UI Browser CLI: The UI now supports usage of read/write/list/delete
-   commands in a CLI that can be accessed from the nav bar. Complex inputs such
-   as JSON files are not currently supported. This surfaces features otherwise
-   unsupported in Vault's UI.
- * Azure Key Vault Auto Unseal/Seal Wrap Support (Enterprise): Azure Key Vault
-   can now be used as a seal for Auto Unseal and Seal Wrapping.
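For the convergent-encryption mitigation above, a minimal sketch of the rotate-and-rewrap flow using the Go API client. The mount path `transit/`, the key name `my-key`, and the ciphertext value are illustrative assumptions, not part of the release itself:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig honors VAULT_ADDR; NewClient picks up VAULT_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Rotate the key so its newest version uses the v3 convergent algorithm.
	if _, err := client.Logical().Write("transit/keys/my-key/rotate", nil); err != nil {
		log.Fatal(err)
	}

	// Rewrap existing ciphertext under the newest key version. The plaintext
	// is never returned, so a relatively low-privileged client can do this.
	secret, err := client.Logical().Write("transit/rewrap/my-key", map[string]interface{}{
		"ciphertext": "vault:v1:...", // placeholder: ciphertext from an earlier encrypt call
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["ciphertext"])
}
```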
-
-IMPROVEMENTS:
-
- * api: Close renewer's doneCh when the renewer is stopped, so that programs
-   expecting a final value through doneCh behave correctly [GH-4472]
- * auth/cert: Break out `allowed_names` into component parts and add
-   `allowed_uri_sans` [GH-4231]
- * auth/ldap: Obfuscate error messages pre-bind for greater security [GH-4700]
- * cli: `vault login` now supports a `-no-print` flag to suppress printing
-   token information but still allow storing into the token helper [GH-4454]
- * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and
-   CKM_RSA_PKCS_OAEP mechanisms
- * core/pkcs11 (enterprise): HSM slots can now be selected by token label
-   instead of just slot number
- * core/token: Optimize token revocation by removing unnecessary list call
-   against the storage backend when calling revoke-orphan on tokens [GH-4465]
- * core/token: Refactor token revocation logic to not block on the call when
-   underlying leases are pending revocation by moving the expiration logic to
-   the expiration manager [GH-4512]
- * expiration: Allow revoke-prefix and revoke-force to work on single leases as
-   well as prefixes [GH-4450]
- * identity: Return parent group info when reading a group [GH-4648]
- * identity: Provide more contextual key information when listing entities,
-   groups, and aliases
- * identity: Passthrough EntityID to backends [GH-4663]
- * identity: Adds ability to request entity information through system view
-   [GH-4681]
- * secret/pki: Add custom extended key usages [GH-4667]
- * secret/pki: Add custom PKIX serial numbers [GH-4694]
- * secret/ssh: Use hostname instead of IP in OTP mode, similar to CA mode
-   [GH-4673]
- * storage/file: Attempt in some error conditions to do more cleanup [GH-4684]
- * ui: wrapping lookup now displays the path [GH-4644]
- * ui: Identity interface now has more inline actions to make editing and adding
-   aliases to an entity or group easier [GH-4502]
- * ui: Identity interface now lists groups by name [GH-4655]
- * ui: Permission denied errors still render the sidebar in the Access section
-   [GH-4658]
- * replication: Improve performance of index page flushes and WAL garbage
-   collection
-
-BUG FIXES:
-
- * auth/approle: Make invalid role_id a 400 error instead of 500 [GH-4470]
- * auth/cert: Fix Identity alias using serial number instead of common name
-   [GH-4475]
- * cli: Fix panic running `vault token capabilities` with multiple paths
-   [GH-4552]
- * core: When using the `use_always` option with PROXY protocol support, do not
-   require `authorized_addrs` to be set [GH-4065]
- * core: Fix panic when certain combinations of policy paths and allowed/denied
-   parameters were used [GH-4582]
- * secret/gcp: Make `bound_region` able to use short names
- * secret/kv: Fix response wrapping for KV v2 [GH-4511]
- * secret/kv: Fix address flag not being honored correctly [GH-4617]
- * secret/pki: Fix `safety_buffer` for tidy being allowed to be negative,
-   clearing all certs [GH-4641]
- * secret/pki: Fix `key_type` not being allowed to be set to `any` [GH-4595]
- * secret/pki: Fix path length parameter being ignored when using
-   `use_csr_values` and signing an intermediate CA cert [GH-4459]
- * secret/ssh: Only append UserKnownHostsFile to args when configured with a
-   value [GH-4674]
- * storage/dynamodb: Fix listing when one child is left within a nested path
-   [GH-4570]
- * storage/gcs: Fix swallowing an error on connection close [GH-4691]
- * ui: Fix HMAC algorithm in transit [GH-4604]
- * ui: Fix unwrap of auth responses via
the UI's unwrap tool [GH-4611] - * ui (enterprise): Fix parsing of version string that blocked some users from seeing - enterprise-specific pages in the UI [GH-4547] - * ui: Fix incorrect capabilities path check when viewing policies [GH-4566] - * replication: Fix error while running plugins on a newly created replication - secondary - * replication: Fix issue with token store lookups after a secondary's mount table - is invalidated. - * replication: Improve startup time when a large merkle index is in use. - * replication: Fix panic when storage becomes unreachable during unseal. - -## 0.10.1/0.9.7 (April 25th, 2018) - -The following two items are in both 0.9.7 and 0.10.1. They only affect -Enterprise, and as such 0.9.7 is an Enterprise-only release: - -SECURITY: - - * EGPs: A regression affecting 0.9.6 and 0.10.0 causes EGPs to not be applied - correctly if an EGP is updated in a running Vault after initial write or - after it is loaded on unseal. This has been fixed. - -BUG FIXES: - - * Fixed an upgrade issue affecting performance secondaries when migrating from - a version that did not include Identity to one that did. - -All other content in this release is for 0.10.1 only. - -DEPRECATIONS/CHANGES: - - * `vault kv` and Vault versions: In 0.10.1 some issues with `vault kv` against - v1 K/V engine mounts are fixed. However, using 0.10.1 for both the server - and CLI versions is required. - * Mount information visibility: Users that have access to any path within a - mount can now see information about that mount, such as its type and - options, via some API calls. - * Identity and Local Mounts: Local mounts would allow creating Identity - entities but these would not be able to be used successfully (even locally) - in replicated scenarios. We have now disallowed entities and groups from - being created for local mounts in the first place. - -FEATURES: - - * X-Forwarded-For support: `X-Forwarded-For` headers can now be used to set the - client IP seen by Vault. See the [TCP listener configuration - page](https://www.vaultproject.io/docs/configuration/listener/tcp.html) for - details. - * CIDR IP Binding for Tokens: Tokens now support being bound to specific - CIDR(s) for usage. Currently this is implemented in Token Roles; usage can be - expanded to other authentication backends over time. - * `vault kv patch` command: A new `kv patch` helper command that allows - modifying only some values in existing data at a K/V path, but uses - check-and-set to ensure that this modification happens safely. - * AppRole Local Secret IDs: Roles can now be configured to generate secret IDs - local to the cluster. This enables performance secondaries to generate and - consume secret IDs without contacting the primary. - * AES-GCM Support for PKCS#11 [BETA] (Enterprise): For supporting HSMs, - AES-GCM can now be used in lieu of AES-CBC/HMAC-SHA256. This has currently - only been fully tested on AWS CloudHSM. - * Auto Unseal/Seal Wrap Key Rotation Support (Enterprise): Auto Unseal - mechanisms, including PKCS#11 HSMs, now support rotation of encryption keys, - and migration between key and encryption types, such as from AES-CBC to - AES-GCM, can be performed at the same time (where supported). - -IMPROVEMENTS: - - * auth/approle: Support for cluster local secret IDs. 
This enables secondaries
-   to generate secret IDs without contacting the primary [GH-4427]
- * auth/token: Add to the token lookup response, the policies inherited due to
-   identity associations [GH-4366]
- * auth/token: Add CIDR binding to token roles [GH-815]
- * cli: Add `vault kv patch` [GH-4432]
- * core: Add X-Forwarded-For support [GH-4380]
- * core: Add token CIDR-binding support [GH-815]
- * identity: Add the ability to disable an entity. Disabling an entity does not
-   revoke associated tokens, but while the entity is disabled they cannot be
-   used. [GH-4353]
- * physical/consul: Allow tuning of session TTL and lock wait time [GH-4352]
- * replication: Dynamically adjust WAL cleanup over a period of time based on
-   the rate of writes committed
- * secret/ssh: Update dynamic key install script to use shell locking to avoid
-   concurrent modifications [GH-4358]
- * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of
-   engines will show you the ones you implicitly have access to (because you
-   have access to secrets in those engines) [GH-4439]
-
-BUG FIXES:
-
- * cli: Fix `vault kv` backwards compatibility with KV v1 engine mounts
-   [GH-4430]
- * identity: Persist entity memberships in external identity groups across
-   mounts [GH-4365]
- * identity: Fix error preventing authentication using local mounts on
-   performance secondary replication clusters [GH-4407]
- * replication: Fix issue causing secondaries to not connect properly to a
-   pre-0.10 primary until the primary was upgraded
- * secret/gcp: Fix panic on rollback when a roleset wasn't created properly
-   [GH-4344]
- * secret/gcp: Fix panic on renewal
- * ui: Fix IE11 form submissions in a few parts of the application [GH-4378]
- * ui: Fix IE file saving on policy pages and init screens [GH-4376]
- * ui: Fixed an issue where the AWS secret backend would show the wrong menu
-   [GH-4371]
- * ui: Fixed an issue where policies with commas would not render in the
-   interface properly [GH-4398]
- * ui: Corrected the saving of mount tune ttls for auth methods [GH-4431]
- * ui: Credentials generation no longer checks capabilities before making
-   api calls. This should fix needing "update" capabilities to read IAM
-   credentials in the AWS secrets engine [GH-4446]
-
-## 0.10.0 (April 10th, 2018)
-
-SECURITY:
-
- * Log sanitization for Combined Database Secret Engine: In certain failure
-   scenarios with incorrectly formatted connection urls, the raw connection
-   errors were being returned to the user with the configured database
-   credentials. Errors are now sanitized before being returned to the user.
-
-DEPRECATIONS/CHANGES:
-
- * Database plugin compatibility: The database plugin interface was enhanced to
-   support some additional functionality related to root credential rotation
-   and supporting templated URL strings. The changes were made in a
-   backwards-compatible way and all builtin plugins were updated with the new
-   features. Custom plugins not built into Vault will need to be upgraded to
-   support templated URL strings and root rotation. Additionally, the
-   Initialize method was deprecated in favor of a new Init method that supports
-   persisting configuration modifications made in the plugin back to the
-   primary data store.
- * Removal of returned secret information: For a long time Vault has returned - configuration given to various secret engines and auth methods with secret - values (such as secret API keys or passwords) still intact, and with a - warning to the user on write that anyone with read access could see the - secret. This was mostly done to make it easy for tools like Terraform to - judge whether state had drifted. However, it also feels quite un-Vault-y to - do this and we've never felt very comfortable doing so. In 0.10 we have gone - through and removed this behavior from the various backends; fields which - contained secret values are simply no longer returned on read. We are - working with the Terraform team to make changes to their provider to - accommodate this as best as possible, and users of other tools may have to - make adjustments, but in the end we felt that the ends did not justify the - means and we needed to prioritize security over operational convenience. - * LDAP auth method case sensitivity: We now treat usernames and groups - configured locally for policy assignment in a case insensitive fashion by - default. Existing configurations will continue to work as they do now; - however, the next time a configuration is written `case_sensitive_names` - will need to be explicitly set to `true`. - * TTL handling within core: All lease TTL handling has been centralized within - the core of Vault to ensure consistency across all backends. Since this was - previously delegated to individual backends, there may be some slight - differences in TTLs generated from some backends. - * Removal of default `secret/` mount: In 0.12 we will stop mounting `secret/` - by default at initialization time (it will still be available in `dev` - mode). - -FEATURES: - - * OSS UI: The Vault UI is now fully open-source. Similarly to the CLI, some - features are only available with a supporting version of Vault, but the code - base is entirely open. - * Versioned K/V: The `kv` backend has been completely revamped, featuring - flexible versioning of values, check-and-set protections, and more. A new - `vault kv` subcommand allows friendly interactions with it. Existing mounts - of the `kv` backend can be upgraded to the new versioned mode (downgrades - are not currently supported). The old "passthrough" mode is still the - default for new mounts; versioning can be turned on by setting the - `-version=2` flag for the `vault secrets enable` command. - * Database Root Credential Rotation: Database configurations can now rotate - their own configured admin/root credentials, allowing configured credentials - for a database connection to be rotated immediately after sending them into - Vault, invalidating the old credentials and ensuring only Vault knows the - actual valid values. - * Azure Authentication Plugin: There is now a plugin (pulled in to Vault) that - allows authenticating Azure machines to Vault using Azure's Managed Service - Identity credentials. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-auth-azure) for more - information. - * GCP Secrets Plugin: There is now a plugin (pulled in to Vault) that allows - generating secrets to allow access to GCP. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-secrets-gcp) for more - information. - * Selective Audit HMACing of Request and Response Data Keys: HMACing in audit - logs can be turned off for specific keys in the request input map and - response `data` map on a per-mount basis. 
- * Passthrough Request Headers: Request headers can now be selectively passed - through to backends on a per-mount basis. This is useful in various cases - when plugins are interacting with external services. - * HA for Google Cloud Storage: The GCS storage type now supports HA. - * UI support for identity: Add and edit entities, groups, and their associated - aliases. - * UI auth method support: Enable, disable, and configure all of the built-in - authentication methods. - * UI (Enterprise): View and edit Sentinel policies. - -IMPROVEMENTS: - - * core: Centralize TTL generation for leases in core [GH-4230] - * identity: API to update group-alias by ID [GH-4237] - * secret/cassandra: Update Cassandra storage delete function to not use batch - operations [GH-4054] - * storage/mysql: Allow setting max idle connections and connection lifetime - [GH-4211] - * storage/gcs: Add HA support [GH-4226] - * ui: Add Nomad to the list of available secret engines - * ui: Adds ability to set static headers to be returned by the UI - -BUG FIXES: - - * api: Fix retries not working [GH-4322] - * auth/gcp: Invalidate clients on config change - * auth/token: Revoke-orphan and tidy operations now correctly cleans up the - parent prefix entry in the underlying storage backend. These operations also - mark corresponding child tokens as orphans by removing the parent/secondary - index from the entries. [GH-4193] - * command: Re-add `-mfa` flag and migrate to OSS binary [GH-4223] - * core: Fix issue occurring from mounting two auth backends with the same path - with one mount having `auth/` in front [GH-4206] - * mfa: Invalidation of MFA configurations (Enterprise) - * replication: Fix a panic on some non-64-bit platforms - * replication: Fix invalidation of policies on performance secondaries - * secret/pki: When tidying if a value is unexpectedly nil, delete it and move - on [GH-4214] - * storage/s3: Fix panic if S3 returns no Content-Length header [GH-4222] - * ui: Fixed an issue where the UI was checking incorrect paths when operating - on transit keys. Capabilities are now checked when attempting to encrypt / - decrypt, etc. - * ui: Fixed IE 11 layout issues and JS errors that would stop the application - from running. - * ui: Fixed the link that gets rendered when a user doesn't have permissions - to view the root of a secret engine. The link now sends them back to the list - of secret engines. - * replication: Fix issue with DR secondaries when using mount specified local - paths. - * cli: Fix an issue where generating a dr operation token would not output the - token [GH-4328] - -## 0.9.6 (March 20th, 2018) - -DEPRECATIONS/CHANGES: - - * The AWS authentication backend now allows binds for inputs as either a - comma-delimited string or a string array. However, to keep consistency with - input and output, when reading a role the binds will now be returned as - string arrays rather than strings. - * In order to prefix-match IAM role and instance profile ARNs in AWS auth - backend, you now must explicitly opt-in by adding a `*` to the end of the - ARN. Existing configurations will be upgraded automatically, but when - writing a new role configuration the updated behavior will be used. - -FEATURES: - - * Replication Activation Enhancements: When activating a replication - secondary, a public key can now be fetched first from the target cluster. - This public key can be provided to the primary when requesting the - activation token. 
If provided, the public key will be used to perform a
-   Diffie-Hellman key exchange resulting in a shared key that encrypts the
-   contents of the activation token. The purpose is to protect against
-   accidental disclosure of the contents of the token if unwrapped by the wrong
-   party, given that the contents of the token are highly sensitive. If
-   accidentally unwrapped, the contents of the token are not usable by the
-   unwrapping party. It is important to note that just as a malicious operator
-   could unwrap the contents of the token, a malicious operator can pretend to
-   be a secondary and complete the Diffie-Hellman exchange on their own; this
-   feature provides defense in depth but still requires due diligence around
-   replication activation, including multiple eyes on the commands/tokens and
-   proper auditing.
-
-IMPROVEMENTS:
-
- * api: Update renewer grace period logic. It is no longer static, but rather
-   dynamically calculated based on the current lease duration after each
-   renew. [GH-4090]
- * auth/approle: Allow array input for bound_cidr_list [GH-4078]
- * auth/aws: Allow using lists in role bind parameters [GH-3907]
- * auth/aws: Allow binding by EC2 instance IDs [GH-3816]
- * auth/aws: Allow non-prefix-matched IAM role and instance profile ARNs
-   [GH-4071]
- * auth/ldap: Set a very large size limit on queries [GH-4169]
- * core: Log info notifications of revoked leases for all leases/reasons, not
-   just expirations [GH-4164]
- * physical/couchdb: Removed limit on the listing of items [GH-4149]
- * secret/pki: Support certificate policies [GH-4125]
- * secret/pki: Add ability to have CA:true encoded into intermediate CSRs, to
-   improve compatibility with some ADFS scenarios [GH-3883]
- * secret/transit: Allow selecting signature algorithm as well as hash
-   algorithm when signing/verifying [GH-4018]
- * server: Make sure `tls_disable_client_cert` is actually a true value rather
-   than just set [GH-4049]
- * storage/dynamodb: Allow specifying max retries for dynamo client [GH-4115]
- * storage/gcs: Allow specifying chunk size for transfers, which can reduce
-   memory utilization [GH-4060]
- * sys/capabilities: Add the ability to use multiple paths for capability
-   checking [GH-3663]
-
-BUG FIXES:
-
- * auth/aws: Fix honoring `max_ttl` when a corresponding role `ttl` is not also
-   set [GH-4107]
- * auth/okta: Fix honoring configured `max_ttl` value [GH-4110]
- * auth/token: If a periodic token being issued has a period greater than the
-   max_lease_ttl configured on the token store mount, truncate it. This matches
-   renewal behavior; before it was inconsistent between issuance and renewal.
-   [GH-4112]
- * cli: Improve error messages around `vault auth help` when there is no CLI
-   helper for a particular method [GH-4056]
- * cli: Fix autocomplete installation when using Fish as the shell [GH-4094]
- * secret/database: Properly honor mount-tuned max TTL [GH-4051]
- * secret/ssh: Return `key_bits` value when reading a role [GH-4098]
- * sys: When writing policies on a performance replication secondary, properly
-   forward requests to the primary [GH-4129]
-
-## 0.9.5 (February 26th, 2018)
-
-IMPROVEMENTS:
-
- * auth: Allow sending default_lease_ttl and max_lease_ttl values when enabling
-   auth methods.
[GH-4019] - * secret/database: Add list functionality to `database/config` endpoint - [GH-4026] - * physical/consul: Allow setting a specific service address [GH-3971] - * replication: When bootstrapping a new secondary, if the initial cluster - connection fails, Vault will attempt to roll back state so that - bootstrapping can be tried again, rather than having to recreate the - downstream cluster. This will still require fetching a new secondary - activation token. - -BUG FIXES: - - * auth/aws: Update libraries to fix regression verifying PKCS#7 identity - documents [GH-4014] - * listener: Revert to Go 1.9 for now to allow certificates with non-DNS names - in their DNS SANs to be used for Vault's TLS connections [GH-4028] - * replication: Fix issue with a performance secondary/DR primary node losing - its DR primary status when performing an update-primary operation - * replication: Fix issue where performance secondaries could be unable to - automatically connect to a performance primary after that performance - primary has been promoted to a DR primary from a DR secondary - * ui: Fix behavior when a value contains a `.` - -## 0.9.4 (February 20th, 2018) - -SECURITY: - - * Role Tags used with the EC2 style of AWS auth were being improperly parsed; - as a result they were not being used to properly restrict values. - Implementations following our suggestion of using these as defense-in-depth - rather than the only source of restriction should not have significant - impact. - -FEATURES: - - * **ChaCha20-Poly1305 support in `transit`**: You can now encrypt and decrypt - with ChaCha20-Poly1305 in `transit`. Key derivation and convergent - encryption is also supported. - * **Okta Push support in Okta Auth Backend**: If a user account has MFA - required within Okta, an Okta Push MFA flow can be used to successfully - finish authentication. - * **PKI Improvements**: Custom OID subject alternate names can now be set, - subject to allow restrictions that support globbing. Additionally, Country, - Locality, Province, Street Address, and Postal Code can now be set in - certificate subjects. - * **Manta Storage**: Joyent Triton Manta can now be used for Vault storage - * **Google Cloud Spanner Storage**: Google Cloud Spanner can now be used for - Vault storage - -IMPROVEMENTS: - - * auth/centrify: Add CLI helper - * audit: Always log failure metrics, even if zero, to ensure the values appear - on dashboards [GH-3937] - * cli: Disable color when output is not a TTY [GH-3897] - * cli: Add `-format` flag to all subcommands [GH-3897] - * cli: Do not display deprecation warnings when the format is not table - [GH-3897] - * core: If over a predefined lease count (256k), log a warning not more than - once a minute. Too many leases can be problematic for many of the storage - backends and often this number of leases is indicative of a need for - workflow improvements. 
[GH-3957]
- * secret/nomad: Have generated ACL tokens cap out at 64 characters [GH-4009]
- * secret/pki: Country, Locality, Province, Street Address, and Postal Code can
-   now be set on certificates [GH-3992]
- * secret/pki: UTF-8 Other Names can now be set in Subject Alternate Names in
-   issued certs; allowed values can be set per role and support globbing
-   [GH-3889]
- * secret/pki: Add a flag to make the common name optional on certs [GH-3940]
- * secret/pki: Ensure only DNS-compatible names go into DNS SANs; additionally,
-   properly handle IDNA transformations for these DNS names [GH-3953]
- * secret/ssh: Add `valid-principals` flag to CLI for CA mode [GH-3922]
- * storage/manta: Add Manta storage [GH-3270]
- * ui (Enterprise): Support for ChaCha20-Poly1305 keys in the transit engine.
-
-BUG FIXES:
-
- * api/renewer: Honor increment value in renew auth calls [GH-3904]
- * auth/approle: Fix inability to use limited-use-count secret IDs on
-   replication performance secondaries
- * auth/approle: Cleanup of secret ID accessors during tidy and removal of
-   dangling accessor entries [GH-3924]
- * auth/aws-ec2: Avoid masking of role tag response [GH-3941]
- * auth/cert: Verify DNS SANs in the authenticating certificate [GH-3982]
- * auth/okta: Return configured durations as seconds, not nanoseconds [GH-3871]
- * auth/okta: Get all okta groups for a user vs. default 200 limit [GH-4034]
- * auth/token: Token creation via the CLI no longer forces periodic token
-   creation. Passing an explicit zero value for the period no longer creates
-   periodic tokens. [GH-3880]
- * command: Fix interpreted formatting directives when printing raw fields
-   [GH-4005]
- * command: Correctly format output when using -field and -format flags at the
-   same time [GH-3987]
- * command/rekey: Re-add lost `stored-shares` parameter [GH-3974]
- * command/ssh: Create and reuse the api client [GH-3909]
- * command/status: Fix panic when status returns 500 from leadership lookup
-   [GH-3998]
- * identity: Fix race when creating entities [GH-3932]
- * plugin/gRPC: Fixed an issue with list requests and raw responses coming from
-   plugins using gRPC transport [GH-3881]
- * plugin/gRPC: Fix panic when special paths are not set [GH-3946]
- * secret/pki: Verify a name is a valid hostname before adding to DNS SANs
-   [GH-3918]
- * secret/transit: Fix auditing when reading a key after it has been backed up
-   or restored [GH-3919]
- * secret/transit: Fix storage/memory consistency when persistence fails
-   [GH-3959]
- * storage/consul: Validate that service names are RFC 1123 compliant [GH-3960]
- * storage/etcd3: Fix memory ballooning with standby instances [GH-3798]
- * storage/etcd3: Fix large lists (like token loading at startup) not being
-   handled [GH-3772]
- * storage/postgresql: Fix compatibility with versions using custom string
-   version tags [GH-3949]
- * storage/zookeeper: Update vendoring to fix freezing issues [GH-3896]
- * ui (Enterprise): Decoding the replication token should no longer error and
-   prevent enabling of a secondary replication cluster via the UI.
- * plugin/gRPC: Add connection info to the request object [GH-3997]
-
-## 0.9.3 (January 28th, 2018)
-
-A regression from a feature merge disabled the Nomad secrets backend in 0.9.2.
-This release re-enables the Nomad secrets backend; it is otherwise identical to
-0.9.2.
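As a usage note for the re-enabled Nomad secrets backend, a minimal sketch of generating a Nomad ACL token through it with the Go API client; the mount path `nomad/` and role name `my-role` are assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set, and that a role exists
	// at nomad/role/my-role on a mounted, configured Nomad backend.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Reading creds/<role> asks the backend to generate a leased Nomad ACL token.
	secret, err := client.Logical().Read("nomad/creds/my-role")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no credentials returned")
	}
	fmt.Println(secret.Data["secret_id"])
}
```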
-
-## 0.9.2 (January 26th, 2018)
-
-SECURITY:
-
- * Okta Auth Backend: While the Okta auth backend was successfully verifying
-   usernames and passwords, it was not checking the returned state of the
-   account, so accounts that had been marked as locked out could still be used
-   to log in. Only accounts in SUCCESS or PASSWORD_WARN states are now allowed.
- * Periodic Tokens: A regression in 0.9.1 meant that periodic tokens created by
-   the AppRole, AWS, and Cert auth backends would expire when the max TTL for
-   the backend/mount/system was hit instead of their stated behavior of living
-   as long as they are renewed. This is now fixed; existing tokens do not have
-   to be reissued as this was purely a regression in the renewal logic.
- * Seal Wrapping: During certain replication states, values written and marked
-   for seal wrapping may not be wrapped on the secondaries. This has been
-   fixed, and existing values will be wrapped on next read or write. This does
-   not affect the barrier keys.
-
-DEPRECATIONS/CHANGES:
-
- * `sys/health` DR Secondary Reporting: The `replication_dr_secondary` bool
-   returned by `sys/health` could be misleading since it would be `false` both
-   when a cluster was not a DR secondary but also when the node is a standby in
-   the cluster and has not yet fully received state from the active node. This
-   could cause health checks on LBs to decide that the node was acceptable for
-   traffic even though DR secondaries cannot handle normal Vault traffic. (In
-   other words, the bool could only convey "yes" or "no" but not "not sure
-   yet".) This has been replaced by `replication_dr_mode` and
-   `replication_perf_mode` which are string values that convey the current
-   state of the node; a value of `disabled` indicates that replication is
-   disabled or the state is still being discovered. As a result, an LB check
-   can positively verify that the node is both not `disabled` and is not a DR
-   secondary, and avoid sending traffic to it if either is true.
- * PKI Secret Backend Roles parameter types: For `ou` and `organization`
-   in role definitions in the PKI secret backend, input can now be a
-   comma-separated string or an array of strings. Reading a role will
-   now return arrays for these parameters.
- * Plugin API Changes: The plugin API has been updated to utilize golang's
-   context.Context package. Many function signatures now accept a context
-   object as the first parameter. Existing plugins will need to pull in the
-   latest Vault code and update their function signatures to begin using
-   context and the new gRPC transport.
-
-FEATURES:
-
- * **gRPC Backend Plugins**: Backend plugins now use gRPC for transport,
-   allowing them to be written in other languages.
- * **Brand New CLI**: Vault has a brand new CLI interface that is significantly
-   streamlined, supports autocomplete, and is almost entirely backwards
-   compatible.
- * **UI: PKI Secret Backend (Enterprise)**: Configure PKI secret backends,
-   create and browse roles and certificates, and issue and sign certificates via
-   the listed roles.
-
-IMPROVEMENTS:
-
- * auth/aws: Handle IAM headers produced by clients that formulate numbers as
-   ints rather than strings [GH-3763]
- * auth/okta: Support JSON lists when specifying groups and policies [GH-3801]
- * autoseal/hsm: Attempt reconnecting to the HSM on certain kinds of issues,
-   including HA scenarios for some Gemalto HSMs.
- (Enterprise) - * cli: Output password prompts to stderr to make it easier to pipe an output - token to another command [GH-3782] - * core: Report replication status in `sys/health` [GH-3810] - * physical/s3: Allow using paths with S3 for non-AWS deployments [GH-3730] - * physical/s3: Add ability to disable SSL for non-AWS deployments [GH-3730] - * plugins: Args for plugins can now be specified separately from the command, - allowing the same output format and input format for plugin information - [GH-3778] - * secret/pki: `ou` and `organization` can now be specified as a - comma-separated string or an array of strings [GH-3804] - * plugins: Plugins will fall back to using netrpc as the communication protocol - on older versions of Vault [GH-3833] - -BUG FIXES: - - * auth/(approle,aws,cert): Fix behavior where periodic tokens generated by - these backends could not have their TTL renewed beyond the system/mount max - TTL value [GH-3803] - * auth/aws: Fix error returned if `bound_iam_principal_arn` was given to an - existing role update [GH-3843] - * core/sealwrap: Speed improvements and bug fixes (Enterprise) - * identity: Delete group alias when an external group is deleted [GH-3773] - * legacymfa/duo: Fix intermittent panic when Duo could not be reached - [GH-2030] - * secret/database: Fix a location where a lock could potentially not be - released, leading to deadlock [GH-3774] - * secret/(all databases) Fix behavior where if a max TTL was specified but no - default TTL was specified the system/mount default TTL would be used but not - be capped by the local max TTL [GH-3814] - * secret/database: Fix an issue where plugins were not closed properly if they - failed to initialize [GH-3768] - * ui: mounting a secret backend will now properly set `max_lease_ttl` and - `default_lease_ttl` when specified - previously both fields set - `default_lease_ttl`. - -## 0.9.1 (December 21st, 2017) - -DEPRECATIONS/CHANGES: - - * AppRole Case Sensitivity: In prior versions of Vault, `list` operations - against AppRole roles would require preserving case in the role name, even - though most other operations within AppRole are case-insensitive with - respect to the role name. This has been fixed; existing roles will behave as - they have in the past, but new roles will act case-insensitively in these - cases. - * Token Auth Backend Roles parameter types: For `allowed_policies` and - `disallowed_policies` in role definitions in the token auth backend, input - can now be a comma-separated string or an array of strings. Reading a role - will now return arrays for these parameters. - * Transit key exporting: You can now mark a key in the `transit` backend as - `exportable` at any time, rather than just at creation time; however, once - this value is set, it still cannot be unset. - * PKI Secret Backend Roles parameter types: For `allowed_domains` and - `key_usage` in role definitions in the PKI secret backend, input - can now be a comma-separated string or an array of strings. Reading a role - will now return arrays for these parameters. - * SSH Dynamic Keys Method Defaults to 2048-bit Keys: When using the dynamic - key method in the SSH backend, the default is now to use 2048-bit keys if no - specific key bit size is specified. - * Consul Secret Backend lease handling: The `consul` secret backend can now - accept both strings and integer numbers of seconds for its lease value. The - value returned on a role read will be an integer number of seconds instead - of a human-friendly string. 
- * Unprintable characters not allowed in API paths: Unprintable characters are - no longer allowed in names in the API (paths and path parameters), with an - extra restriction on whitespace characters. Allowed characters are those - that are considered printable by Unicode plus spaces. - -FEATURES: - - * **Transit Backup/Restore**: The `transit` backend now supports a backup - operation that can export a given key, including all key versions and - configuration, as well as a restore operation allowing import into another - Vault. - * **gRPC Database Plugins**: Database plugins now use gRPC for transport, - allowing them to be written in other languages. - * **Nomad Secret Backend**: Nomad ACL tokens can now be generated and revoked - using Vault. - * **TLS Cert Auth Backend Improvements**: The `cert` auth backend can now - match against custom certificate extensions via exact or glob matching, and - additionally supports max_ttl and periodic token toggles. - -IMPROVEMENTS: - - * auth/cert: Support custom certificate constraints [GH-3634] - * auth/cert: Support setting `max_ttl` and `period` [GH-3642] - * audit/file: Setting a file mode of `0000` will now disable Vault from - automatically `chmod`ing the log file [GH-3649] - * auth/github: The legacy MFA system can now be used with the GitHub auth - backend [GH-3696] - * auth/okta: The legacy MFA system can now be used with the Okta auth backend - [GH-3653] - * auth/token: `allowed_policies` and `disallowed_policies` can now be specified - as a comma-separated string or an array of strings [GH-3641] - * command/server: The log level can now be specified with `VAULT_LOG_LEVEL` - [GH-3721] - * core: Period values from auth backends will now be checked and applied to the - TTL value directly by core on login and renewal requests [GH-3677] - * database/mongodb: Add optional `write_concern` parameter, which can be set - during database configuration. This establishes a session-wide [write - concern](https://docs.mongodb.com/manual/reference/write-concern/) for the - lifecycle of the mount [GH-3646] - * http: Request path containing non-printable characters will return 400 - Bad - Request [GH-3697] - * mfa/okta: Filter a given email address as a login filter, allowing operation - when login email and account email are different - * plugins: Make Vault more resilient when unsealing when plugins are - unavailable [GH-3686] - * secret/pki: `allowed_domains` and `key_usage` can now be specified - as a comma-separated string or an array of strings [GH-3642] - * secret/ssh: Allow 4096-bit keys to be used in dynamic key method [GH-3593] - * secret/consul: The Consul secret backend now uses the value of `lease` set - on the role, if set, when renewing a secret. 
[GH-3796] - * storage/mysql: Don't attempt database creation if it exists, which can help - under certain permissions constraints [GH-3716] - -BUG FIXES: - - * api/status (enterprise): Fix status reporting when using an auto seal - * auth/approle: Fix case-sensitive/insensitive comparison issue [GH-3665] - * auth/cert: Return `allowed_names` on role read [GH-3654] - * auth/ldap: Fix incorrect control information being sent [GH-3402] [GH-3496] - [GH-3625] [GH-3656] - * core: Fix seal status reporting when using an autoseal - * core: Add creation path to wrap info for a control group token - * core: Fix potential panic that could occur using plugins when a node - transitioned from active to standby [GH-3638] - * core: Fix memory ballooning when a connection would connect to the cluster - port and then go away -- redux! [GH-3680] - * core: Replace recursive token revocation logic with depth-first logic, which - can avoid hitting stack depth limits in extreme cases [GH-2348] - * core: When doing a read on configured audited-headers, properly handle case - insensitivity [GH-3701] - * core/pkcs11 (enterprise): Fix panic when PKCS#11 library is not readable - * database/mysql: Allow the creation statement to use commands that are not yet - supported by the prepare statement protocol [GH-3619] - * plugin/auth-gcp: Fix IAM roles when using `allow_gce_inference` [VPAG-19] - -## 0.9.0.1 (November 21st, 2017) (Enterprise Only) - -IMPROVEMENTS: - - * auth/gcp: Support seal wrapping of configuration parameters - * auth/kubernetes: Support seal wrapping of configuration parameters - -BUG FIXES: - - * Fix an upgrade issue with some physical backends when migrating from legacy - HSM stored key support to the new Seal Wrap mechanism (Enterprise) - * mfa: Add the 'mfa' flag that was removed by mistake [GH-4223] - -## 0.9.0 (November 14th, 2017) - -DEPRECATIONS/CHANGES: - - * HSM config parameter requirements: When using Vault with an HSM, a new - parameter is required: `hmac_key_label`. This performs a similar function to - `key_label` but for the HMAC key Vault will use. Vault will generate a - suitable key if this value is specified and `generate_key` is set true. - * API HTTP client behavior: When calling `NewClient` the API no longer - modifies the provided client/transport. In particular this means it will no - longer enable redirection limiting and HTTP/2 support on custom clients. It - is suggested that if you want to make changes to an HTTP client that you use - one created by `DefaultConfig` as a starting point. - * AWS EC2 client nonce behavior: The client nonce generated by the backend - that gets returned along with the authentication response will be audited in - plaintext. If this is undesired, the clients can choose to supply a custom - nonce to the login endpoint. The custom nonce set by the client will from - now on, not be returned back with the authentication response, and hence not - audit logged. - * AWS Auth role options: The API will now error when trying to create or - update a role with the mutually-exclusive options - `disallow_reauthentication` and `allow_instance_migration`. - * SSH CA role read changes: When reading back a role from the `ssh` backend, - the TTL/max TTL values will now be an integer number of seconds rather than - a string. This better matches the API elsewhere in Vault. 
- * SSH role list changes: When listing roles from the `ssh` backend via the API,
-   the response data will additionally return a `key_info` map that will contain
-   a map of each key with a corresponding object containing the `key_type`.
- * More granularity in audit logs: Audit request and response entries are still
-   in RFC3339 format but now have a granularity of nanoseconds.
- * High availability related values have been moved out of the `storage` and
-   `ha_storage` stanzas, and into the top-level configuration. `redirect_addr`
-   has been renamed to `api_addr`. The stanzas still support accepting
-   HA-related values to maintain backward compatibility, but top-level values
-   will take precedence.
- * A new `seal` stanza has been added to the configuration file, which is
-   optional and enables configuration of the seal type to use for additional
-   data protection, such as using HSM or Cloud KMS solutions to encrypt and
-   decrypt data.
-
-FEATURES:
-
- * **RSA Support for Transit Backend**: Transit backend can now generate RSA
-   keys which can be used for encryption and signing. [GH-3489] A sketch of a
-   sign/verify round trip follows this list.
- * **Identity System**: Now in open source and with significant enhancements,
-   Identity is an integrated system for understanding users across tokens and
-   enabling easier management of users directly and via groups.
- * **External Groups in Identity**: Vault can now automatically assign users
-   and systems to groups in Identity based on their membership in external
-   groups.
- * **Seal Wrap / FIPS 140-2 Compatibility (Enterprise)**: Vault can now take
-   advantage of FIPS 140-2-certified HSMs to ensure that Critical Security
-   Parameters are protected in a compliant fashion. Vault's implementation has
-   received a statement of compliance from Leidos.
- * **Control Groups (Enterprise)**: Require multiple members of an Identity
-   group to authorize a requested action before it is allowed to run.
- * **Cloud Auto-Unseal (Enterprise)**: Automatically unseal Vault using AWS KMS
-   and GCP CKMS.
- * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel
-   to create extremely flexible access control policies -- even on
-   unauthenticated endpoints.
- * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using
-   auto-unsealing functionality, the `rekey` operation is now supported; it
-   uses recovery keys to authorize the master key rekey.
- * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using
-   Disaster Recovery replication, a token can be created that can be used to
-   authorize actions such as promotion and updating primary information, rather
-   than using recovery keys.
- * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using
-   auto-unsealing, a request to unseal Vault can be triggered by a threshold of
-   recovery keys, rather than requiring the Vault process to be restarted.
- * **UI Redesign (Enterprise)**: An all-new experience for the Vault Enterprise
-   UI. The look and feel has been completely redesigned to give users a better
-   experience and make managing secrets fast and easy.
- * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend,
-   create and browse roles, and use them to sign keys or generate one-time
-   passwords.
- * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS
-   backend via the Vault Enterprise UI. In addition, you can create roles,
-   browse them, and generate IAM credentials from them in the UI.
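For the RSA transit support referenced at the top of the list above, a minimal sketch of a sign/verify round trip with the Go API client; the mount path `transit/` and key name `rsa-key` are assumptions:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create an RSA transit key; "rsa-2048" is one of the supported key types.
	if _, err := client.Logical().Write("transit/keys/rsa-key", map[string]interface{}{
		"type": "rsa-2048",
	}); err != nil {
		log.Fatal(err)
	}

	// Transit expects the input to be base64-encoded.
	input := base64.StdEncoding.EncodeToString([]byte("payload"))

	sig, err := client.Logical().Write("transit/sign/rsa-key", map[string]interface{}{
		"input": input,
	})
	if err != nil {
		log.Fatal(err)
	}

	verified, err := client.Logical().Write("transit/verify/rsa-key", map[string]interface{}{
		"input":     input,
		"signature": sig.Data["signature"],
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", verified.Data["valid"])
}
```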
- -IMPROVEMENTS: - - * api: Add ability to set custom headers on each call [GH-3394] - * command/server: Add config option to disable requesting client certificates - [GH-3373] - * auth/aws: Max retries can now be customized for the AWS client [GH-3965] - * core: Disallow mounting underneath an existing path, not just over [GH-2919] - * physical/file: Use `700` as permissions when creating directories. The files - themselves were `600` and are all encrypted, but this doesn't hurt. - * secret/aws: Add ability to use custom IAM/STS endpoints [GH-3416] - * secret/aws: Max retries can now be customized for the AWS client [GH-3965] - * secret/cassandra: Work around Cassandra ignoring consistency levels for a - user listing query [GH-3469] - * secret/pki: Private keys can now be marshalled as PKCS#8 [GH-3518] - * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON - arrays [GH-3409] - * secret/ssh: Role TTL/max TTL can now be specified as either a string or an - integer [GH-3507] - * secret/transit: Sign and verify operations now support a `none` hash - algorithm to allow signing/verifying pre-hashed data [GH-3448] - * secret/database: Add the ability to glob allowed roles in the Database Backend [GH-3387] - * ui (enterprise): Support for RSA keys in the transit backend - * ui (enterprise): Support for DR Operation Token generation, promoting, and - updating primary on DR Secondary clusters - -BUG FIXES: - - * api: Fix panic when setting a custom HTTP client but with a nil transport - [GH-3435] [GH-3437] - * api: Fix authing to the `cert` backend when the CA for the client cert is - not known to the server's listener [GH-2946] - * auth/approle: Create role ID index during read if a role is missing one [GH-3561] - * auth/aws: Don't allow mutually exclusive options [GH-3291] - * auth/radius: Fix logging in in some situations [GH-3461] - * core: Fix memleak when a connection would connect to the cluster port and - then go away [GH-3513] - * core: Fix panic if a single-use token is used to step-down or seal [GH-3497] - * core: Set rather than add headers to prevent some duplicated headers in - responses when requests were forwarded to the active node [GH-3485] - * physical/etcd3: Fix some listing issues due to how etcd3 does prefix - matching [GH-3406] - * physical/etcd3: Fix case where standbys can lose their etcd client lease - [GH-3031] - * physical/file: Fix listing when underscores are the first component of a - path [GH-3476] - * plugins: Allow response errors to be returned from backend plugins [GH-3412] - * secret/transit: Fix panic if the length of the input ciphertext was less - than the expected nonce length [GH-3521] - * ui (enterprise): Reinstate support for generic secret backends - this was - erroneously removed in a previous release - -## 0.8.3 (September 19th, 2017) - -CHANGES: - - * Policy input/output standardization: For all built-in authentication - backends, policies can now be specified as a comma-delimited string or an - array if using JSON as API input; on read, policies will be returned as an - array; and the `default` policy will not be forcefully added to policies - saved in configurations. Please note that the `default` policy will continue - to be added to generated tokens, however, rather than backends adding - `default` to the given set of input policies (in some cases, and not in - others), the stored set will reflect the user-specified set. 
- * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the
-   endpoint would not modify the Issuer in the generated certificate, leaving
-   the output self-issued. Although theoretically valid, in practice crypto
-   stacks were unhappy validating paths containing such certs. As a result,
-   `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer
-   DN of the generated certificate.
- * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely
-   useful in break-glass or support scenarios, it is also extremely dangerous.
-   As of now, a configuration file option `raw_storage_endpoint` must be set in
-   order to enable this API endpoint. Once set, the available functionality has
-   been enhanced slightly; it now supports listing and decrypting most of
-   Vault's core data structures, except for the encryption keyring itself.
- * `generic` is now `kv`: To better reflect its actual use, the `generic`
-   backend is now `kv`. Using `generic` will still work for backwards
-   compatibility.
-
-FEATURES:
-
- * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault
-   using machine credentials.
- * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts
-   can now authenticate to Vault using JWT tokens.
-
-IMPROVEMENTS:
-
- * configuration: Provide a config option to store Vault server's process ID
-   (PID) in a file [GH-3321]
- * mfa (Enterprise): Add the ability to use identity metadata in username format
- * mfa/okta (Enterprise): Add support for configuring base_url for API calls
- * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value
-   longer than the signing CA certificate's NotAfter value. [GH-3325]
- * sys/raw: Raw storage access is now disabled by default [GH-3329]
-
-BUG FIXES:
-
- * auth/okta: Fix regression that removed the ability to set base_url [GH-3313]
- * core: Fix panic while loading leases at startup on ARM processors
-   [GH-3314]
- * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key
-   [GH-3325]
-
-## 0.8.2.1 (September 11th, 2017) (Enterprise Only)
-
-BUG FIXES:
-
- * Fix an issue upgrading to 0.8.2 for Enterprise customers.
-
-## 0.8.2 (September 5th, 2017)
-
-SECURITY:
-
-* In prior versions of Vault, if authenticating via AWS IAM and requesting a
-  periodic token, the period was not properly respected. This could lead to
-  tokens expiring unexpectedly, or a token lifetime being longer than expected.
-  Upon token renewal with Vault 0.8.2 the period will be properly enforced.
-
-DEPRECATIONS/CHANGES:
-
-* `vault ssh` users should supply `-mode` and `-role` to reduce the number of
-  API calls. A future version of Vault will mark these optional values as
-  required. Failure to supply `-mode` or `-role` will result in a warning.
-* Vault plugins will first briefly run a restricted version of the plugin to
-  fetch metadata, and then lazy-load the plugin on first request to prevent
-  crash/deadlock of Vault during the unseal process. Plugins will need to be
-  built with the latest changes in order for them to run properly.
-
-FEATURES:
-
-* **Lazy Lease Loading**: On startup, Vault will now load leases from storage
-  in a lazy fashion (token checks and revocation/renewal requests still force
-  an immediate load). For larger installations this can significantly reduce
-  downtime when switching active nodes or bringing Vault up from cold start.
-* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA
-  backend for authenticating to machines.
It also supports remote host key - verification through the SSH CA backend, if enabled. -* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports - signing self-issued CA certs. This is useful when switching root CAs. - -IMPROVEMENTS: - - * audit/file: Allow specifying `stdout` as the `file_path` to log to standard - output [GH-3235] - * auth/aws: Allow wildcards in `bound_iam_principal_arn` [GH-3213] - * auth/okta: Compare groups case-insensitively since Okta is only - case-preserving [GH-3240] - * auth/okta: Standardize Okta configuration APIs across backends [GH-3245] - * cli: Add subcommand autocompletion that can be enabled with - `vault -autocomplete-install` [GH-3223] - * cli: Add ability to handle wrapped responses when using `vault auth`. What - is output depends on the other given flags; see the help output for that - command for more information. [GH-3263] - * core: TLS cipher suites used for cluster behavior can now be set via - `cluster_cipher_suites` in configuration [GH-3228] - * core: The `plugin_name` can now either be specified directly as part of the - parameter or within the `config` object when mounting a secret or auth backend - via `sys/mounts/:path` or `sys/auth/:path` respectively [GH-3202] - * core: It is now possible to update the `description` of a mount when - mount-tuning, although this must be done through the HTTP layer [GH-3285] - * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and - retrying the operation [GH-3269] - * secret/pki: TTLs can now be specified as a string or an integer number of - seconds [GH-3270] - * secret/pki: Self-issued certs can now be signed via - `pki/root/sign-self-issued` [GH-3274] - * storage/gcp: Use application default credentials if they exist [GH-3248] - -BUG FIXES: - - * auth/aws: Properly use role-set period values for IAM-derived token renewals - [GH-3220] - * auth/okta: Fix updating organization/ttl/max_ttl after initial setting - [GH-3236] - * core: Fix PROXY when underlying connection is TLS [GH-3195] - * core: Policy-related commands would sometimes fail to act case-insensitively - [GH-3210] - * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address - [GH-3268] - * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. - [GH-3255] - * plugins: Skip mounting plugin-based secret and credential mounts when setting - up mounts if the plugin is no longer present in the catalog. [GH-3255] - -## 0.8.1 (August 16th, 2017) - -DEPRECATIONS/CHANGES: - - * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already - exists will now return a `204` instead of overwriting an existing root. If - you want to recreate the root, first run a delete operation on `pki/root` - (requires `sudo` capability), then generate it again. - -FEATURES: - - * **Oracle Secret Backend**: There is now an external plugin to support leased - credentials for Oracle databases (distributed separately). - * **GCP IAM Auth Backend**: There is now an authentication backend that allows - using GCP IAM credentials to retrieve Vault tokens. This is available as - both a plugin and built-in to Vault. - * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can - now be used for MFA with the new path-based MFA introduced in Vault - Enterprise 0.8. 
- * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports - specifying permitted DNS domains for CA certificates, allowing you to - narrowly scope the set of domains for which a CA can issue or sign child - certificates. - * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to - reload using the `sys/plugins/reload/backend` endpoint and providing either - the plugin name or the mounts to reload. - * **Self-Reloading Plugins**: The plugin system will now attempt to reload a - crashed or stopped plugin, once per request. - -IMPROVEMENTS: - - * auth/approle: Allow array input for policies in addition to comma-delimited - strings [GH-3163] - * plugins: Send logs through Vault's logger rather than stdout [GH-3142] - * secret/pki: Add `pki/root` delete operation [GH-3165] - * secret/pki: Don't overwrite an existing root cert/key when calling generate - [GH-3165] - -BUG FIXES: - - * aws: Don't prefer a nil HTTP client over an existing one [GH-3159] - * core: If there is an error when checking for create/update existence, return - 500 instead of 400 [GH-3162] - * secret/database: Avoid creating usernames that are too long for legacy MySQL - [GH-3138] - -## 0.8.0 (August 9th, 2017) - -SECURITY: - - * We've added a note to the docs about the way the GitHub auth backend works - as it may not be readily apparent that GitHub personal access tokens, which - are used by the backend, can be used for unauthorized access if they are - stolen from third party services and access to Vault is public. - -DEPRECATIONS/CHANGES: - - * Database Plugin Backends: Passwords generated for these backends now - enforce stricter password requirements, as opposed to the previous behavior - of returning a randomized UUID. Passwords are of length 20, and have a `A1a-` - characters prepended to ensure stricter requirements. No regressions are - expected from this change. (For database backends that were previously - substituting underscores for hyphens in passwords, this will remain the - case.) - * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`, - `sys/revoke-force` have been deprecated and relocated under `sys/leases`. - Additionally, the deprecated path `sys/revoke-force` now requires the `sudo` - capability. - * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint - is now unauthenticated. This allows introspection of the wrapping info by - clients that only have the wrapping token without then invalidating the - token. Validation functions/checks are still performed on the token. - -FEATURES: - - * **Cassandra Storage**: Cassandra can now be used for Vault storage - * **CockroachDB Storage**: CockroachDB can now be used for Vault storage - * **CouchDB Storage**: CouchDB can now be used for Vault storage - * **SAP HANA Database Plugin**: The `databases` backend can now manage users - for SAP HANA databases - * **Plugin Backends**: Vault now supports running secret and auth backends as - plugins. Plugins can be mounted like normal backends and can be developed - independently from Vault. - * **PROXY Protocol Support** Vault listeners can now be configured to honor - PROXY protocol v1 information to allow passing real client IPs into Vault. A - list of authorized addresses (IPs or subnets) can be defined and - accept/reject behavior controlled. 
- * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI - now supports lookup and listing of leases and the associated actions from the - `sys/leases` endpoints in the API. These are located in the new top level - navigation item "Leases". - * **Filtered Mounts for Performance Mode Replication**: Whitelists or - blacklists of mounts can be defined per-secondary to control which mounts - are actually replicated to that secondary. This can allow targeted - replication of specific sets of data to specific geolocations/datacenters. - * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new - replication mode, Disaster Recovery (DR), that performs full real-time - replication (including tokens and leases) to DR secondaries. DR secondaries - cannot handle client requests, but can be promoted to primary as needed for - failover. - * **Manage New Replication Features in the Vault Enterprise UI**: Support for - Replication features in Vault Enterprise UI has expanded to include new DR - Replication mode and management of Filtered Mounts in Performance Replication - mode. - * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows - correlation of users across tokens. At present this is only used for MFA, - but will be the foundation of many other features going forward. - * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise - Only)**: A brand new MFA system built on top of Identity allows MFA - (currently Duo Push, Okta Push, and TOTP) for any authenticated path within - Vault. MFA methods can be configured centrally, and TOTP keys live within - the user's Identity information to allow using the same key across tokens. - Specific MFA method(s) required for any given path within Vault can be - specified in normal ACL path statements. 
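The path-based MFA feature described above is driven entirely by ACL policy. As a minimal sketch (the method name `my_totp`, its configuration under `sys/mfa/method/`, and the `secret/protected` path are illustrative assumptions, not taken from this patch series), an Enterprise policy can name the required MFA methods directly in a path stanza:

```
$ vault policy-write mfa-protected -<<EOF
path "secret/protected" {
  capabilities = ["read"]
  mfa_methods  = ["my_totp"]
}
EOF
```

With a policy like this in place, a token reading `secret/protected` would also have to satisfy the `my_totp` method configured under `sys/mfa/method/totp/my_totp`.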
-
-IMPROVEMENTS:
-
- * api: Add client method for a secret renewer background process [GH-2886]
- * api: Add `RenewTokenAsSelf` [GH-2886]
- * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env
-   var or with a new API function [GH-2956]
- * api/cli: Client will now attempt to look up SRV records for the given Vault
-   hostname [GH-3035]
- * audit/socket: Enhance reconnection logic and don't require the connection to
-   be established at unseal time [GH-2934]
- * audit/file: Opportunistically try re-opening the file on error [GH-2999]
- * auth/approle: Add role name to token metadata [GH-2985]
- * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [GH-2915]
- * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env
-   var [GH-2956]
- * command/auth: Add `-token-only` flag to `vault auth` that returns only the
-   token on stdout and does not store it via the token helper [GH-2855]
- * core: CORS allowed origins can now be configured [GH-2021]
- * core: Add metrics counters for audit log failures [GH-2863]
- * cors: Allow setting allowed headers via the API instead of always using
-   wildcard [GH-3023]
- * secret/ssh: Allow specifying the key ID format using template values for CA
-   type [GH-2888]
- * server: Add `tls_client_ca_file` option for specifying a CA file to use for
-   client certificate verification when `tls_require_and_verify_client_cert` is
-   enabled [GH-3034]
- * storage/cockroachdb: Add CockroachDB storage backend [GH-2713]
- * storage/couchdb: Add CouchDB storage backend [GH-2880]
- * storage/mssql: Add `max_parallel` [GH-3026]
- * storage/postgresql: Add `max_parallel` [GH-3026]
- * storage/postgresql: Improve listing speed [GH-2945]
- * storage/s3: More efficient paging when an object has a lot of subobjects
-   [GH-2780]
- * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [GH-3084]
- * sys/wrapping: Wrapped tokens now store the original request path of the data
-   [GH-3100]
- * telemetry: Add support for DogStatsD [GH-2490]
-
-BUG FIXES:
-
- * api/health: Don't treat standby `429` codes as an error [GH-2850]
- * api/leases: Fix lease lookup returning lease properties at the top level
- * audit: Fix panic when audit logging a read operation on an asymmetric
-   `transit` key [GH-2958]
- * auth/approle: Fix panic when secret and cidr list not provided in role
-   [GH-3075]
- * auth/aws: Look up proper account ID on token renew [GH-3012]
- * auth/aws: Store IAM header in all cases when it changes [GH-3004]
- * auth/ldap: Verify given certificate is PEM encoded instead of failing
-   silently [GH-3016]
- * auth/token: Don't allow using the same token ID twice when manually
-   specifying [GH-2916]
- * cli: Fix issue with parsing keys that start with special characters [GH-2998]
- * core: Relocated `sys/leases/renew` returns same payload as original
-   `sys/renew` endpoint [GH-2891]
- * secret/ssh: Fix panic when signing with incorrect key type [GH-3072]
- * secret/totp: Ensure codes can only be used once. This makes some automated
-   workflows harder but complies with the RFC. [GH-2908]
- * secret/transit: Fix locking when creating a key with unsupported options
-   [GH-2974]
-
-## 0.7.3 (June 7th, 2017)
-
-SECURITY:
-
- * Cert auth backend now checks validity of individual certificates: In
-   previous versions of Vault, validity (e.g. expiration) of individual leaf
-   certificates added for authentication was not checked. This was done to make
-   it easier for administrators to control lifecycles of individual
-   certificates added to the backend, e.g. the authentication material being
-   checked was access to that specific certificate's private key rather than
-   all private keys signed by a CA. However, this behavior is often unexpected
-   and as a result can lead to insecure deployments, so we are now validating
-   these certificates as well.
- * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2
-   caused the HMACing of any App-ID information stored in paths (including
-   actual app-IDs and user-IDs) to be unsalted and written as-is from the API.
-   In 0.7.3 any such paths will be automatically changed to salted versions on
-   access (e.g. login or read); however, if you created new app-IDs or user-IDs
-   in 0.7.1/0.7.2, you may want to consider whether any users with access to
-   Vault's underlying data store may have intercepted these values, and
-   revoke/roll them.
-
-DEPRECATIONS/CHANGES:
-
- * Step-Down is Forwarded: When a step-down is issued against a non-active node
-   in an HA cluster, it will now forward the request to the active node.
-
-FEATURES:
-
- * **ed25519 Signing/Verification in Transit with Key Derivation**: The
-   `transit` backend now supports generating
-   [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification
-   functionality. These keys support derivation, allowing you to modify the
-   actual encryption key used by supplying a `context` value.
- * **Key Version Specification for Encryption in Transit**: You can now specify
-   the version of a key you wish to use to generate a signature, ciphertext, or
-   HMAC. This can be controlled by the `min_encryption_version` key
-   configuration property.
- * **Replication Primary Discovery (Enterprise)**: Replication primaries will
-   now advertise the addresses of their local HA cluster members to replication
-   secondaries. This helps recovery if the primary active node goes down and
-   neither service discovery nor load balancers are in use to steer clients.
-
-IMPROVEMENTS:
-
- * api/health: Add Sys().Health() [GH-2805]
- * audit: Add auth information to requests that error out [GH-2754]
- * command/auth: Add `-no-store` option that prevents the auth command from
-   storing the returned token into the configured token helper [GH-2809]
- * core/forwarding: Request forwarding now heartbeats to prevent unused
-   connections from being terminated by firewalls or proxies
- * plugins/databases: Add MongoDB as an internal database plugin [GH-2698]
- * storage/dynamodb: Add a method for checking the existence of children,
-   speeding up deletion operations in the DynamoDB storage backend [GH-2722]
- * storage/mysql: Add max_parallel parameter to MySQL backend [GH-2760]
- * secret/databases: Support listing connections [GH-2823]
- * secret/databases: Support custom renewal statements in Postgres database
-   plugin [GH-2788]
- * secret/databases: Use the role name as part of generated credentials
-   [GH-2812]
- * ui (Enterprise): Transit key and secret browsing UI handle large lists better
- * ui (Enterprise): root tokens are no longer persisted
- * ui (Enterprise): support for mounting Database and TOTP secret backends
-
-BUG FIXES:
-
- * auth/app-id: Fix regression causing loading of salts to be skipped
- * auth/aws: Improve EC2 describe instances performance [GH-2766]
- * auth/aws: Fix lookup of some instance profile ARNs [GH-2802]
- * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various
-   points (e.g. renewal time) more robust [GH-2814]
- * auth/aws: Properly honor configured period when using IAM authentication
-   [GH-2825]
- * auth/aws: Check that a bound IAM principal is not empty (in the current
-   state of the role) before requiring it match the previously authenticated
-   client [GH-2781]
- * auth/cert: Fix panic on renewal [GH-2749]
- * auth/cert: Certificate verification for non-CA certs [GH-2761]
- * core/acl: Prevent race condition when compiling ACLs in some scenarios
-   [GH-2826]
- * secret/database: Increase wrapping token TTL; in a loaded scenario it could
-   be too short
- * secret/generic: Allow integers to be set as the value of `ttl` field as the
-   documentation claims is supported [GH-2699]
- * secret/ssh: Added host key callback to ssh client config [GH-2752]
- * storage/s3: Avoid a panic when some bad data is returned [GH-2785]
- * storage/dynamodb: Fix list functions working improperly on Windows [GH-2789]
- * storage/file: Don't leak file descriptors in some error cases
- * storage/swift: Fix pre-v3 project/tenant name reading [GH-2803]
-
-## 0.7.2 (May 8th, 2017)
-
-BUG FIXES:
-
- * audit: Fix auditing entries containing certain kinds of time values
-   [GH-2689]
-
-## 0.7.1 (May 5th, 2017)
-
-DEPRECATIONS/CHANGES:
-
- * LDAP Auth Backend: Group membership queries will now run as the `binddn`
-   user when `binddn`/`bindpass` are configured, rather than as the
-   authenticating user as was the case previously.
-
-FEATURES:
-
- * **AWS IAM Authentication**: IAM principals can get Vault tokens
-   automatically, opening AWS-based authentication to users, ECS containers,
-   Lambda instances, and more. Signed client identity information retrieved
-   using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS
-   service before issuing a Vault token. This backend is unified with the
-   `aws-ec2` authentication backend under the name `aws`, and allows additional
-   EC2-related restrictions to be applied during the IAM authentication; the
-   previous EC2 behavior is also still available. [GH-2441]
- * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your
-   Vault physical data store [GH-2546]
- * **Lease Listing and Lookup**: You can now introspect a lease to get its
-   creation and expiration properties via `sys/leases/lookup`; with `sudo`
-   capability you can also list leases for lookup, renewal, or revocation via
-   that endpoint. Various lease functions (renew, revoke, revoke-prefix,
-   revoke-force) have also been relocated to `sys/leases/`, but they also work
-   at the old paths for compatibility. Reading (but not listing) leases via
-   `sys/leases/lookup` is now a part of the current `default` policy. [GH-2650]
- * **TOTP Secret Backend**: You can now store multi-factor authentication keys
-   in Vault and use the API to retrieve time-based one-time use passwords on
-   demand. The backend can also be used to generate a new key and validate
-   passwords generated by that key. [GH-2492]
- * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend
-   combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra
-   backends. It also provides a plugin interface for extendability through
-   custom databases. [GH-2200]
-
-IMPROVEMENTS:
-
- * auth/cert: Support for constraints on subject Common Name and DNS/email
-   Subject Alternate Names in certificates [GH-2595]
- * auth/ldap: Use the binding credentials to search group membership rather
-   than the user credentials [GH-2534]
- * cli/revoke: Add `-self` option to allow revoking the currently active token
-   [GH-2596]
- * core: Randomize x coordinate in Shamir shares [GH-2621]
- * replication: Fix a bug when enabling `approle` on a primary before
-   secondaries were connected
- * replication: Add heartbeating to ensure firewalls don't kill connections to
-   primaries
- * secret/pki: Add `no_store` option that allows certificates to be issued
-   without being stored. This removes the ability to look up and/or add to a
-   CRL but helps with scaling to very large numbers of certificates. [GH-2565]
- * secret/pki: If used with a role parameter, the `sign-verbatim/`
-   endpoint honors the values of `generate_lease`, `no_store`, `ttl` and
-   `max_ttl` from the given role [GH-2593]
- * secret/pki: Add role parameter `allow_glob_domains` that enables defining
-   names in `allowed_domains` containing `*` glob patterns [GH-2517]
- * secret/pki: Update certificate storage to not use characters that are not
-   supported on some filesystems [GH-2575]
- * storage/etcd3: Add `discovery_srv` option to query for SRV records to find
-   servers [GH-2521]
- * storage/s3: Support `max_parallel` option to limit concurrent outstanding
-   requests [GH-2466]
- * storage/s3: Use pooled transport for http client [GH-2481]
- * storage/swift: Allow domain values for V3 authentication [GH-2554]
- * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more
-   cleanup cases [GH-2452]
-
-BUG FIXES:
-
- * api: Respect a configured path in Vault's address [GH-2588]
- * auth/aws-ec2: New bounds added as criteria to allow role creation [GH-2600]
- * auth/ldap: Don't lowercase groups attached to users [GH-2613]
- * cli: Don't panic if `vault write` is used with the `force` flag but no path
-   [GH-2674]
- * core: Help operations should request forwarding since standbys may not have
-   appropriate info [GH-2677]
- * replication: Fix enabling secondaries when certain mounts already existed on
-   the primary
- * secret/mssql: Update mssql driver to support queries with colons [GH-2610]
- * secret/pki: Don't lowercase O/OU values in certs [GH-2555]
- * secret/pki: Don't attempt to validate IP SANs if none are provided [GH-2574]
- * secret/ssh: Don't automatically lowercase principals in issued SSH certs
-   [GH-2591]
- * storage/consul: Properly handle state events rather than timing out
-   [GH-2548]
- * storage/etcd3: Ensure locks are released if client is improperly shut down
-   [GH-2526]
-
-## 0.7.0 (March 21st, 2017)
-
-SECURITY:
-
- * Common name not being validated when `exclude_cn_from_sans` option used in
-   `pki` backend: When using a role in the `pki` backend that specified the
-   `exclude_cn_from_sans` option, the common name would not then be properly
-   validated against the role's constraints. This has been fixed. We recommend
-   any users of this feature to upgrade to 0.7 as soon as feasible.
-
-DEPRECATIONS/CHANGES:
-
- * List Operations Always Use Trailing Slash: Any list operation, whether via
-   the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to
-   have a trailing slash. This makes policy writing more predictable, as it
-   means clients will no longer work or fail based on which client they're
-   using or which HTTP verb they're using. However, it also means that policies
-   allowing `list` capability must be carefully checked to ensure that they
-   contain a trailing slash; some policies may need to be split into multiple
-   stanzas to accommodate.
- * PKI Defaults to Unleased Certificates: When issuing certificates from the
-   PKI backend, by default, no leases will be issued. If you want to manually
-   revoke a certificate, its serial number can be used with the `pki/revoke`
-   endpoint. Issuing leases is still possible by enabling the `generate_lease`
-   toggle in PKI role entries (this will default to `true` for upgrades, to
-   keep existing behavior), which will allow using lease IDs to revoke
-   certificates. For installations issuing large numbers of certificates (tens
-   to hundreds of thousands, or millions), this will significantly improve
-   Vault startup time since leases associated with these certificates will not
-   have to be loaded; however note that it also means that revocation of a
-   token used to issue
+ guidance, linking out to relevant

From b16087cd1496af5c8517109fd51cd0a4c2b2d596 Mon Sep 17 00:00:00 2001
From: will-quan-bird <54047629+will-quan-bird@users.noreply.github.com>
Date: Fri, 25 Oct 2019 09:01:01 -0700
Subject: [PATCH 18/90] allows emails@sign to be within the aws secrets engine
 path (#7553)

---
 builtin/logical/aws/path_roles.go | 2 +-
 builtin/logical/aws/path_user.go  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go
index 6df1459cc1c9..6633c48a62ac 100644
--- a/builtin/logical/aws/path_roles.go
+++ b/builtin/logical/aws/path_roles.go
@@ -37,7 +37,7 @@ func pathListRoles(b *backend) *framework.Path {
 
 func pathRoles(b *backend) *framework.Path {
 	return &framework.Path{
-		Pattern: "roles/" + framework.GenericNameRegex("name"),
+		Pattern: "roles/" + framework.GenericNameWithAtRegex("name"),
 		Fields: map[string]*framework.FieldSchema{
 			"name": &framework.FieldSchema{
 				Type: framework.TypeString,
diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go
index def69c826dc1..6c1f89ad15eb 100644
--- a/builtin/logical/aws/path_user.go
+++ b/builtin/logical/aws/path_user.go
@@ -18,7 +18,7 @@ import (
 
 func pathUser(b *backend) *framework.Path {
 	return &framework.Path{
-		Pattern: "(creds|sts)/" + framework.GenericNameRegex("name"),
+		Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"),
 		Fields: map[string]*framework.FieldSchema{
 			"name": &framework.FieldSchema{
 				Type: framework.TypeString,

From a87e6faa5656eb77ed48b1fe425db29d034087df Mon Sep 17 00:00:00 2001
From: Jim Kalafut
Date: Fri, 25 Oct 2019 09:03:22 -0700
Subject: [PATCH 19/90] changelog++

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c6b5e752fc6a..a53ad0620b4a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,6 +55,7 @@ IMPROVEMENTS:
    thereby improving the performance and storage capacity.
 * replication (enterprise): added more replication metrics
 * secrets/aws: The root config can now be read [GH-7245]
+ * secrets/aws: Role paths may now contain the '@' character [GH-7553]
 * secrets/database/cassandra: Add ability to skip verification of connection
   [GH-7614]
 * storage/azure: Add config parameter to Azure storage backend to allow
   specifying the ARM endpoint [GH-7567]

From fa9660379fdbd372ca8c446ec9edc308f81337f5 Mon Sep 17 00:00:00 2001
From: spiff
Date: Fri, 25 Oct 2019 09:21:55 -0700
Subject: [PATCH 20/90] Change "Generate Intermediate" example to exported
 (#7515)

The example request for "Generate Intermediate" was type "internal", but the
example response contained the private key, which "internal" doesn't do. This
patch fixes the example request to be type "exported" to match the example
response.
---
 website/source/api/secret/pki/index.html.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/api/secret/pki/index.html.md b/website/source/api/secret/pki/index.html.md
index 40b8f9332c45..d7aa2409b2bf 100644
--- a/website/source/api/secret/pki/index.html.md
+++ b/website/source/api/secret/pki/index.html.md
@@ -523,7 +523,7 @@ $ curl \
     --header "X-Vault-Token: ..." \
     --request POST \
     --data @payload.json \
-    http://127.0.0.1:8200/v1/pki/intermediate/generate/internal
+    http://127.0.0.1:8200/v1/pki/intermediate/generate/exported
 ```
 
 ```json

From 1afddeeaa421fe5f1553b9879344a9bce6ec89f0 Mon Sep 17 00:00:00 2001
From: Brian Shumate
Date: Fri, 25 Oct 2019 12:25:04 -0400
Subject: [PATCH 21/90] Docs: Add version command (#7719)

* Docs: Add version command

* adding to
---
 website/source/docs/commands/version.html.md | 24 ++++++++++++++++++++
 website/source/layouts/docs.erb              |  1 +
 2 files changed, 25 insertions(+)
 create mode 100644 website/source/docs/commands/version.html.md

diff --git a/website/source/docs/commands/version.html.md b/website/source/docs/commands/version.html.md
new file mode 100644
index 000000000000..99ec93cbc583
--- /dev/null
+++ b/website/source/docs/commands/version.html.md
@@ -0,0 +1,24 @@
+---
+layout: "docs"
+page_title: "version - Command"
+sidebar_title: "version"
+sidebar_current: "docs-commands-version"
+description: |-
+  The "version" command prints the version of Vault.
+---
+
+# version
+
+The `version` command prints the Vault version:
+
+```
+$ vault version
+Vault v1.2.3
+```
+
+The version can also be printed by passing the `--version` or `-v` flag to the `vault` command:
+
+```
+$ vault -v
+Vault v1.2.3
+```
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 8ea778239717..2eff84a986b9 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -184,6 +184,7 @@
           ]
         },
         'unwrap',
+        'version',
         'write',
         'token-helper'
       ]

From 6c059d92531893da0e1f709eb94df294370abcf7 Mon Sep 17 00:00:00 2001
From: Matt Morrison
Date: Sat, 26 Oct 2019 05:37:48 +1300
Subject: [PATCH 22/90] path-help missing or incorrect for raft paths (#7326)

---
 vault/logical_system_raft.go | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/vault/logical_system_raft.go b/vault/logical_system_raft.go
index 18c1c16b6858..a00caaa98d7e 100644
--- a/vault/logical_system_raft.go
+++ b/vault/logical_system_raft.go
@@ -96,15 +96,15 @@ func (b *SystemBackend) raftStoragePaths() []*framework.Path {
 				},
 			},
 
-			HelpSynopsis:    strings.TrimSpace(sysRaftHelp["raft-remove-peer"][0]),
-			HelpDescription: strings.TrimSpace(sysRaftHelp["raft-remove-peer"][1]),
+			HelpSynopsis:    strings.TrimSpace(sysRaftHelp["raft-configuration"][0]),
+			HelpDescription: strings.TrimSpace(sysRaftHelp["raft-configuration"][1]),
 		},
 		{
 			Pattern: "storage/raft/snapshot",
 			Operations: map[logical.Operation]framework.OperationHandler{
 				logical.ReadOperation: &framework.PathOperation{
 					Callback: b.handleStorageRaftSnapshotRead(),
-					Summary:  "Retruns a snapshot of the current state of vault.",
+					Summary:  "Returns a snapshot of the current state of vault.",
 				},
 				logical.UpdateOperation: &framework.PathOperation{
 					Callback: b.handleStorageRaftSnapshotWrite(false),
 				},
 			},
 
-			HelpSynopsis:    strings.TrimSpace(sysRaftHelp["raft-remove-peer"][0]),
-			HelpDescription: strings.TrimSpace(sysRaftHelp["raft-remove-peer"][1]),
+			HelpSynopsis:    strings.TrimSpace(sysRaftHelp["raft-snapshot"][0]),
+			HelpDescription: strings.TrimSpace(sysRaftHelp["raft-snapshot"][1]),
 		},
 		{
 			Pattern: "storage/raft/snapshot-force",
 			Operations: map[logical.Operation]framework.OperationHandler{
 				logical.UpdateOperation: &framework.PathOperation{
 					Callback: b.handleStorageRaftSnapshotWrite(true),
 				},
 			},
 
-			HelpSynopsis:    strings.TrimSpace(sysRaftHelp["raft-remove-peer"][0]),
-			HelpDescription: strings.TrimSpace(sysRaftHelp["raft-remove-peer"][1]),
+			HelpSynopsis:    strings.TrimSpace(sysRaftHelp["raft-snapshot-force"][0]),
+			HelpDescription: strings.TrimSpace(sysRaftHelp["raft-snapshot-force"][1]),
 		},
 	}
 }
@@ -436,8 +436,20 @@ var sysRaftHelp = map[string][2]string{
 		"Accepts an answer from the peer to be joined to the raft cluster.",
 		"",
 	},
+	"raft-configuration": {
+		"Returns the raft cluster configuration.",
+		"",
+	},
 	"raft-remove-peer": {
 		"Removes a peer from the raft cluster.",
 		"",
 	},
+	"raft-snapshot": {
+		"Restores and saves snapshots from the raft cluster.",
+		"",
+	},
+	"raft-snapshot-force": {
+		"Force restores a raft cluster snapshot.",
+		"",
+	},
 }

From 25c2042ab60e2b66c902538e4197ecf0e40a88d1 Mon Sep 17 00:00:00 2001
From: Mike Jarmy
Date: Fri, 25 Oct 2019 12:46:56 -0400
Subject: [PATCH 23/90] add docs for new replication metrics (#7729)

* add docs for new replication metrics

* add docs for new replication metrics
---
 .../source/docs/internals/telemetry.html.md | 20 +++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/website/source/docs/internals/telemetry.html.md
b/website/source/docs/internals/telemetry.html.md index 79f9aaba3354..a26d9d8b72cf 100644 --- a/website/source/docs/internals/telemetry.html.md +++ b/website/source/docs/internals/telemetry.html.md @@ -395,6 +395,26 @@ Number of incidences where the starting Merkle Tree index used to begin streamin **[S]** Summary (Milliseconds): Time taken to perform a Merkle Tree based synchronization using the last delta generated between the clusters participating in replication +### replication.merkle.commit_index + +**[G]** Gauge (Logical Sequence Number): The last committed index in the Merkle Tree. + +### replication.wal.last_wal + +**[G]** Gauge (Logical Sequence Number): The index of the last WAL + +### replication.wal.last_dr_wal + +**[G]** Gauge (Logical Sequence Number): The index of the last DR WAL + +### replication.wal.last_performance_wal + +**[G]** Gauge (Logical Sequence Number): The index of the last Performance WAL + +### replication.fsm.last_remote_wal + +**[G]** Gauge (Logical Sequence Number): The index of the last remote WAL + ## Secrets Engines Metrics These metrics relate to the supported [secrets engines][secrets-engines]. From 373b5d6a2558736d0d3be305f2ec898be88bb792 Mon Sep 17 00:00:00 2001 From: Matthew Irish Date: Fri, 25 Oct 2019 13:16:45 -0500 Subject: [PATCH 24/90] UI - replication path filtering (#7620) * rename mount-filter-config models, components, serializer, adapters to path-filter-config * move search-select component to core addon * add js class for search-select-placeholder and sort out power-select deps for moving to the core component * expose oninput from powerselect through search-select * don't fetch mounts in the replication routes * remove toggle from add template * start cross-namespace fetching * group options and set up for namespace fetch via power-select search prop * add and style up radio-card CSS component * add xlm size for icons between l and xl * copy defaults so they're not getting mutated * finalize cross-namespace fetching and getting that to work with power-select * when passing options but no models, format the options in search select so that they render properly in the list * tint the background of a selected radio card * default to null mode and uniq options in search-select * finish styling radio-card * format inputValues when first rendering the component if options are being passed from outside * treat mode:null as deleting existing config which simplifies save logic * correctly prune the auto complete list since path-filter-config-list handles all of that and finish styling * remove old component * add search debounce and fix linting * update search-select docs * updating tests * support grouped options for when to show the create prompt * update and add tests for path-filter-config-list * fix tests for search-select and path-filter-config-list * the new api uses allow/deny instead of whitelist/blacklist --- ...filter-config.js => path-filter-config.js} | 2 +- ...filter-config.js => path-filter-config.js} | 4 +- ...filter-config.js => path-filter-config.js} | 0 ui/app/styles/components/hs-icon.scss | 4 + ui/app/styles/components/radio-card.scss | 90 ++++++++++++ ui/app/styles/components/search-select.scss | 2 +- ui/app/styles/core.scss | 1 + ui/lib/core/addon/components/icon.js | 4 +- .../components/search-select-placeholder.js | 6 + .../core/addon}/components/search-select.js | 49 ++++--- .../components/search-select-placeholder.hbs | 0 .../templates/components/search-select.hbs | 3 +- .../components/search-select-placeholder.js | 1 
+ ui/lib/core/app/components/search-select.js | 1 + ui/lib/core/package.json | 1 + ui/lib/core/stories/icon.md | 2 + ui/{ => lib/core}/stories/search-select.md | 9 +- ui/lib/core/stories/search-select.stories.js | 41 ++++++ .../components/mount-filter-config-list.js | 27 ---- .../components/path-filter-config-list.js | 138 ++++++++++++++++++ .../addon/controllers/application.js | 28 ++-- .../mode/secondaries/config-edit.js | 14 +- .../addon/routes/mode/secondaries/add.js | 13 +- .../routes/mode/secondaries/config-create.js | 9 +- .../routes/mode/secondaries/config-edit.js | 3 +- .../routes/mode/secondaries/config-show.js | 2 +- .../addon/routes/replication-base.js | 10 -- .../components/mount-filter-config-list.hbs | 79 ---------- .../components/path-filter-config-list.hbs | 126 ++++++++++++++++ .../addon/templates/mode/secondaries/add.hbs | 25 +--- .../mode/secondaries/config-create.hbs | 6 +- .../mode/secondaries/config-edit.hbs | 26 +--- ui/public/file-error.svg | 3 + ui/public/file-success.svg | 3 + ui/stories/search-select.stories.js | 34 ----- .../mount-filter-config-list-test.js | 38 ----- .../path-filter-config-list-test.js | 116 +++++++++++++++ 37 files changed, 620 insertions(+), 300 deletions(-) rename ui/app/adapters/{mount-filter-config.js => path-filter-config.js} (87%) rename ui/app/models/{mount-filter-config.js => path-filter-config.js} (73%) rename ui/app/serializers/{mount-filter-config.js => path-filter-config.js} (100%) create mode 100644 ui/app/styles/components/radio-card.scss create mode 100644 ui/lib/core/addon/components/search-select-placeholder.js rename ui/{app => lib/core/addon}/components/search-select.js (75%) rename ui/{app => lib/core/addon}/templates/components/search-select-placeholder.hbs (100%) rename ui/{app => lib/core/addon}/templates/components/search-select.hbs (94%) create mode 100644 ui/lib/core/app/components/search-select-placeholder.js create mode 100644 ui/lib/core/app/components/search-select.js rename ui/{ => lib/core}/stories/search-select.md (62%) create mode 100644 ui/lib/core/stories/search-select.stories.js delete mode 100644 ui/lib/replication/addon/components/mount-filter-config-list.js create mode 100644 ui/lib/replication/addon/components/path-filter-config-list.js delete mode 100644 ui/lib/replication/addon/templates/components/mount-filter-config-list.hbs create mode 100644 ui/lib/replication/addon/templates/components/path-filter-config-list.hbs create mode 100644 ui/public/file-error.svg create mode 100644 ui/public/file-success.svg delete mode 100644 ui/stories/search-select.stories.js delete mode 100644 ui/tests/integration/components/mount-filter-config-list-test.js create mode 100644 ui/tests/integration/components/path-filter-config-list-test.js diff --git a/ui/app/adapters/mount-filter-config.js b/ui/app/adapters/path-filter-config.js similarity index 87% rename from ui/app/adapters/mount-filter-config.js rename to ui/app/adapters/path-filter-config.js index 597eedb0d84e..d46739715dad 100644 --- a/ui/app/adapters/mount-filter-config.js +++ b/ui/app/adapters/path-filter-config.js @@ -2,7 +2,7 @@ import ApplicationAdapter from './application'; export default ApplicationAdapter.extend({ url(id) { - return `${this.buildURL()}/replication/performance/primary/mount-filter/${id}`; + return `${this.buildURL()}/replication/performance/primary/paths-filter/${id}`; }, findRecord(store, type, id) { diff --git a/ui/app/models/mount-filter-config.js b/ui/app/models/path-filter-config.js similarity index 73% rename from 
ui/app/models/mount-filter-config.js rename to ui/app/models/path-filter-config.js index b0234a749001..893c6c678f45 100644 --- a/ui/app/models/mount-filter-config.js +++ b/ui/app/models/path-filter-config.js @@ -2,9 +2,7 @@ import DS from 'ember-data'; const { attr } = DS; export default DS.Model.extend({ - mode: attr('string', { - defaultValue: 'whitelist', - }), + mode: attr('string'), paths: attr('array', { defaultValue: function() { return []; diff --git a/ui/app/serializers/mount-filter-config.js b/ui/app/serializers/path-filter-config.js similarity index 100% rename from ui/app/serializers/mount-filter-config.js rename to ui/app/serializers/path-filter-config.js diff --git a/ui/app/styles/components/hs-icon.scss b/ui/app/styles/components/hs-icon.scss index 904fd9d2be64..80507831ec47 100644 --- a/ui/app/styles/components/hs-icon.scss +++ b/ui/app/styles/components/hs-icon.scss @@ -30,6 +30,10 @@ height: 20px; } +.hs-icon-xlm { + width: 24px; + height: 24px; +} .hs-icon-xl { width: 28px; height: 28px; diff --git a/ui/app/styles/components/radio-card.scss b/ui/app/styles/components/radio-card.scss new file mode 100644 index 000000000000..d48932ae4dd9 --- /dev/null +++ b/ui/app/styles/components/radio-card.scss @@ -0,0 +1,90 @@ +.radio-card-selector { + display: flex; + margin-bottom: $spacing-xs; +} +.radio-card { + width: 19rem; + box-shadow: $box-shadow-low; + display: flex; + flex-direction: column; + justify-content: space-between; + margin: $spacing-xs $spacing-m; + border: $base-border; + border-radius: $radius; + transition: all ease-in-out $speed; + + input[type='radio'] { + position: absolute; + z-index: 1; + opacity: 0; + } + + input[type='radio'] + label { + border: 1px solid $grey-light; + border-radius: 50%; + cursor: pointer; + display: block; + height: 1rem; + width: 1rem; + flex-shrink: 0; + flex-grow: 0; + } + + input[type='radio']:checked + label { + background: $blue; + border: 1px solid $blue; + box-shadow: inset 0 0 0 0.15rem $white; + } + input[type='radio']:focus + label { + box-shadow: 0 0 10px 1px rgba($blue, 0.4), inset 0 0 0 0.15rem $white; + } +} +.radio-card:first-child { + margin-left: 0; +} +.radio-card:last-child { + margin-right: 0; +} + +.radio-card-row { + display: flex; + padding: $spacing-m; +} +.radio-card-icon { + color: $ui-gray-300; +} +.radio-card-message { + margin: $spacing-xxs; +} + +.radio-card-message-title { + font-weight: $font-weight-semibold; + font-size: $size-7; + margin-bottom: $spacing-xxs; +} +.radio-card-message-body { + line-height: 1.2; + color: $ui-gray-500; + font-size: $size-8; +} + +.radio-card-radio-row { + display: flex; + justify-content: center; + background: $ui-gray-050; + padding: $spacing-xs; +} + +.is-selected { + &.radio-card { + border-color: $blue-500; + background: $ui-gray-010; + box-shadow: $box-shadow-middle; + } + .radio-card-icon { + color: $black; + } + .radio-card-radio-row { + background: $blue-050; + } +} diff --git a/ui/app/styles/components/search-select.scss b/ui/app/styles/components/search-select.scss index cd764ae96bec..027cea6a4172 100644 --- a/ui/app/styles/components/search-select.scss +++ b/ui/app/styles/components/search-select.scss @@ -56,7 +56,7 @@ padding-left: $spacing-xxs + $spacing-l; } -.ember-power-select-options { +div > .ember-power-select-options { background: $white; border: $base-border; box-shadow: $box-shadow-middle; diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss index e1402d54ca14..87706441b2f8 100644 --- a/ui/app/styles/core.scss +++ b/ui/app/styles/core.scss 
@@ -72,6 +72,7 @@ @import './components/navigate-input'; @import './components/page-header'; @import './components/popup-menu'; +@import './components/radio-card'; @import './components/radial-progress'; @import './components/raft-join'; @import './components/role-item'; diff --git a/ui/lib/core/addon/components/icon.js b/ui/lib/core/addon/components/icon.js index c1f4c47bd3f2..eae63b0cc62b 100644 --- a/ui/lib/core/addon/components/icon.js +++ b/ui/lib/core/addon/components/icon.js @@ -7,7 +7,7 @@ * * ``` * @param glyph=null {String} - The name of the SVG to render inline. - * @param [size='m'] {String} - The size of the Icon, can be one of 's', 'm', 'l', 'xl', 'xxl'. The default is 'm'. + * @param [size='m'] {String} - The size of the Icon, can be one of 's', 'm', 'l', 'xlm', 'xl', 'xxl'. The default is 'm'. * */ import Component from '@ember/component'; @@ -15,7 +15,7 @@ import { computed } from '@ember/object'; import { assert } from '@ember/debug'; import layout from '../templates/components/icon'; -const SIZES = ['s', 'm', 'l', 'xl', 'xxl']; +const SIZES = ['s', 'm', 'l', 'xlm', 'xl', 'xxl']; export default Component.extend({ tagName: '', diff --git a/ui/lib/core/addon/components/search-select-placeholder.js b/ui/lib/core/addon/components/search-select-placeholder.js new file mode 100644 index 000000000000..cf5e49baff5f --- /dev/null +++ b/ui/lib/core/addon/components/search-select-placeholder.js @@ -0,0 +1,6 @@ +import Component from '@ember/component'; +import layout from '../templates/components/search-select-placeholder'; + +export default Component.extend({ + layout, +}); diff --git a/ui/app/components/search-select.js b/ui/lib/core/addon/components/search-select.js similarity index 75% rename from ui/app/components/search-select.js rename to ui/lib/core/addon/components/search-select.js index d1c626bf458b..bc710c5d2428 100644 --- a/ui/app/components/search-select.js +++ b/ui/lib/core/addon/components/search-select.js @@ -3,6 +3,7 @@ import { inject as service } from '@ember/service'; import { task } from 'ember-concurrency'; import { computed } from '@ember/object'; import { singularize } from 'ember-inflector'; +import layout from '../templates/components/search-select'; /** * @module SearchSelect @@ -13,34 +14,25 @@ import { singularize } from 'ember-inflector'; * @param id {String} - The name of the form field * @param models {String} - An array of model types to fetch from the API. * @param onChange {Func} - The onchange action for this form field. - * @param inputValue {String} - A comma-separated string or an array of strings. + * @param inputValue {String | Array} - A comma-separated string or an array of strings. * @param [helpText] {String} - Text to be displayed in the info tooltip for this form field * @param label {String} - Label for this form field * @param fallbackComponent {String} - name of component to be rendered if the API call 403s * + * @param options {Array} - *Advanced usage* - `options` can be passed directly from the outside to the + * power-select component. If doing this, `models` should not also be passed as that will overwrite the + * passed value. + * @param search {Func} - *Advanced usage* - Customizes how the power-select component searches for matches - + * see the power-select docs for more information. 
+ * */ export default Component.extend({ + layout, 'data-test-component': 'search-select', classNames: ['field', 'search-select'], store: service(), - /* - * @public - * @param Function - * - * Function called when any of the inputs change - * accepts a single param `value` - * - */ onChange: () => {}, - - /* - * @public - * @param String | Array - * A comma-separated string or an array of strings. - * Defaults to an empty array. - * - */ inputValue: computed(function() { return []; }), @@ -52,6 +44,16 @@ export default Component.extend({ this._super(...arguments); this.set('selectedOptions', this.inputValue || []); }, + didRender() { + this._super(...arguments); + let { oldOptions, options, selectedOptions } = this; + let hasFormattedInput = typeof selectedOptions.firstObject !== 'string'; + if (options && !oldOptions && !hasFormattedInput) { + // this is the first time they've been set, so we need to format them + this.formatOptions(options); + } + this.set('oldOptions', options); + }, formatOptions: function(options) { options = options.toArray().map(option => { option.searchText = `${option.name} ${option.id}`; @@ -68,11 +70,17 @@ export default Component.extend({ }); this.set('selectedOptions', formattedOptions); if (this.options) { - options = this.options.concat(options); + options = this.options.concat(options).uniq(); } this.set('options', options); }, fetchOptions: task(function*() { + if (!this.models) { + if (this.options) { + this.formatOptions(this.options); + } + return; + } for (let modelType of this.models) { if (modelType.includes('identity')) { this.set('shouldRenderName', true); @@ -128,7 +136,10 @@ export default Component.extend({ constructSuggestion(id) { return `Add new ${singularize(this.label)}: ${id}`; }, - hideCreateOptionOnSameID(id) { + hideCreateOptionOnSameID(id, options) { + if (options && options.length && options.firstObject.groupName) { + return !options.some(group => group.options.findBy('id', id)); + } let existingOption = this.options && (this.options.findBy('id', id) || this.options.findBy('name', id)); return !existingOption; }, diff --git a/ui/app/templates/components/search-select-placeholder.hbs b/ui/lib/core/addon/templates/components/search-select-placeholder.hbs similarity index 100% rename from ui/app/templates/components/search-select-placeholder.hbs rename to ui/lib/core/addon/templates/components/search-select-placeholder.hbs diff --git a/ui/app/templates/components/search-select.hbs b/ui/lib/core/addon/templates/components/search-select.hbs similarity index 94% rename from ui/app/templates/components/search-select.hbs rename to ui/lib/core/addon/templates/components/search-select.hbs index 848910a179f4..a38c05f5da80 100644 --- a/ui/app/templates/components/search-select.hbs +++ b/ui/lib/core/addon/templates/components/search-select.hbs @@ -7,7 +7,7 @@ helpText=helpText }} {{else}} -