diff --git a/.changelog/13539.txt b/.changelog/13539.txt new file mode 100644 index 00000000000..8f77a29e839 --- /dev/null +++ b/.changelog/13539.txt @@ -0,0 +1,3 @@ +```release-note:improvement +build: Update go toolchain to 1.18.5 +``` diff --git a/.changelog/13670.txt b/.changelog/13670.txt new file mode 100644 index 00000000000..04e3541e3dd --- /dev/null +++ b/.changelog/13670.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed a bug where task memory was reported as zero on systems using cgroups v2 +``` diff --git a/.changelog/13755.txt b/.changelog/13755.txt new file mode 100644 index 00000000000..3b1c2c051ae --- /dev/null +++ b/.changelog/13755.txt @@ -0,0 +1,3 @@ +```release-note:improvement +template: Templates support new uid/gid parameter pair +``` \ No newline at end of file diff --git a/.changelog/13907.txt b/.changelog/13907.txt new file mode 100644 index 00000000000..50dc4a0ce19 --- /dev/null +++ b/.changelog/13907.txt @@ -0,0 +1,3 @@ +```release-note:improvement +template: Expose consul-template configuration options at the client level for `nomad_retry`. +``` diff --git a/.changelog/13919.txt b/.changelog/13919.txt new file mode 100644 index 00000000000..559d948d863 --- /dev/null +++ b/.changelog/13919.txt @@ -0,0 +1,3 @@ +```release-note:improvement +csi: Add `stage_publish_base_dir` field to `csi_plugin` block to support plugins that require a specific staging/publishing directory for mounts +``` diff --git a/.changelog/13971.txt b/.changelog/13971.txt new file mode 100644 index 00000000000..3873e254e11 --- /dev/null +++ b/.changelog/13971.txt @@ -0,0 +1,3 @@ +```release-note:improvement +qemu: use shorter socket file names to reduce the chance of hitting the max path length +``` diff --git a/.changelog/13972.txt b/.changelog/13972.txt new file mode 100644 index 00000000000..330faea98a1 --- /dev/null +++ b/.changelog/13972.txt @@ -0,0 +1,3 @@ +```release-note:improvement +template: add script change_mode that allows scripts to be executed on template change +``` \ No newline at end of file diff --git a/.changelog/13991.txt b/.changelog/13991.txt new file mode 100644 index 00000000000..ec57024a45a --- /dev/null +++ b/.changelog/13991.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: when creating a secure variable, check against your namespaces rather than assuming default +``` diff --git a/.changelog/14000.txt b/.changelog/14000.txt new file mode 100644 index 00000000000..3db5ca3b648 --- /dev/null +++ b/.changelog/14000.txt @@ -0,0 +1,3 @@ +```release-note:bug +qemu: restore the monitor socket path when restoring a QEMU task. 
+``` diff --git a/.changelog/14001.txt b/.changelog/14001.txt new file mode 100644 index 00000000000..026c80f8136 --- /dev/null +++ b/.changelog/14001.txt @@ -0,0 +1,3 @@ +```release-note:bug +deployments: Fixed a bug that prevented auto-approval if canaries were marked as unhealthy during deployment +``` diff --git a/.changelog/14025.txt b/.changelog/14025.txt new file mode 100644 index 00000000000..762fa2f7e5c --- /dev/null +++ b/.changelog/14025.txt @@ -0,0 +1,3 @@ +```release-note:improvement +deps: Update go.etcd.io/bbolt to v1.3.6 +``` diff --git a/.changelog/14065.txt b/.changelog/14065.txt new file mode 100644 index 00000000000..eef7084eeb2 --- /dev/null +++ b/.changelog/14065.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fixed a bug where the job validation request was not sent to the leader +``` diff --git a/.changelog/14069.txt b/.changelog/14069.txt new file mode 100644 index 00000000000..9076fc3d6b6 --- /dev/null +++ b/.changelog/14069.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fixed a bug where the memory usage reported by Allocation Resource Utilization was zero on systems using cgroups v2 +``` diff --git a/.changelog/14088.txt b/.changelog/14088.txt new file mode 100644 index 00000000000..e8963029aaa --- /dev/null +++ b/.changelog/14088.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fixed a bug where the Vault token was not respected in the plan command +``` diff --git a/.changelog/14089.txt b/.changelog/14089.txt new file mode 100644 index 00000000000..c2a0d3e5059 --- /dev/null +++ b/.changelog/14089.txt @@ -0,0 +1,3 @@ +```release-note:improvement +driver/docker: Added config option to disable container healthcheck +``` diff --git a/.changelog/14103.txt b/.changelog/14103.txt new file mode 100644 index 00000000000..af78d62dc1c --- /dev/null +++ b/.changelog/14103.txt @@ -0,0 +1,3 @@ +```release-note:improvement +deps: Update google.golang.org/grpc to v1.48.0 +``` diff --git a/.changelog/14112.txt b/.changelog/14112.txt new file mode 100644 index 00000000000..19c11fa04ae --- /dev/null +++ b/.changelog/14112.txt @@ -0,0 +1,3 @@ +```release-note:improvement +deps: Update fsouza/go-dockerclient to v1.8.2 +``` diff --git a/.changelog/14115.txt b/.changelog/14115.txt new file mode 100644 index 00000000000..e8a7f86b6ea --- /dev/null +++ b/.changelog/14115.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Removed duplicate breadcrumb header when navigating from a child job back to its parent. +``` diff --git a/.changelog/14127.txt b/.changelog/14127.txt new file mode 100644 index 00000000000..61c0368774e --- /dev/null +++ b/.changelog/14127.txt @@ -0,0 +1,7 @@ +```release-note:improvement +client: add option to restart all tasks of an allocation, regardless of lifecycle type or state. +``` + +```release-note:improvement +client: only start poststop tasks after poststart tasks are done. 
+``` diff --git a/.changelog/14132.txt b/.changelog/14132.txt new file mode 100644 index 00000000000..a93b804ce49 --- /dev/null +++ b/.changelog/14132.txt @@ -0,0 +1,3 @@ +```release-note:improvement +build: update to go1.19 +``` diff --git a/.changelog/14138.txt b/.changelog/14138.txt new file mode 100644 index 00000000000..0978a97ce52 --- /dev/null +++ b/.changelog/14138.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add general keyboard navigation to the Nomad UI +``` \ No newline at end of file diff --git a/.changelog/14140.txt b/.changelog/14140.txt new file mode 100644 index 00000000000..c3d9115ae8a --- /dev/null +++ b/.changelog/14140.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: `acl policy info` output format has changed to improve readability with large policy documents +``` diff --git a/.changelog/14145.txt b/.changelog/14145.txt new file mode 100644 index 00000000000..5a543dac8de --- /dev/null +++ b/.changelog/14145.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: cleanup whitespace from failed api response body +``` diff --git a/.changelog/14171.txt b/.changelog/14171.txt new file mode 100644 index 00000000000..ca84601d615 --- /dev/null +++ b/.changelog/14171.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sentinel: add the ability to reference the namespace and Nomad acl token in policies +``` diff --git a/.changelog/14199.txt b/.changelog/14199.txt new file mode 100644 index 00000000000..a2b21ffdb15 --- /dev/null +++ b/.changelog/14199.txt @@ -0,0 +1,3 @@ +```release-note:feature +[ui] Services table: Display task-level services in addition to group-level services. +``` diff --git a/.changelog/14203.txt b/.changelog/14203.txt new file mode 100644 index 00000000000..f331d84c878 --- /dev/null +++ b/.changelog/14203.txt @@ -0,0 +1,3 @@ +```release-note:bug +template: Fixed a bug where job templates would use `uid` and `gid` 0 after upgrading to Nomad 1.3.3, causing tasks to fail with the error `failed looking up user: managing file ownership is not supported on Windows`. +``` diff --git a/.changelog/14212.txt b/.changelog/14212.txt new file mode 100644 index 00000000000..e0e9809b40e --- /dev/null +++ b/.changelog/14212.txt @@ -0,0 +1,3 @@ +```release-note:breaking-change +audit (Enterprise): fixed inconsistency in event filter logic +``` diff --git a/.changelog/14223.txt b/.changelog/14223.txt new file mode 100644 index 00000000000..e8fceaa7c1c --- /dev/null +++ b/.changelog/14223.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add button to restart all tasks in an allocation. +``` diff --git a/.changelog/14224.txt b/.changelog/14224.txt new file mode 100644 index 00000000000..8e05f384d30 --- /dev/null +++ b/.changelog/14224.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed a bug that caused the allocation details page to display the stats bar chart even if the task was pending. 
+``` diff --git a/.changelog/14248.txt b/.changelog/14248.txt new file mode 100644 index 00000000000..32b4e40e9e0 --- /dev/null +++ b/.changelog/14248.txt @@ -0,0 +1,3 @@ +```release-note:bug +client: Fixed a bug where user lookups would hang or panic +``` diff --git a/.changelog/14297.txt b/.changelog/14297.txt new file mode 100644 index 00000000000..207eb385890 --- /dev/null +++ b/.changelog/14297.txt @@ -0,0 +1,3 @@ +```release-note:bug +client/logmon: Fixed a bug where logmon could not find the nomad executable +``` diff --git a/.changelog/14298.txt b/.changelog/14298.txt new file mode 100644 index 00000000000..1072f7bebf2 --- /dev/null +++ b/.changelog/14298.txt @@ -0,0 +1,7 @@ +```release-note:bug +vault: Fixed a bug where changing the Vault configuration `namespace` field was not detected as a change during server configuration reload. +``` + +```release-note:bug +vault: Fixed a bug where Vault clients were recreated when the server configuration was reloaded, even if there were no changes to the Vault configuration. +``` diff --git a/.circleci/config.yml b/.circleci/config.yml index 080f3fac008..442ca1ad407 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -171,7 +171,11 @@ jobs: mkdir -p $GOTESTSUM_PATH - install-golang: target_directory: "c:" - - run: go version + - run: + name: Show installed Go version + command: | + export PATH=/c/go/bin:/c/gopath/bin:$PATH + go version - install-vault: version: $VAULT_VERSION - run: vault version @@ -457,7 +461,7 @@ executors: go: working_directory: /go/src/github.com/hashicorp/nomad docker: - - image: docker.mirror.hashicorp.services/golang:1.18.3 + - image: docker.mirror.hashicorp.services/golang:1.19 resource_class: medium environment: <<: *common_envs @@ -470,7 +474,7 @@ executors: resource_class: large environment: &machine_env <<: *common_envs - GOLANG_VERSION: 1.18.3 + GOLANG_VERSION: 1.19 go-macos: working_directory: ~/go/src/github.com/hashicorp/nomad @@ -479,7 +483,7 @@ environment: <<: *common_envs GOPATH: /Users/distiller/go - GOLANG_VERSION: 1.18.3 + GOLANG_VERSION: 1.19 go-windows: machine: @@ -491,7 +495,7 @@ GOPATH: c:\gopath GOBIN: c:\gopath\bin GOTESTSUM_PATH: c:\tmp\test-reports - GOLANG_VERSION: 1.18.3 + GOLANG_VERSION: 1.19 GOTESTSUM_VERSION: 1.7.0 VAULT_VERSION: 1.4.1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 52c3b0b0110..e24c03941e3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -186,10 +186,18 @@ jobs: echo "Version ${{ github.event.inputs.version }} is a prerelease, skipping update of LAST_RELEASE" fi + - name: Remove generated files + run: | + # These generated files are only needed when building the final + # binary and should not be present in the repository afterwards. + find . -name '*.generated.go' | xargs git rm + git status + - name: Commit post-release changes run: | - # Display stated and unstaged diffs. - git diff --color=always HEAD + # Display staged and unstaged diffs, skipping deleted files to avoid + # cluttering the output with the generated files. + git diff --diff-filter=d --color=always HEAD git add -A . if ! 
git diff-index --quiet HEAD --; then git commit --message 'Prepare for next release' diff --git a/.github/workflows/test-core.yaml b/.github/workflows/test-core.yaml index c7317b31c8c..c71c256d362 100644 --- a/.github/workflows/test-core.yaml +++ b/.github/workflows/test-core.yaml @@ -21,7 +21,7 @@ on: - 'website/**' env: VERBOSE: 1 - GO_VERSION: 1.18.3 + GO_VERSION: 1.19 GOBIN: /usr/local/bin GOTESTARCH: amd64 CONSUL_VERSION: 1.11.3 diff --git a/.go-version b/.go-version index b9fb27ab4f7..815d5ca06d5 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.18.3 +1.19.0 diff --git a/.release/ci.hcl b/.release/ci.hcl index dab8087af49..8883245039b 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -13,10 +13,7 @@ project "nomad" { release_branches = [ "main", - "release/1.0.x", - "release/1.1.x", - "release/1.2.x", - "release/1.3.x", + "release/**", ] } } @@ -225,3 +222,17 @@ event "promote-production-packaging" { on = "always" } } + +event "post-publish-website" { + depends = ["promote-production-packaging"] + + action "post-publish-website" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "post-publish-website" + } + + notification { + on = "always" + } +} diff --git a/.semgrep/api_errorf.yml b/.semgrep/api_errorf.yml new file mode 100644 index 00000000000..703c5bf29f0 --- /dev/null +++ b/.semgrep/api_errorf.yml @@ -0,0 +1,11 @@ +rules: + - id: "fmt_errorf_unformatted_use" + patterns: + - pattern: fmt.Errorf("...") + message: "Use of fmt.Errorf without formatting. Please use errors.New" + languages: + - "go" + severity: "WARNING" + paths: + include: + - "./api/*" diff --git a/CHANGELOG.md b/CHANGELOG.md index 74bc7ab9fff..4c225a6d7cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,25 @@ +## 1.3.3 (August 05, 2022) + +IMPROVEMENTS: + +* build: Update go toolchain to 1.18.5 [[GH-13956](https://github.com/hashicorp/nomad/pull/13956)] +* csi: Add `stage_publish_base_dir` field to `csi_plugin` block to support plugins that require a specific staging/publishing directory for mounts [[GH-13919](https://github.com/hashicorp/nomad/issues/13919)] +* qemu: use shorter socket file names to reduce the chance of hitting the max path length [[GH-13971](https://github.com/hashicorp/nomad/issues/13971)] +* template: Expose consul-template configuration options at the client level for `nomad_retry`. [[GH-13907](https://github.com/hashicorp/nomad/issues/13907)] +* template: Templates support new uid/gid parameter pair [[GH-13755](https://github.com/hashicorp/nomad/issues/13755)] +* ui: Reorder and apply the same style to the Evaluations list page filters to match the Job list page. [[GH-13866](https://github.com/hashicorp/nomad/issues/13866)] + +BUG FIXES: + +* acl: Fixed a bug where the timestamp for expiring one-time tokens was not deterministic between servers [[GH-13737](https://github.com/hashicorp/nomad/issues/13737)] +* deployments: Fixed a bug that prevented auto-approval if canaries were marked as unhealthy during deployment [[GH-14001](https://github.com/hashicorp/nomad/issues/14001)] +* metrics: Fixed a bug where blocked evals with no class produced no dc:class scope metrics [[GH-13786](https://github.com/hashicorp/nomad/issues/13786)] +* namespaces: Fixed a bug that allowed deleting a namespace that contained a CSI volume [[GH-13880](https://github.com/hashicorp/nomad/issues/13880)] +* qemu: restore the monitor socket path when restoring a QEMU task. 
[[GH-14000](https://github.com/hashicorp/nomad/issues/14000)] +* servicedisco: Fixed a bug where non-unique services would escape job validation [[GH-13869](https://github.com/hashicorp/nomad/issues/13869)] +* ui: Add missing breadcrumb in the Evaluations page. [[GH-13865](https://github.com/hashicorp/nomad/issues/13865)] +* ui: Fixed a bug where task memory was reported as zero on systems using cgroups v2 [[GH-13670](https://github.com/hashicorp/nomad/issues/13670)] + ## 1.3.2 (July 13, 2022) IMPROVEMENTS: @@ -204,6 +226,17 @@ BUG FIXES: * ui: fix broken link to task-groups in the Recent Allocations table in the Job Detail overview page. [[GH-12765](https://github.com/hashicorp/nomad/issues/12765)] * ui: fix the unit for the task row memory usage metric [[GH-11980](https://github.com/hashicorp/nomad/issues/11980)] +## 1.2.10 (August 05, 2022) + +BUG FIXES: + +* acl: Fixed a bug where the timestamp for expiring one-time tokens was not deterministic between servers [[GH-13737](https://github.com/hashicorp/nomad/issues/13737)] +* build: Update go toolchain to 1.18.5 [[GH-13956](https://github.com/hashicorp/nomad/pull/13956)] +* deployments: Fixed a bug that prevented auto-approval if canaries were marked as unhealthy during deployment [[GH-14001](https://github.com/hashicorp/nomad/issues/14001)] +* metrics: Fixed a bug where blocked evals with no class produced no dc:class scope metrics [[GH-13786](https://github.com/hashicorp/nomad/issues/13786)] +* namespaces: Fixed a bug that allowed deleting a namespace that contained a CSI volume [[GH-13880](https://github.com/hashicorp/nomad/issues/13880)] +* qemu: restore the monitor socket path when restoring a QEMU task. [[GH-14000](https://github.com/hashicorp/nomad/issues/14000)] + ## 1.2.9 (July 13, 2022) BUG FIXES: @@ -453,6 +486,15 @@ BUG FIXES: * server: Fixed a panic on arm64 platform when dispatching a job with a payload [[GH-11396](https://github.com/hashicorp/nomad/issues/11396)] * server: Fixed a panic that may occur when preempting multiple allocations on the same node [[GH-11346](https://github.com/hashicorp/nomad/issues/11346)] +## 1.1.16 (August 05, 2022) + +BUG FIXES: + +* acl: Fixed a bug where the timestamp for expiring one-time tokens was not deterministic between servers [[GH-13737](https://github.com/hashicorp/nomad/issues/13737)] +* deployments: Fixed a bug that prevented auto-approval if canaries were marked as unhealthy during deployment [[GH-14001](https://github.com/hashicorp/nomad/issues/14001)] +* namespaces: Fixed a bug that allowed deleting a namespace that contained a CSI volume [[GH-13880](https://github.com/hashicorp/nomad/issues/13880)] +* qemu: restore the monitor socket path when restoring a QEMU task. [[GH-14000](https://github.com/hashicorp/nomad/issues/14000)] + ## 1.1.15 (July 13, 2022) BUG FIXES: diff --git a/GNUmakefile b/GNUmakefile index 1c017108663..5f1d99c6ee2 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -21,7 +21,7 @@ ifndef BIN BIN := $(GOPATH)/bin endif -GO_TAGS ?= +GO_TAGS ?= osusergo ifeq ($(CI),true) GO_TAGS := codegen_generated $(GO_TAGS) @@ -49,7 +49,7 @@ PROTO_COMPARE_TAG ?= v1.0.3$(if $(findstring ent,$(GO_TAGS)),+ent,) # LAST_RELEASE is the git sha of the latest release corresponding to this branch. main should have the latest # published release, and release branches should point to the latest published release in the X.Y release line. 
-LAST_RELEASE ?= v1.3.2 +LAST_RELEASE ?= v1.3.3 default: help @@ -143,9 +143,9 @@ deps: ## Install build and development dependencies lint-deps: ## Install linter dependencies ## Keep versions in sync with tools/go.mod (see https://github.com/golang/go/issues/30515) @echo "==> Updating linter dependencies..." - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.48.0 go install github.com/client9/misspell/cmd/misspell@v0.3.4 - go install github.com/hashicorp/go-hclog/hclogvet@v0.1.4 + go install github.com/hashicorp/go-hclog/hclogvet@v0.1.5 .PHONY: git-hooks git-dir = $(shell git rev-parse --git-dir) @@ -179,6 +179,9 @@ check: ## Lint the source code @echo "==> Check API package is isolated from rest" @cd ./api && if go list --test -f '{{ join .Deps "\n" }}' . | grep github.com/hashicorp/nomad/ | grep -v -e /nomad/api/ -e nomad/api.test; then echo " /api package depends the ^^ above internal nomad packages. Remove such dependency"; exit 1; fi + @echo "==> Check command package does not import structs" + @cd ./command && if go list -f '{{ join .Imports "\n" }}' . | grep github.com/hashicorp/nomad/nomad/structs; then echo " /command package imports the structs pkg. Remove such import"; exit 1; fi + @echo "==> Checking Go mod.." @GO111MODULE=on $(MAKE) tidy @if (git status --porcelain | grep -Eq "go\.(mod|sum)"); then \ @@ -210,12 +213,12 @@ generate-all: generate-structs proto generate-examples ## Generate structs, prot .PHONY: generate-structs generate-structs: LOCAL_PACKAGES = $(shell go list ./...) generate-structs: ## Update generated code - @echo "--> Running go generate..." + @echo "==> Running go generate..." @go generate $(LOCAL_PACKAGES) .PHONY: proto proto: ## Generate protobuf bindings - @echo "--> Generating proto bindings..." + @echo "==> Generating proto bindings..." @buf --config tools/buf/buf.yaml --template tools/buf/buf.gen.yaml generate .PHONY: generate-examples @@ -232,7 +235,7 @@ changelog: ## Generate changelog from entries ## that do not successfully compile without rendering .PHONY: hclfmt hclfmt: ## Format HCL files with hclfmt - @echo "--> Formatting HCL" + @echo "==> Formatting HCL" @find . -name '.terraform' -prune \ -o -name 'upstart.nomad' -prune \ -o -name '.git' -prune \ @@ -245,10 +248,10 @@ hclfmt: ## Format HCL files with hclfmt .PHONY: tidy tidy: ## Tidy up the go mod files - @echo "--> Tidy up submodules" + @echo "==> Tidy up submodules" @cd tools && go mod tidy @cd api && go mod tidy - @echo "--> Tidy nomad module" + @echo "==> Tidy nomad module" @go mod tidy .PHONY: dev @@ -357,24 +360,24 @@ testcluster: ## Bring up a Linux test cluster using Vagrant. Set PROVIDER if nec .PHONY: static-assets static-assets: ## Compile the static routes to serve alongside the API - @echo "--> Generating static assets" + @echo "==> Generating static assets" @go-bindata-assetfs -pkg agent -prefix ui -modtime 1480000000 -tags ui -o bindata_assetfs.go ./ui/dist/... 
@mv bindata_assetfs.go command/agent .PHONY: test-ui test-ui: ## Run Nomad UI test suite - @echo "--> Installing JavaScript assets" + @echo "==> Installing JavaScript assets" @cd ui && npm rebuild node-sass @cd ui && yarn install - @echo "--> Running ember tests" + @echo "==> Running ember tests" @cd ui && npm test .PHONY: ember-dist ember-dist: ## Build the static UI assets from source - @echo "--> Installing JavaScript assets" + @echo "==> Installing JavaScript assets" @cd ui && yarn install --silent --network-timeout 300000 @cd ui && npm rebuild node-sass - @echo "--> Building Ember application" + @echo "==> Building Ember application" @cd ui && npm run build .PHONY: dev-ui diff --git a/README.md b/README.md index 13e3bc29034..c9dadcf1970 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -Nomad [![Build Status](https://circleci.com/gh/hashicorp/nomad.svg?style=svg)](https://circleci.com/gh/hashicorp/nomad) [![Discuss](https://img.shields.io/badge/discuss-nomad-00BC7F?style=flat)](https://discuss.hashicorp.com/c/nomad) +Nomad +[![License: MPL 2.0](https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg)](LICENSE) +[![Discuss](https://img.shields.io/badge/discuss-nomad-00BC7F?style=flat)](https://discuss.hashicorp.com/c/nomad) ===

@@ -14,8 +16,6 @@ Nomad is supported on Linux, Windows, and macOS. A commercial version of Nomad, * Website: https://nomadproject.io * Tutorials: [HashiCorp Learn](https://learn.hashicorp.com/nomad) * Forum: [Discuss](https://discuss.hashicorp.com/c/nomad) -* Mailing List: [Google Groups](https://groups.google.com/group/nomad-tool) -* Gitter: [hashicorp-nomad](https://gitter.im/hashicorp-nomad/Lobby) Nomad provides several key features: diff --git a/acl/acl_test.go b/acl/acl_test.go index 57c3cfa2b51..51c1a618972 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -586,6 +586,31 @@ func TestSecureVariablesMatching(t *testing.T) { op: "read", allow: false, }, + { + name: "wildcard with more specific denied path", + policy: `namespace "ns" { + secure_variables { + path "*" { capabilities = ["list"] } + path "system/*" { capabilities = ["deny"] }}}`, + ns: "ns", + path: "system/not-allowed", + op: "list", + allow: false, + }, + { + name: "multiple namespace with overlapping paths", + policy: `namespace "ns" { + secure_variables { + path "*" { capabilities = ["list"] } + path "system/*" { capabilities = ["deny"] }}} + namespace "prod" { + secure_variables { + path "*" { capabilities = ["list"]}}}`, + ns: "prod", + path: "system/is-allowed", + op: "list", + allow: true, + }, } for _, tc := range tests { diff --git a/acl/policy.go b/acl/policy.go index d74df4437d6..7059f8b1782 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -75,6 +75,7 @@ const ( SecureVariablesCapabilityRead = "read" SecureVariablesCapabilityWrite = "write" SecureVariablesCapabilityDestroy = "destroy" + SecureVariablesCapabilityDeny = "deny" ) // Policy represents a parsed HCL or JSON policy. @@ -187,7 +188,7 @@ func isNamespaceCapabilityValid(cap string) bool { func isPathCapabilityValid(cap string) bool { switch cap { case SecureVariablesCapabilityWrite, SecureVariablesCapabilityRead, - SecureVariablesCapabilityList, SecureVariablesCapabilityDestroy: + SecureVariablesCapabilityList, SecureVariablesCapabilityDestroy, SecureVariablesCapabilityDeny: return true default: return false @@ -269,6 +270,8 @@ func expandSecureVariablesCapabilities(caps []string) []string { var foundRead, foundList bool for _, cap := range caps { switch cap { + case SecureVariablesCapabilityDeny: + return []string{SecureVariablesCapabilityDeny} case SecureVariablesCapabilityRead: foundRead = true case SecureVariablesCapabilityList: @@ -325,14 +328,17 @@ func Parse(rules string) (*Policy, error) { } if ns.SecureVariables != nil { + if len(ns.SecureVariables.Paths) == 0 { + return nil, fmt.Errorf("Invalid secure variable policy: no secure variable paths in namespace %s", ns.Name) + } for _, pathPolicy := range ns.SecureVariables.Paths { if pathPolicy.PathSpec == "" { - return nil, fmt.Errorf("Invalid missing secure variable path in namespace %#v", ns) + return nil, fmt.Errorf("Invalid missing secure variable path in namespace %s", ns.Name) } for _, cap := range pathPolicy.Capabilities { if !isPathCapabilityValid(cap) { return nil, fmt.Errorf( - "Invalid secure variable capability '%s' in namespace %#v", cap, ns) + "Invalid secure variable capability '%s' in namespace %s", cap, ns.Name) } } pathPolicy.Capabilities = expandSecureVariablesCapabilities(pathPolicy.Capabilities) diff --git a/acl/policy_test.go b/acl/policy_test.go index 4ad25fbf943..bd8f6eba6fa 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -198,6 +198,17 @@ func TestParse(t *testing.T) { "Invalid namespace policy", nil, }, + { + ` + namespace "dev" { + secure_variables "*" { + 
capabilities = ["read", "write"] + } + } + `, + "Invalid secure variable policy: no secure variable paths in namespace dev", + nil, + }, { ` namespace "default" { diff --git a/api/acl.go b/api/acl.go index 0562a87d940..f3a3afc2017 100644 --- a/api/acl.go +++ b/api/acl.go @@ -2,7 +2,6 @@ package api import ( "errors" - "fmt" "time" ) @@ -29,7 +28,7 @@ func (a *ACLPolicies) List(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, e // Upsert is used to create or update a policy func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, error) { if policy == nil || policy.Name == "" { - return nil, fmt.Errorf("missing policy name") + return nil, errors.New("missing policy name") } wm, err := a.client.write("/v1/acl/policy/"+policy.Name, policy, nil, q) if err != nil { @@ -41,7 +40,7 @@ func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, er // Delete is used to delete a policy func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { if policyName == "" { - return nil, fmt.Errorf("missing policy name") + return nil, errors.New("missing policy name") } wm, err := a.client.delete("/v1/acl/policy/"+policyName, nil, nil, q) if err != nil { @@ -53,7 +52,7 @@ func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, er // Info is used to query a specific policy func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { if policyName == "" { - return nil, nil, fmt.Errorf("missing policy name") + return nil, nil, errors.New("missing policy name") } var resp ACLPolicy wm, err := a.client.query("/v1/acl/policy/"+policyName, &resp, q) @@ -114,7 +113,7 @@ func (a *ACLTokens) List(q *QueryOptions) ([]*ACLTokenListStub, *QueryMeta, erro // Create is used to create a token func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { if token.AccessorID != "" { - return nil, nil, fmt.Errorf("cannot specify Accessor ID") + return nil, nil, errors.New("cannot specify Accessor ID") } var resp ACLToken wm, err := a.client.write("/v1/acl/token", token, &resp, q) @@ -127,7 +126,7 @@ func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteM // Update is used to update an existing token func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { if token.AccessorID == "" { - return nil, nil, fmt.Errorf("missing accessor ID") + return nil, nil, errors.New("missing accessor ID") } var resp ACLToken wm, err := a.client.write("/v1/acl/token/"+token.AccessorID, @@ -141,7 +140,7 @@ func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteM // Delete is used to delete a token func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, error) { if accessorID == "" { - return nil, fmt.Errorf("missing accessor ID") + return nil, errors.New("missing accessor ID") } wm, err := a.client.delete("/v1/acl/token/"+accessorID, nil, nil, q) if err != nil { @@ -153,7 +152,7 @@ func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, erro // Info is used to query a token func (a *ACLTokens) Info(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) { if accessorID == "" { - return nil, nil, fmt.Errorf("missing accessor ID") + return nil, nil, errors.New("missing accessor ID") } var resp ACLToken wm, err := a.client.query("/v1/acl/token/"+accessorID, &resp, q) @@ -181,7 +180,7 @@ func (a *ACLTokens) UpsertOneTimeToken(q *WriteOptions) (*OneTimeToken, 
*WriteMe return nil, nil, err } if resp == nil { - return nil, nil, fmt.Errorf("no one-time token returned") + return nil, nil, errors.New("no one-time token returned") } return resp.OneTimeToken, wm, nil } @@ -189,7 +188,7 @@ func (a *ACLTokens) UpsertOneTimeToken(q *WriteOptions) (*OneTimeToken, *WriteMe // ExchangeOneTimeToken is used to create a one-time token func (a *ACLTokens) ExchangeOneTimeToken(secret string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { if secret == "" { - return nil, nil, fmt.Errorf("missing secret ID") + return nil, nil, errors.New("missing secret ID") } req := &OneTimeTokenExchangeRequest{OneTimeSecretID: secret} var resp *OneTimeTokenExchangeResponse @@ -198,7 +197,7 @@ func (a *ACLTokens) ExchangeOneTimeToken(secret string, q *WriteOptions) (*ACLTo return nil, nil, err } if resp == nil { - return nil, nil, fmt.Errorf("no ACL token returned") + return nil, nil, errors.New("no ACL token returned") } return resp.Token, wm, nil } @@ -306,10 +305,20 @@ type ACLPolicy struct { Name string Description string Rules string + JobACL *JobACL + CreateIndex uint64 ModifyIndex uint64 } +// JobACL represents an ACL policy's attachment to a job, group, or task. +type JobACL struct { + Namespace string + JobID string + Group string + Task string +} + // ACLToken represents a client token which is used to Authenticate type ACLToken struct { AccessorID string diff --git a/api/allocations.go b/api/allocations.go index 67fc6ca2989..dc2ebb27900 100644 --- a/api/allocations.go +++ b/api/allocations.go @@ -2,16 +2,17 @@ package api import ( "context" - "fmt" + "errors" "io" "sort" + "strings" "time" ) var ( // NodeDownErr marks an operation as not able to complete since the node is // down. - NodeDownErr = fmt.Errorf("node down") + NodeDownErr = errors.New("node down") ) const ( @@ -28,6 +29,10 @@ const ( AllocClientStatusLost = "lost" ) +const ( + AllocRestartReasonWithinPolicy = "Restart within policy" +) + // Allocations is used to query the alloc-related endpoints. type Allocations struct { client *Client @@ -67,15 +72,19 @@ func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *Query // the task environment. // // The parameters are: -// * ctx: context to set deadlines or timeout -// * allocation: the allocation to execute command inside -// * task: the task's name to execute command in -// * tty: indicates whether to start a pseudo-tty for the command -// * stdin, stdout, stderr: the std io to pass to command. -// If tty is true, then streams need to point to a tty that's alive for the whole process -// * terminalSizeCh: A channel to send new tty terminal sizes +// - ctx: context to set deadlines or timeout +// - allocation: the allocation to execute command inside +// - task: the task's name to execute command in +// - tty: indicates whether to start a pseudo-tty for the command +// - stdin, stdout, stderr: the std io to pass to command. +// If tty is true, then streams need to point to a tty that's alive for the whole process +// - terminalSizeCh: A channel to send new tty terminal sizes // // The call blocks until command terminates (or an error occurs), and returns the exit code. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. 
func (a *Allocations) Exec(ctx context.Context, alloc *Allocation, task string, tty bool, command []string, stdin io.Reader, stdout, stderr io.Writer, @@ -99,19 +108,46 @@ func (a *Allocations) Exec(ctx context.Context, return s.run(ctx) } +// Stats gets allocation resource usage statistics about an allocation. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) { var resp AllocResourceUsage - path := fmt.Sprintf("/v1/client/allocation/%s/stats", alloc.ID) - _, err := a.client.query(path, &resp, q) + _, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/stats", &resp, q) return &resp, err } +// Checks gets status information for nomad service checks that exist in the allocation. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. +func (a *Allocations) Checks(allocID string, q *QueryOptions) (AllocCheckStatuses, error) { + var resp AllocCheckStatuses + _, err := a.client.query("/v1/client/allocation/"+allocID+"/checks", &resp, q) + return resp, err +} + +// GC forces a garbage collection of client state for an allocation. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error { var resp struct{} _, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil) return err } +// Restart restarts the tasks that are currently running or a specific task if +// taskName is provided. An error is returned if the task to be restarted is +// not running. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *Allocations) Restart(alloc *Allocation, taskName string, q *QueryOptions) error { req := AllocationRestartRequest{ TaskName: taskName, @@ -122,6 +158,27 @@ func (a *Allocations) Restart(alloc *Allocation, taskName string, q *QueryOption return err } +// RestartAllTasks restarts all tasks in the allocation, regardless of +// lifecycle type or state. Tasks will restart following their lifecycle order. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. +func (a *Allocations) RestartAllTasks(alloc *Allocation, q *QueryOptions) error { + req := AllocationRestartRequest{ + AllTasks: true, + } + + var resp struct{} + _, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/restart", &req, &resp, q) + return err +} + +// Stop stops an allocation. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. 
func (a *Allocations) Stop(alloc *Allocation, q *QueryOptions) (*AllocStopResponse, error) { var resp AllocStopResponse _, err := a.client.putQuery("/v1/allocation/"+alloc.ID+"/stop", nil, &resp, q) @@ -136,6 +193,11 @@ type AllocStopResponse struct { WriteMeta } +// Signal sends a signal to the allocation. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal string) error { req := AllocSignalRequest{ Signal: signal, @@ -403,6 +465,7 @@ func (a Allocation) RescheduleInfo(t time.Time) (int, int) { type AllocationRestartRequest struct { TaskName string + AllTasks bool } type AllocSignalRequest struct { @@ -473,12 +536,12 @@ type ExecStreamingInput struct { TTYSize *TerminalSize `json:"tty_size,omitempty"` } -// ExecStreamingExitResults captures the exit code of just completed nomad exec command +// ExecStreamingExitResult captures the exit code of just completed nomad exec command type ExecStreamingExitResult struct { ExitCode int `json:"exit_code"` } -// ExecStreamingInput represents an output streaming entity, e.g. stdout/stderr update or termination +// ExecStreamingOutput represents an output streaming entity, e.g. stdout/stderr update or termination // // At most one of these fields should be set: `Stdout`, `Stderr`, or `Result`. // If `Exited` is true, then `Result` is non-nil, and other fields are nil. @@ -489,3 +552,12 @@ type ExecStreamingOutput struct { Exited bool `json:"exited,omitempty"` Result *ExecStreamingExitResult `json:"result,omitempty"` } + +func AllocSuffix(name string) string { + idx := strings.LastIndex(name, "[") + if idx == -1 { + return "" + } + suffix := name[idx:] + return suffix +} diff --git a/api/allocations_test.go b/api/allocations_test.go index 4f2993e82f2..3af6c54b246 100644 --- a/api/allocations_test.go +++ b/api/allocations_test.go @@ -159,13 +159,13 @@ func TestAllocations_RescheduleInfo(t *testing.T) { testutil.Parallel(t) // Create a job, task group and alloc job := &Job{ - Name: stringToPtr("foo"), - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Tasks: []*Task{ { Name: "task1", @@ -205,8 +205,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) { { desc: "no reschedule events", reschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(3), - Interval: timeToPtr(15 * time.Minute), + Attempts: pointerOf(3), + Interval: pointerOf(15 * time.Minute), }, expAttempted: 0, expTotal: 3, @@ -214,8 +214,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) { { desc: "all reschedule events within interval", reschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(3), - Interval: timeToPtr(15 * time.Minute), + Attempts: pointerOf(3), + Interval: pointerOf(15 * time.Minute), }, time: time.Now(), rescheduleTracker: &RescheduleTracker{ @@ -231,8 +231,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) { { desc: "some reschedule events outside interval", reschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(3), - Interval: timeToPtr(15 * time.Minute), + Attempts: pointerOf(3), + Interval: pointerOf(15 * time.Minute), }, time: time.Now(), rescheduleTracker: &RescheduleTracker{ @@ 
-276,13 +276,13 @@ func TestAllocations_ExecErrors(t *testing.T) { a := c.Allocations() job := &Job{ - Name: stringToPtr("foo"), - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Tasks: []*Task{ { Name: "task1", @@ -392,9 +392,9 @@ func TestAllocation_ClientTerminalStatus(t *testing.T) { func TestAllocations_ShouldMigrate(t *testing.T) { testutil.Parallel(t) - require.True(t, DesiredTransition{Migrate: boolToPtr(true)}.ShouldMigrate()) + require.True(t, DesiredTransition{Migrate: pointerOf(true)}.ShouldMigrate()) require.False(t, DesiredTransition{}.ShouldMigrate()) - require.False(t, DesiredTransition{Migrate: boolToPtr(false)}.ShouldMigrate()) + require.False(t, DesiredTransition{Migrate: pointerOf(false)}.ShouldMigrate()) } func TestAllocations_Services(t *testing.T) { diff --git a/api/api.go b/api/api.go index 8d3fc46d87b..9a16a8333d6 100644 --- a/api/api.go +++ b/api/api.go @@ -25,7 +25,9 @@ import ( var ( // ClientConnTimeout is the timeout applied when attempting to contact a // client directly before switching to a connection through the Nomad - // server. + // server. For cluster topologies where API consumers don't have network + // access to Nomad clients, set this to a small value (ex 1ms) to avoid + // pausing on client APIs such as AllocFS. ClientConnTimeout = 1 * time.Second ) @@ -33,6 +35,11 @@ const ( // AllNamespacesNamespace is a sentinel Namespace value to indicate that api should search for // jobs and allocations in all the namespaces the requester can access. AllNamespacesNamespace = "*" + + // PermissionDeniedErrorContent is the string content of an error returned + // by the API which indicates the caller does not have permission to + // perform the action. 
+ PermissionDeniedErrorContent = "Permission denied" ) // QueryOptions are used to parametrize a query @@ -340,9 +347,9 @@ func DefaultConfig() *Config { // otherwise, returns the same client func cloneWithTimeout(httpClient *http.Client, t time.Duration) (*http.Client, error) { if httpClient == nil { - return nil, fmt.Errorf("nil HTTP client") + return nil, errors.New("nil HTTP client") } else if httpClient.Transport == nil { - return nil, fmt.Errorf("nil HTTP client transport") + return nil, errors.New("nil HTTP client transport") } if t.Nanoseconds() < 0 { @@ -393,7 +400,7 @@ func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error { return nil } if httpClient == nil { - return fmt.Errorf("config HTTP Client must be set") + return errors.New("config HTTP Client must be set") } var clientCert tls.Certificate @@ -407,7 +414,7 @@ func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error { } foundClientCert = true } else { - return fmt.Errorf("Both client cert and client key must be provided") + return errors.New("Both client cert and client key must be provided") } } else if len(tlsConfig.ClientCertPEM) != 0 || len(tlsConfig.ClientKeyPEM) != 0 { if len(tlsConfig.ClientCertPEM) != 0 && len(tlsConfig.ClientKeyPEM) != 0 { @@ -418,7 +425,7 @@ func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error { } foundClientCert = true } else { - return fmt.Errorf("Both client cert and client key must be provided") + return errors.New("Both client cert and client key must be provided") } } @@ -844,7 +851,7 @@ func (c *Client) websocket(endpoint string, q *QueryOptions) (*websocket.Conn, * transport, ok := c.httpClient.Transport.(*http.Transport) if !ok { - return nil, nil, fmt.Errorf("unsupported transport") + return nil, nil, errors.New("unsupported transport") } dialer := websocket.Dialer{ ReadBufferSize: 4096, @@ -1091,9 +1098,10 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h } if resp.StatusCode != 200 { var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + _, _ = io.Copy(&buf, resp.Body) + _ = resp.Body.Close() + body := strings.TrimSpace(buf.String()) + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, body) } return d, resp, nil } diff --git a/api/api_test.go b/api/api_test.go index ce845c10fbe..8987f534d5a 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -501,7 +501,7 @@ func TestCloneHttpClient(t *testing.T) { client := defaultHttpClient() originalTransport := client.Transport.(*http.Transport) originalTransport.Proxy = func(*http.Request) (*url.URL, error) { - return nil, fmt.Errorf("stub function") + return nil, errors.New("stub function") } t.Run("closing with negative timeout", func(t *testing.T) { diff --git a/api/compose_test.go b/api/compose_test.go index c70b244cdc5..2fe750a6da1 100644 --- a/api/compose_test.go +++ b/api/compose_test.go @@ -15,13 +15,13 @@ func TestCompose(t *testing.T) { SetMeta("foo", "bar"). Constrain(NewConstraint("kernel.name", "=", "linux")). 
Require(&Resources{ - CPU: intToPtr(1250), - MemoryMB: intToPtr(1024), - DiskMB: intToPtr(2048), + CPU: pointerOf(1250), + MemoryMB: pointerOf(1024), + DiskMB: pointerOf(2048), Networks: []*NetworkResource{ { CIDR: "0.0.0.0/0", - MBits: intToPtr(100), + MBits: pointerOf(100), ReservedPorts: []Port{{"", 80, 0, ""}, {"", 443, 0, ""}}, }, }, @@ -47,11 +47,11 @@ func TestCompose(t *testing.T) { // Check that the composed result looks correct expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeService), - Priority: intToPtr(2), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeService), + Priority: pointerOf(2), Datacenters: []string{ "dc1", }, @@ -67,8 +67,8 @@ func TestCompose(t *testing.T) { }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("grp1"), - Count: intToPtr(2), + Name: pointerOf("grp1"), + Count: pointerOf(2), Constraints: []*Constraint{ { LTarget: "kernel.name", @@ -81,13 +81,13 @@ func TestCompose(t *testing.T) { LTarget: "${node.class}", RTarget: "large", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, }, Spreads: []*Spread{ { Attribute: "${node.datacenter}", - Weight: int8ToPtr(30), + Weight: pointerOf(int8(30)), SpreadTarget: []*SpreadTarget{ { Value: "dc1", @@ -105,13 +105,13 @@ func TestCompose(t *testing.T) { Name: "task1", Driver: "exec", Resources: &Resources{ - CPU: intToPtr(1250), - MemoryMB: intToPtr(1024), - DiskMB: intToPtr(2048), + CPU: pointerOf(1250), + MemoryMB: pointerOf(1024), + DiskMB: pointerOf(2048), Networks: []*NetworkResource{ { CIDR: "0.0.0.0/0", - MBits: intToPtr(100), + MBits: pointerOf(100), ReservedPorts: []Port{ {"", 80, 0, ""}, {"", 443, 0, ""}, diff --git a/api/consul.go b/api/consul.go index db25a16640e..9a76bfb3294 100644 --- a/api/consul.go +++ b/api/consul.go @@ -1,6 +1,8 @@ package api -import "time" +import ( + "time" +) // Consul represents configuration related to consul. 
type Consul struct { @@ -121,11 +123,11 @@ func (st *SidecarTask) Canonicalize() { } if st.KillTimeout == nil { - st.KillTimeout = timeToPtr(5 * time.Second) + st.KillTimeout = pointerOf(5 * time.Second) } if st.ShutdownDelay == nil { - st.ShutdownDelay = timeToPtr(0) + st.ShutdownDelay = pointerOf(time.Duration(0)) } } @@ -313,7 +315,7 @@ func (p *ConsulGatewayProxy) Canonicalize() { if p.ConnectTimeout == nil { // same as the default from consul - p.ConnectTimeout = timeToPtr(defaultGatewayConnectTimeout) + p.ConnectTimeout = pointerOf(defaultGatewayConnectTimeout) } if len(p.EnvoyGatewayBindAddresses) == 0 { @@ -347,7 +349,7 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { } return &ConsulGatewayProxy{ - ConnectTimeout: timeToPtr(*p.ConnectTimeout), + ConnectTimeout: pointerOf(*p.ConnectTimeout), EnvoyGatewayBindTaggedAddresses: p.EnvoyGatewayBindTaggedAddresses, EnvoyGatewayBindAddresses: binds, EnvoyGatewayNoDefaultBind: p.EnvoyGatewayNoDefaultBind, diff --git a/api/consul_test.go b/api/consul_test.go index 2556ab662ce..ce27572957f 100644 --- a/api/consul_test.go +++ b/api/consul_test.go @@ -39,7 +39,7 @@ func TestConsul_MergeNamespace(t *testing.T) { testutil.Parallel(t) t.Run("already set", func(t *testing.T) { a := &Consul{Namespace: "foo"} - ns := stringToPtr("bar") + ns := pointerOf("bar") a.MergeNamespace(ns) require.Equal(t, "foo", a.Namespace) require.Equal(t, "bar", *ns) @@ -47,7 +47,7 @@ func TestConsul_MergeNamespace(t *testing.T) { t.Run("inherit", func(t *testing.T) { a := &Consul{Namespace: ""} - ns := stringToPtr("bar") + ns := pointerOf("bar") a.MergeNamespace(ns) require.Equal(t, "bar", a.Namespace) require.Equal(t, "bar", *ns) @@ -228,9 +228,9 @@ func TestSidecarTask_Canonicalize(t *testing.T) { t.Run("non empty sidecar_task resources", func(t *testing.T) { exp := DefaultResources() - exp.MemoryMB = intToPtr(333) + exp.MemoryMB = pointerOf(333) st := &SidecarTask{ - Resources: &Resources{MemoryMB: intToPtr(333)}, + Resources: &Resources{MemoryMB: pointerOf(333)}, } st.Canonicalize() require.Equal(t, exp, st.Resources) @@ -263,7 +263,7 @@ func TestConsulGateway_Canonicalize(t *testing.T) { }, } cg.Canonicalize() - require.Equal(t, timeToPtr(5*time.Second), cg.Proxy.ConnectTimeout) + require.Equal(t, pointerOf(5*time.Second), cg.Proxy.ConnectTimeout) require.True(t, cg.Proxy.EnvoyGatewayBindTaggedAddresses) require.Nil(t, cg.Proxy.EnvoyGatewayBindAddresses) require.True(t, cg.Proxy.EnvoyGatewayNoDefaultBind) @@ -283,7 +283,7 @@ func TestConsulGateway_Copy(t *testing.T) { gateway := &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: timeToPtr(3 * time.Second), + ConnectTimeout: pointerOf(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "listener1": {Address: "10.0.0.1", Port: 2000}, diff --git a/api/csi.go b/api/csi.go index 5eb5d1b71b3..c9d355d625e 100644 --- a/api/csi.go +++ b/api/csi.go @@ -229,6 +229,11 @@ const ( CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" ) +const ( + CSIVolumeTypeHost = "host" + CSIVolumeTypeCSI = "csi" +) + // CSIMountOptions contain optional additional configuration that can be used // when specifying that a Volume should be used with VolumeAccessTypeMount. 
type CSIMountOptions struct { @@ -244,6 +249,18 @@ type CSIMountOptions struct { ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` // report unexpected keys } +func (o *CSIMountOptions) Merge(p *CSIMountOptions) { + if p == nil { + return + } + if p.FSType != "" { + o.FSType = p.FSType + } + if p.MountFlags != nil { + o.MountFlags = p.MountFlags + } +} + // CSISecrets contain optional additional credentials that may be needed by // the storage provider. These values will be redacted when reported in the // API or in Nomad's logs. diff --git a/api/deployments.go b/api/deployments.go index 4a4844246fa..1c8011a6fbb 100644 --- a/api/deployments.go +++ b/api/deployments.go @@ -136,6 +136,17 @@ func (d *Deployments) SetAllocHealth(deploymentID string, healthy, unhealthy []s return &resp, wm, nil } +const ( + DeploymentStatusRunning = "running" + DeploymentStatusPaused = "paused" + DeploymentStatusFailed = "failed" + DeploymentStatusSuccessful = "successful" + DeploymentStatusCancelled = "cancelled" + DeploymentStatusPending = "pending" + DeploymentStatusBlocked = "blocked" + DeploymentStatusUnblocking = "unblocking" +) + // Deployment is used to serialize an deployment. type Deployment struct { // ID is a generated UUID for the deployment diff --git a/api/evaluations.go b/api/evaluations.go index 1acf70db7d9..bcb0eb2b960 100644 --- a/api/evaluations.go +++ b/api/evaluations.go @@ -64,6 +64,14 @@ func (e *Evaluations) Allocations(evalID string, q *QueryOptions) ([]*Allocation return resp, qm, nil } +const ( + EvalStatusBlocked = "blocked" + EvalStatusPending = "pending" + EvalStatusComplete = "complete" + EvalStatusFailed = "failed" + EvalStatusCancelled = "canceled" +) + // Evaluation is used to serialize an evaluation. type Evaluation struct { ID string diff --git a/api/event_stream_test.go b/api/event_stream_test.go index d0f55f91f47..72c742cac3b 100644 --- a/api/event_stream_test.go +++ b/api/event_stream_test.go @@ -240,8 +240,8 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) { j, err := event.Job() require.NoError(t, err) require.Equal(t, &Job{ - ID: stringToPtr("some-id"), - Namespace: stringToPtr("some-namespace-id"), + ID: pointerOf("some-id"), + Namespace: pointerOf("some-namespace-id"), }, j) }, }, diff --git a/api/fs.go b/api/fs.go index e0a8383e9fb..8b63a388c9d 100644 --- a/api/fs.go +++ b/api/fs.go @@ -51,7 +51,10 @@ func (c *Client) AllocFS() *AllocFS { return &AllocFS{client: c} } -// List is used to list the files at a given path of an allocation directory +// List is used to list the files at a given path of an allocation directory. +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*AllocFileInfo, *QueryMeta, error) { if q == nil { q = &QueryOptions{} @@ -70,7 +73,10 @@ func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*Allo return resp, qm, nil } -// Stat is used to stat a file at a given path of an allocation directory +// Stat is used to stat a file at a given path of an allocation directory. +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. 
func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocFileInfo, *QueryMeta, error) { if q == nil { q = &QueryOptions{} @@ -91,6 +97,9 @@ func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocF // ReadAt is used to read bytes at a given offset until limit at the given path // in an allocation directory. If limit is <= 0, there is no limit. +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) { reqPath := fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID) @@ -103,7 +112,10 @@ func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int } // Cat is used to read contents of a file at the given path in an allocation -// directory +// directory. +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) { reqPath := fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID) return queryClientNode(a.client, alloc, reqPath, q, @@ -120,6 +132,10 @@ func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadC // * cancel: A channel that when closed, streaming will end. // // The return value is a channel that will emit StreamFrames as they are read. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { @@ -224,6 +240,10 @@ func queryClientNode(c *Client, alloc *Allocation, reqPath string, q *QueryOptio // reached. // // Unexpected (non-EOF) errors will be sent on the error chan. +// +// Note: for cluster topologies where API consumers don't have network access to +// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid +// long pauses on this API call. 
func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin string, offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { diff --git a/api/fs_test.go b/api/fs_test.go index 449239f9654..fc03a9f764e 100644 --- a/api/fs_test.go +++ b/api/fs_test.go @@ -2,6 +2,7 @@ package api import ( "bytes" + "errors" "fmt" "io" "reflect" @@ -47,7 +48,7 @@ func TestFS_Logs(t *testing.T) { return false, fmt.Errorf("node not ready: %s", nodes[0].Status) } if _, ok := nodes[0].Drivers["mock_driver"]; !ok { - return false, fmt.Errorf("mock_driver not ready") + return false, errors.New("mock_driver not ready") } return true, nil }, func(err error) { @@ -62,13 +63,13 @@ func TestFS_Logs(t *testing.T) { } job := &Job{ - ID: stringToPtr("TestFS_Logs"), - Region: stringToPtr("global"), + ID: pointerOf("TestFS_Logs"), + Region: pointerOf("global"), Datacenters: []string{"dc1"}, - Type: stringToPtr("batch"), + Type: pointerOf("batch"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("TestFS_LogsGroup"), + Name: pointerOf("TestFS_LogsGroup"), Tasks: []*Task{ { Name: "logger", @@ -279,7 +280,7 @@ func TestFS_FrameReader_Error(t *testing.T) { r.SetUnblockTime(10 * time.Millisecond) // Send an error - expected := fmt.Errorf("test error") + expected := errors.New("test error") errCh <- expected // Read a little diff --git a/api/go.mod b/api/go.mod index a478988b765..12cc0c25595 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/nomad/api -go 1.17 +go 1.19 require ( github.com/docker/go-units v0.4.0 @@ -11,11 +11,13 @@ require ( github.com/kr/pretty v0.3.0 github.com/mitchellh/go-testing-interface v1.14.1 github.com/mitchellh/mapstructure v1.5.0 + github.com/shoenig/test v0.3.1 github.com/stretchr/testify v1.8.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/api/go.sum b/api/go.sum index 01a972f5fca..098f2e8cd37 100644 --- a/api/go.sum +++ b/api/go.sum @@ -4,6 +4,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= @@ -29,6 +31,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/shoenig/test v0.3.1 h1:dhGZztS6nQuvJ0o0RtUiQHaEO4hhArh/WmWwik3Ols0= +github.com/shoenig/test v0.3.1/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= diff --git a/api/internal/testutil/freeport/freeport.go b/api/internal/testutil/freeport/freeport.go index f21698de723..3c08df5c222 100644 --- a/api/internal/testutil/freeport/freeport.go +++ b/api/internal/testutil/freeport/freeport.go @@ -3,7 +3,7 @@ package freeport import ( - "fmt" + "errors" "math/rand" "net" "sync" @@ -110,7 +110,7 @@ func Free(n int) (ports []int, err error) { defer mu.Unlock() if n > blockSize-1 { - return nil, fmt.Errorf("freeport: block size too small") + return nil, errors.New("freeport: block size too small") } // Reserve a port block diff --git a/api/ioutil.go b/api/ioutil.go index 4f585dba061..fe3cce5ac87 100644 --- a/api/ioutil.go +++ b/api/ioutil.go @@ -5,13 +5,13 @@ import ( "crypto/sha256" "crypto/sha512" "encoding/base64" - "fmt" + "errors" "hash" "io" "strings" ) -var errMismatchChecksum = fmt.Errorf("mismatch checksum") +var errMismatchChecksum = errors.New("mismatch checksum") // checksumValidatingReader is a wrapper reader that validates // the checksum of the underlying reader. @@ -38,7 +38,7 @@ type checksumValidatingReader struct { func newChecksumValidatingReader(r io.ReadCloser, digest string) (io.ReadCloser, error) { parts := strings.SplitN(digest, "=", 2) if len(parts) != 2 { - return nil, fmt.Errorf("invalid digest format") + return nil, errors.New("invalid digest format") } algo := parts[0] diff --git a/api/ioutil_test.go b/api/ioutil_test.go index 97e43f694ad..99a18b66f42 100644 --- a/api/ioutil_test.go +++ b/api/ioutil_test.go @@ -5,7 +5,7 @@ import ( "crypto/sha256" "crypto/sha512" "encoding/base64" - "fmt" + "errors" "hash" "io" "io/ioutil" @@ -73,7 +73,7 @@ func TestChecksumValidatingReader_PropagatesError(t *testing.T) { defer pr.Close() defer pw.Close() - expectedErr := fmt.Errorf("some error") + expectedErr := errors.New("some error") go func() { pw.Write([]byte("some input")) diff --git a/api/jobs.go b/api/jobs.go index c781c628a05..4fc048dd51c 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -1,6 +1,7 @@ package api import ( + "errors" "fmt" "net/url" "sort" @@ -44,6 +45,16 @@ const ( RegisterEnforceIndexErrPrefix = "Enforcing job modify index" ) +const ( + // JobPeriodicLaunchSuffix is the string appended to the periodic jobs ID + // when launching derived instances of it. + JobPeriodicLaunchSuffix = "/periodic-" + + // JobDispatchLaunchSuffix is the string appended to the parameterized job's ID + // when dispatching instances of it. + JobDispatchLaunchSuffix = "/dispatch-" +) + // Jobs is used to access the job-specific endpoints. type Jobs struct { client *Client @@ -178,7 +189,7 @@ func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool var count64 *int64 if count != nil { - count64 = int64ToPtr(int64(*count)) + count64 = pointerOf(int64(*count)) } req := &ScalingRequest{ Count: count64, @@ -390,7 +401,7 @@ func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *Wr func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { if job == nil { - return nil, nil, fmt.Errorf("must pass non-nil job") + return nil, nil, errors.New("must pass non-nil job") } // Setup the request @@ -502,15 +513,15 @@ type UpdateStrategy struct { // jobs with the old policy or for populating field defaults. 
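Most of what follows in jobs.go and the other api files is a mechanical swap of the old type-specific pointer helpers (stringToPtr, intToPtr, boolToPtr, timeToPtr, int64ToPtr, uint64ToPtr, int8ToPtr) for a single generic `pointerOf` helper, made possible by the Go toolchain bump in api/go.mod. The helper's definition is not shown in these hunks; a minimal sketch of what such a function looks like:

```go
// pointerOf returns a pointer to a copy of the given value. With generics
// (Go 1.18+), one helper replaces the whole family of type-specific
// *ToPtr functions that the rest of this diff removes.
func pointerOf[T any](v T) *T {
	return &v
}
```

Callers now spell out the element type when it is not Go's default for the literal, which is why the diff writes `pointerOf(uint64(0))` and `pointerOf(time.Duration(0))` where the old code used `uint64ToPtr(0)` and `timeToPtr(0)`.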
func DefaultUpdateStrategy() *UpdateStrategy { return &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), } } @@ -522,39 +533,39 @@ func (u *UpdateStrategy) Copy() *UpdateStrategy { copy := new(UpdateStrategy) if u.Stagger != nil { - copy.Stagger = timeToPtr(*u.Stagger) + copy.Stagger = pointerOf(*u.Stagger) } if u.MaxParallel != nil { - copy.MaxParallel = intToPtr(*u.MaxParallel) + copy.MaxParallel = pointerOf(*u.MaxParallel) } if u.HealthCheck != nil { - copy.HealthCheck = stringToPtr(*u.HealthCheck) + copy.HealthCheck = pointerOf(*u.HealthCheck) } if u.MinHealthyTime != nil { - copy.MinHealthyTime = timeToPtr(*u.MinHealthyTime) + copy.MinHealthyTime = pointerOf(*u.MinHealthyTime) } if u.HealthyDeadline != nil { - copy.HealthyDeadline = timeToPtr(*u.HealthyDeadline) + copy.HealthyDeadline = pointerOf(*u.HealthyDeadline) } if u.ProgressDeadline != nil { - copy.ProgressDeadline = timeToPtr(*u.ProgressDeadline) + copy.ProgressDeadline = pointerOf(*u.ProgressDeadline) } if u.AutoRevert != nil { - copy.AutoRevert = boolToPtr(*u.AutoRevert) + copy.AutoRevert = pointerOf(*u.AutoRevert) } if u.Canary != nil { - copy.Canary = intToPtr(*u.Canary) + copy.Canary = pointerOf(*u.Canary) } if u.AutoPromote != nil { - copy.AutoPromote = boolToPtr(*u.AutoPromote) + copy.AutoPromote = pointerOf(*u.AutoPromote) } return copy @@ -566,39 +577,39 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) { } if o.Stagger != nil { - u.Stagger = timeToPtr(*o.Stagger) + u.Stagger = pointerOf(*o.Stagger) } if o.MaxParallel != nil { - u.MaxParallel = intToPtr(*o.MaxParallel) + u.MaxParallel = pointerOf(*o.MaxParallel) } if o.HealthCheck != nil { - u.HealthCheck = stringToPtr(*o.HealthCheck) + u.HealthCheck = pointerOf(*o.HealthCheck) } if o.MinHealthyTime != nil { - u.MinHealthyTime = timeToPtr(*o.MinHealthyTime) + u.MinHealthyTime = pointerOf(*o.MinHealthyTime) } if o.HealthyDeadline != nil { - u.HealthyDeadline = timeToPtr(*o.HealthyDeadline) + u.HealthyDeadline = pointerOf(*o.HealthyDeadline) } if o.ProgressDeadline != nil { - u.ProgressDeadline = timeToPtr(*o.ProgressDeadline) + u.ProgressDeadline = pointerOf(*o.ProgressDeadline) } if o.AutoRevert != nil { - u.AutoRevert = boolToPtr(*o.AutoRevert) + u.AutoRevert = pointerOf(*o.AutoRevert) } if o.Canary != nil { - u.Canary = intToPtr(*o.Canary) + u.Canary = pointerOf(*o.Canary) } if o.AutoPromote != nil { - u.AutoPromote = boolToPtr(*o.AutoPromote) + u.AutoPromote = pointerOf(*o.AutoPromote) } } @@ -695,15 +706,15 @@ type Multiregion struct { func (m *Multiregion) Canonicalize() { if m.Strategy == nil { m.Strategy = &MultiregionStrategy{ - MaxParallel: intToPtr(0), - OnFailure: stringToPtr(""), + MaxParallel: pointerOf(0), + OnFailure: pointerOf(""), } } else { if m.Strategy.MaxParallel == nil { - m.Strategy.MaxParallel = intToPtr(0) + m.Strategy.MaxParallel = pointerOf(0) } if m.Strategy.OnFailure == nil { - m.Strategy.OnFailure = 
stringToPtr("") + m.Strategy.OnFailure = pointerOf("") } } if m.Regions == nil { @@ -711,7 +722,7 @@ func (m *Multiregion) Canonicalize() { } for _, region := range m.Regions { if region.Count == nil { - region.Count = intToPtr(1) + region.Count = pointerOf(1) } if region.Datacenters == nil { region.Datacenters = []string{} @@ -729,13 +740,13 @@ func (m *Multiregion) Copy() *Multiregion { copy := new(Multiregion) if m.Strategy != nil { copy.Strategy = new(MultiregionStrategy) - copy.Strategy.MaxParallel = intToPtr(*m.Strategy.MaxParallel) - copy.Strategy.OnFailure = stringToPtr(*m.Strategy.OnFailure) + copy.Strategy.MaxParallel = pointerOf(*m.Strategy.MaxParallel) + copy.Strategy.OnFailure = pointerOf(*m.Strategy.OnFailure) } for _, region := range m.Regions { copyRegion := new(MultiregionRegion) copyRegion.Name = region.Name - copyRegion.Count = intToPtr(*region.Count) + copyRegion.Count = pointerOf(*region.Count) copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...) for k, v := range region.Meta { copyRegion.Meta[k] = v @@ -768,19 +779,19 @@ type PeriodicConfig struct { func (p *PeriodicConfig) Canonicalize() { if p.Enabled == nil { - p.Enabled = boolToPtr(true) + p.Enabled = pointerOf(true) } if p.Spec == nil { - p.Spec = stringToPtr("") + p.Spec = pointerOf("") } if p.SpecType == nil { - p.SpecType = stringToPtr(PeriodicSpecCron) + p.SpecType = pointerOf(PeriodicSpecCron) } if p.ProhibitOverlap == nil { - p.ProhibitOverlap = boolToPtr(false) + p.ProhibitOverlap = pointerOf(false) } if p.TimeZone == nil || *p.TimeZone == "" { - p.TimeZone = stringToPtr("UTC") + p.TimeZone = pointerOf("UTC") } } @@ -893,70 +904,70 @@ func (j *Job) IsMultiregion() bool { func (j *Job) Canonicalize() { if j.ID == nil { - j.ID = stringToPtr("") + j.ID = pointerOf("") } if j.Name == nil { - j.Name = stringToPtr(*j.ID) + j.Name = pointerOf(*j.ID) } if j.ParentID == nil { - j.ParentID = stringToPtr("") + j.ParentID = pointerOf("") } if j.Namespace == nil { - j.Namespace = stringToPtr(DefaultNamespace) + j.Namespace = pointerOf(DefaultNamespace) } if j.Priority == nil { - j.Priority = intToPtr(50) + j.Priority = pointerOf(50) } if j.Stop == nil { - j.Stop = boolToPtr(false) + j.Stop = pointerOf(false) } if j.Region == nil { - j.Region = stringToPtr(GlobalRegion) + j.Region = pointerOf(GlobalRegion) } if j.Namespace == nil { - j.Namespace = stringToPtr("default") + j.Namespace = pointerOf("default") } if j.Type == nil { - j.Type = stringToPtr("service") + j.Type = pointerOf("service") } if j.AllAtOnce == nil { - j.AllAtOnce = boolToPtr(false) + j.AllAtOnce = pointerOf(false) } if j.ConsulToken == nil { - j.ConsulToken = stringToPtr("") + j.ConsulToken = pointerOf("") } if j.ConsulNamespace == nil { - j.ConsulNamespace = stringToPtr("") + j.ConsulNamespace = pointerOf("") } if j.VaultToken == nil { - j.VaultToken = stringToPtr("") + j.VaultToken = pointerOf("") } if j.VaultNamespace == nil { - j.VaultNamespace = stringToPtr("") + j.VaultNamespace = pointerOf("") } if j.NomadTokenID == nil { - j.NomadTokenID = stringToPtr("") + j.NomadTokenID = pointerOf("") } if j.Status == nil { - j.Status = stringToPtr("") + j.Status = pointerOf("") } if j.StatusDescription == nil { - j.StatusDescription = stringToPtr("") + j.StatusDescription = pointerOf("") } if j.Stable == nil { - j.Stable = boolToPtr(false) + j.Stable = pointerOf(false) } if j.Version == nil { - j.Version = uint64ToPtr(0) + j.Version = pointerOf(uint64(0)) } if j.CreateIndex == nil { - j.CreateIndex = uint64ToPtr(0) + j.CreateIndex 
= pointerOf(uint64(0)) } if j.ModifyIndex == nil { - j.ModifyIndex = uint64ToPtr(0) + j.ModifyIndex = pointerOf(uint64(0)) } if j.JobModifyIndex == nil { - j.JobModifyIndex = uint64ToPtr(0) + j.JobModifyIndex = pointerOf(uint64(0)) } if j.Periodic != nil { j.Periodic.Canonicalize() diff --git a/api/jobs_test.go b/api/jobs_test.go index 6cc8b3abf17..f6e3b3094c8 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -61,23 +61,23 @@ func TestJobs_Register_PreserveCounts(t *testing.T) { task := NewTask("task", "exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointerOf(100), + MemoryMB: pointerOf(256), }). SetLogConfig(&LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointerOf(1), + MaxFileSizeMB: pointerOf(2), }) group1 := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) group2 := NewTaskGroup("group2", 2). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job := NewBatchJob("job", "redis", "global", 1). @@ -94,11 +94,11 @@ func TestJobs_Register_PreserveCounts(t *testing.T) { // Update the job, new groups to test PreserveCounts group1.Count = nil - group2.Count = intToPtr(0) + group2.Count = pointerOf(0) group3 := NewTaskGroup("group3", 3). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job.AddTaskGroup(group3) @@ -133,23 +133,23 @@ func TestJobs_Register_NoPreserveCounts(t *testing.T) { task := NewTask("task", "exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointerOf(100), + MemoryMB: pointerOf(256), }). SetLogConfig(&LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointerOf(1), + MaxFileSizeMB: pointerOf(2), }) group1 := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) group2 := NewTaskGroup("group2", 2). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job := NewBatchJob("job", "redis", "global", 1). @@ -165,12 +165,12 @@ func TestJobs_Register_NoPreserveCounts(t *testing.T) { assertWriteMeta(t, wm) // Update the job, new groups to test PreserveCounts - group1.Count = intToPtr(0) + group1.Count = pointerOf(0) group2.Count = nil group3 := NewTaskGroup("group3", 3). AddTask(task). 
RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job.AddTaskGroup(group3) @@ -288,79 +288,79 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - ID: stringToPtr(""), - Name: stringToPtr(""), - Region: stringToPtr("global"), - Namespace: stringToPtr(DefaultNamespace), - Type: stringToPtr("service"), - ParentID: stringToPtr(""), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + ID: pointerOf(""), + Name: pointerOf(""), + Region: pointerOf("global"), + Namespace: pointerOf(DefaultNamespace), + Type: pointerOf("service"), + ParentID: pointerOf(""), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr(""), - Count: intToPtr(1), + Name: pointerOf(""), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: 
timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ { - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), LogConfig: DefaultLogConfig(), Resources: DefaultResources(), RestartPolicy: defaultServiceJobRestartPolicy(), @@ -373,7 +373,7 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "batch", input: &Job{ - Type: stringToPtr("batch"), + Type: pointerOf("batch"), TaskGroups: []*TaskGroup{ { Tasks: []*Task{ @@ -383,56 +383,56 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - ID: stringToPtr(""), - Name: stringToPtr(""), - Region: stringToPtr("global"), - Namespace: stringToPtr(DefaultNamespace), - Type: stringToPtr("batch"), - ParentID: stringToPtr(""), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + ID: pointerOf(""), + Name: pointerOf(""), + Region: pointerOf("global"), + Namespace: pointerOf(DefaultNamespace), + Type: pointerOf("batch"), + ParentID: pointerOf(""), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), TaskGroups: []*TaskGroup{ { - Name: stringToPtr(""), - Count: intToPtr(1), + Name: pointerOf(""), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(3), - Interval: timeToPtr(24 * time.Hour), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(3), + Interval: pointerOf(24 * time.Hour), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(24 * time.Hour), - DelayFunction: stringToPtr("constant"), - Delay: timeToPtr(5 * time.Second), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(1), + Interval: pointerOf(24 * time.Hour), + DelayFunction: pointerOf("constant"), + Delay: pointerOf(5 * time.Second), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, Consul: &Consul{ Namespace: "", }, Tasks: []*Task{ { - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), LogConfig: DefaultLogConfig(), Resources: 
DefaultResources(), RestartPolicy: defaultBatchJobRestartPolicy(), @@ -445,13 +445,13 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "partial", input: &Job{ - Name: stringToPtr("foo"), - Namespace: stringToPtr("bar"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + Namespace: pointerOf("bar"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Tasks: []*Task{ { Name: "task1", @@ -461,74 +461,74 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr("bar"), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf("bar"), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), - Count: intToPtr(1), + Name: pointerOf("bar"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: 
pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -536,7 +536,7 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: defaultServiceJobRestartPolicy(), }, }, @@ -547,29 +547,29 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "example_template", input: &Job{ - ID: stringToPtr("example_template"), - Name: stringToPtr("example_template"), + ID: pointerOf("example_template"), + Name: pointerOf("example_template"), Datacenters: []string{"dc1"}, - Type: stringToPtr("service"), + Type: pointerOf("service"), Update: &UpdateStrategy{ - MaxParallel: intToPtr(1), - AutoPromote: boolToPtr(true), + MaxParallel: pointerOf(1), + AutoPromote: pointerOf(true), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("cache"), - Count: intToPtr(1), + Name: pointerOf("cache"), + Count: pointerOf(1), RestartPolicy: &RestartPolicy{ - Interval: timeToPtr(5 * time.Minute), - Attempts: intToPtr(10), - Delay: timeToPtr(25 * time.Second), - Mode: stringToPtr("delay"), + Interval: pointerOf(5 * time.Minute), + Attempts: pointerOf(10), + Delay: pointerOf(25 * time.Second), + Mode: pointerOf("delay"), }, Update: &UpdateStrategy{ - AutoRevert: boolToPtr(true), + AutoRevert: pointerOf(true), }, EphemeralDisk: &EphemeralDisk{ - SizeMB: intToPtr(300), + SizeMB: pointerOf(300), }, Tasks: []*Task{ { @@ -583,14 +583,14 @@ func TestJobs_Canonicalize(t *testing.T) { }, RestartPolicy: &RestartPolicy{ // inherit other values from TG - Attempts: intToPtr(20), + Attempts: pointerOf(20), }, Resources: &Resources{ - CPU: intToPtr(500), - MemoryMB: intToPtr(256), + CPU: pointerOf(500), + MemoryMB: pointerOf(256), Networks: []*NetworkResource{ { - MBits: intToPtr(10), + MBits: pointerOf(10), DynamicPorts: []Port{ { Label: "db", @@ -617,13 +617,13 @@ func TestJobs_Canonicalize(t *testing.T) { }, Templates: []*Template{ { - EmbeddedTmpl: stringToPtr("---"), - DestPath: stringToPtr("local/file.yml"), + EmbeddedTmpl: pointerOf("---"), + DestPath: pointerOf("local/file.yml"), }, { - EmbeddedTmpl: stringToPtr("FOO=bar\n"), - DestPath: stringToPtr("local/file.env"), - Envvars: boolToPtr(true), + EmbeddedTmpl: pointerOf("FOO=bar\n"), + DestPath: pointerOf("local/file.env"), + Envvars: pointerOf(true), }, }, }, @@ -632,75 +632,75 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("example_template"), - Name: stringToPtr("example_template"), - ParentID: stringToPtr(""), - Priority: intToPtr(50), - Region: stringToPtr("global"), - Type: 
stringToPtr("service"), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("example_template"), + Name: pointerOf("example_template"), + ParentID: pointerOf(""), + Priority: pointerOf(50), + Region: pointerOf("global"), + Type: pointerOf("service"), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Datacenters: []string{"dc1"}, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(true), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("cache"), - Count: intToPtr(1), + Name: pointerOf("cache"), + Count: pointerOf(1), RestartPolicy: &RestartPolicy{ - Interval: timeToPtr(5 * time.Minute), - Attempts: intToPtr(10), - Delay: timeToPtr(25 * time.Second), - Mode: stringToPtr("delay"), + Interval: pointerOf(5 * time.Minute), + Attempts: pointerOf(10), + Delay: pointerOf(25 * time.Second), + Mode: pointerOf("delay"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(true), - Canary: intToPtr(0), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * 
time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(true), + Canary: pointerOf(0), + AutoPromote: pointerOf(true), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -714,18 +714,18 @@ func TestJobs_Canonicalize(t *testing.T) { }}, }, RestartPolicy: &RestartPolicy{ - Interval: timeToPtr(5 * time.Minute), - Attempts: intToPtr(20), - Delay: timeToPtr(25 * time.Second), - Mode: stringToPtr("delay"), + Interval: pointerOf(5 * time.Minute), + Attempts: pointerOf(20), + Delay: pointerOf(25 * time.Second), + Mode: pointerOf("delay"), }, Resources: &Resources{ - CPU: intToPtr(500), - Cores: intToPtr(0), - MemoryMB: intToPtr(256), + CPU: pointerOf(500), + Cores: pointerOf(0), + MemoryMB: pointerOf(256), Networks: []*NetworkResource{ { - MBits: intToPtr(10), + MBits: pointerOf(10), DynamicPorts: []Port{ { Label: "db", @@ -754,34 +754,34 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, }, - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), LogConfig: DefaultLogConfig(), Templates: []*Template{ { - SourcePath: stringToPtr(""), - DestPath: stringToPtr("local/file.yml"), - EmbeddedTmpl: stringToPtr("---"), - ChangeMode: stringToPtr("restart"), - ChangeSignal: stringToPtr(""), - Splay: timeToPtr(5 * time.Second), - Perms: stringToPtr("0644"), - LeftDelim: stringToPtr("{{"), - RightDelim: stringToPtr("}}"), - Envvars: boolToPtr(false), - VaultGrace: timeToPtr(0), + SourcePath: pointerOf(""), + DestPath: pointerOf("local/file.yml"), + EmbeddedTmpl: pointerOf("---"), + ChangeMode: pointerOf("restart"), + ChangeSignal: pointerOf(""), + Splay: pointerOf(5 * time.Second), + Perms: pointerOf("0644"), + LeftDelim: pointerOf("{{"), + RightDelim: pointerOf("}}"), + Envvars: pointerOf(false), + VaultGrace: pointerOf(time.Duration(0)), }, { - SourcePath: stringToPtr(""), - DestPath: stringToPtr("local/file.env"), - EmbeddedTmpl: stringToPtr("FOO=bar\n"), - ChangeMode: stringToPtr("restart"), - ChangeSignal: stringToPtr(""), - Splay: timeToPtr(5 * time.Second), - Perms: stringToPtr("0644"), - LeftDelim: stringToPtr("{{"), - RightDelim: stringToPtr("}}"), - Envvars: boolToPtr(true), - VaultGrace: timeToPtr(0), + SourcePath: pointerOf(""), + DestPath: pointerOf("local/file.env"), + EmbeddedTmpl: pointerOf("FOO=bar\n"), + ChangeMode: pointerOf("restart"), + ChangeSignal: pointerOf(""), + Splay: pointerOf(5 * time.Second), + Perms: pointerOf("0644"), + LeftDelim: pointerOf("{{"), + RightDelim: pointerOf("}}"), + Envvars: pointerOf(true), + VaultGrace: pointerOf(time.Duration(0)), }, }, }, @@ -794,48 +794,48 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "periodic", input: &Job{ - ID: stringToPtr("bar"), + ID: pointerOf("bar"), Periodic: &PeriodicConfig{}, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - ParentID: stringToPtr(""), - Name: stringToPtr("bar"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + ParentID: pointerOf(""), + Name: pointerOf("bar"), + 
Region: pointerOf("global"), + Type: pointerOf("service"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Periodic: &PeriodicConfig{ - Enabled: boolToPtr(true), - Spec: stringToPtr(""), - SpecType: stringToPtr(PeriodicSpecCron), - ProhibitOverlap: boolToPtr(false), - TimeZone: stringToPtr("UTC"), + Enabled: pointerOf(true), + Spec: pointerOf(""), + SpecType: pointerOf(PeriodicSpecCron), + ProhibitOverlap: pointerOf(false), + TimeZone: pointerOf("UTC"), }, }, }, @@ -843,34 +843,34 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "update_merge", input: &Job{ - Name: stringToPtr("foo"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), Update: &UpdateStrategy{ - Stagger: timeToPtr(1 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(1 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(2 * time.Second), - MaxParallel: intToPtr(2), - HealthCheck: stringToPtr("manual"), - MinHealthyTime: timeToPtr(1 * time.Second), - AutoRevert: boolToPtr(true), - Canary: intToPtr(1), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(2 * time.Second), + MaxParallel: pointerOf(2), + HealthCheck: pointerOf("manual"), + MinHealthyTime: pointerOf(1 * time.Second), + AutoRevert: pointerOf(true), + Canary: pointerOf(1), + AutoPromote: pointerOf(true), }, Tasks: []*Task{ { @@ -879,7 +879,7 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, { - Name: stringToPtr("baz"), + Name: pointerOf("baz"), Tasks: []*Task{ { Name: "task1", @@ -889,74 +889,74 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: 
stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(1 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(1 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), - Count: intToPtr(1), + Name: pointerOf("bar"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(2 * time.Second), - MaxParallel: intToPtr(2), - HealthCheck: stringToPtr("manual"), - MinHealthyTime: timeToPtr(1 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(true), - Canary: intToPtr(1), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(2 * time.Second), + MaxParallel: pointerOf(2), + HealthCheck: pointerOf("manual"), + MinHealthyTime: pointerOf(1 * time.Second), + HealthyDeadline: 
pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(true), + Canary: pointerOf(1), + AutoPromote: pointerOf(true), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -964,46 +964,46 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: defaultServiceJobRestartPolicy(), }, }, }, { - Name: stringToPtr("baz"), - Count: intToPtr(1), + Name: pointerOf("baz"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(1 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(1 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -1011,7 +1011,7 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: defaultServiceJobRestartPolicy(), }, }, @@ -1023,35 +1023,35 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "restart_merge", input: &Job{ - Name: stringToPtr("foo"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, Tasks: []*Task{ { Name: "task1", RestartPolicy: &RestartPolicy{ - Attempts: intToPtr(5), - Delay: timeToPtr(1 * time.Second), + Attempts: pointerOf(5), + Delay: pointerOf(1 * time.Second), }, }, }, }, { - Name: stringToPtr("baz"), + Name: pointerOf("baz"), RestartPolicy: 
&RestartPolicy{ - Delay: timeToPtr(20 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(20 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, Consul: &Consul{ Namespace: "", @@ -1065,74 +1065,74 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), - Count: intToPtr(1), + Name: pointerOf("bar"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: 
&UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -1140,51 +1140,51 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: &RestartPolicy{ - Attempts: intToPtr(5), - Delay: timeToPtr(1 * time.Second), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Attempts: pointerOf(5), + Delay: pointerOf(1 * time.Second), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, }, }, }, { - Name: stringToPtr("baz"), - Count: intToPtr(1), + Name: pointerOf("baz"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(20 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(20 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -1192,12 +1192,12 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(20 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(20 * 
time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, }, }, @@ -1209,14 +1209,14 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "multiregion", input: &Job{ - Name: stringToPtr("foo"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), Multiregion: &Multiregion{ Regions: []*MultiregionRegion{ { Name: "west", - Count: intToPtr(1), + Count: pointerOf(1), }, }, }, @@ -1224,49 +1224,49 @@ func TestJobs_Canonicalize(t *testing.T) { expected: &Job{ Multiregion: &Multiregion{ Strategy: &MultiregionStrategy{ - MaxParallel: intToPtr(0), - OnFailure: stringToPtr(""), + MaxParallel: pointerOf(0), + OnFailure: pointerOf(""), }, Regions: []*MultiregionRegion{ { Name: "west", - Count: intToPtr(1), + Count: pointerOf(1), Datacenters: []string{}, Meta: map[string]string{}, }, }, }, - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, }, }, @@ -1356,13 +1356,13 @@ func TestJobs_Revert(t *testing.T) { assertWriteMeta(t, wm) // Fail revert at incorrect enforce - _, _, err = jobs.Revert(*job.ID, 0, uint64ToPtr(10), nil, "", "") + _, _, err = jobs.Revert(*job.ID, 0, pointerOf(uint64(10)), nil, "", "") if err == nil || !strings.Contains(err.Error(), "enforcing version") { t.Fatalf("expected enforcement error: %v", err) } // Works at correct index - revertResp, wm, err := jobs.Revert(*job.ID, 0, uint64ToPtr(1), nil, "", "") + revertResp, wm, err := jobs.Revert(*job.ID, 0, pointerOf(uint64(1)), nil, "", "") if err != nil { 
t.Fatalf("err: %s", err) } @@ -1442,13 +1442,13 @@ func TestJobs_ScaleInvalidAction(t *testing.T) { // Register test job job := testJob() - job.ID = stringToPtr("TestJobs_Scale") + job.ID = pointerOf("TestJobs_Scale") _, wm, err := jobs.Register(job, nil) require.NoError(err) assertWriteMeta(t, wm) // Perform a scaling action with bad group name, verify error - _, _, err = jobs.Scale(*job.ID, "incorrect-group-name", intToPtr(2), + _, _, err = jobs.Scale(*job.ID, "incorrect-group-name", pointerOf(2), "because", false, nil, nil) require.Error(err) require.Contains(err.Error(), "does not exist") @@ -1952,11 +1952,11 @@ func TestJobs_NewBatchJob(t *testing.T) { testutil.Parallel(t) job := NewBatchJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeBatch), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeBatch), + Priority: pointerOf(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -1967,11 +1967,11 @@ func TestJobs_NewServiceJob(t *testing.T) { testutil.Parallel(t) job := NewServiceJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeService), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeService), + Priority: pointerOf(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -1982,11 +1982,11 @@ func TestJobs_NewSystemJob(t *testing.T) { testutil.Parallel(t) job := NewSystemJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeSystem), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeSystem), + Priority: pointerOf(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -1997,11 +1997,11 @@ func TestJobs_NewSysbatchJob(t *testing.T) { testutil.Parallel(t) job := NewSysbatchJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeSysbatch), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeSysbatch), + Priority: pointerOf(5), } require.Equal(t, expect, job) } @@ -2085,13 +2085,13 @@ func TestJobs_AddAffinity(t *testing.T) { LTarget: "kernel.version", RTarget: "4.6", Operand: "=", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, { LTarget: "${node.datacenter}", RTarget: "dc2", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, } if !reflect.DeepEqual(job.Affinities, expect) { @@ -2145,7 +2145,7 @@ func TestJobs_AddSpread(t *testing.T) { expect := []*Spread{ { Attribute: "${meta.rack}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "r1", @@ -2155,7 +2155,7 @@ func TestJobs_AddSpread(t *testing.T) { }, { Attribute: "${node.datacenter}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "dc1", @@ -2186,7 +2186,7 @@ func TestJobs_ScaleAction(t *testing.T) { newCount := 
origCount + 1 // Trying to scale against a target before it exists returns an error - _, _, err := jobs.Scale(id, "missing", intToPtr(newCount), "this won't work", + _, _, err := jobs.Scale(id, "missing", pointerOf(newCount), "this won't work", false, nil, nil) require.Error(err) require.Contains(err.Error(), "not found") @@ -2198,7 +2198,7 @@ func TestJobs_ScaleAction(t *testing.T) { // Perform scaling action scalingResp, wm, err := jobs.Scale(id, groupName, - intToPtr(newCount), "need more instances", false, + pointerOf(newCount), "need more instances", false, map[string]interface{}{ "meta": "data", }, nil) diff --git a/api/nodes_test.go b/api/nodes_test.go index a811826edce..6d4de0747ad 100644 --- a/api/nodes_test.go +++ b/api/nodes_test.go @@ -669,15 +669,15 @@ func TestNodeStatValueFormatting(t *testing.T) { }{ { "true", - StatValue{BoolVal: boolToPtr(true)}, + StatValue{BoolVal: pointerOf(true)}, }, { "false", - StatValue{BoolVal: boolToPtr(false)}, + StatValue{BoolVal: pointerOf(false)}, }, { "myvalue", - StatValue{StringVal: stringToPtr("myvalue")}, + StatValue{StringVal: pointerOf("myvalue")}, }, { "2.718", @@ -710,28 +710,28 @@ func TestNodeStatValueFormatting(t *testing.T) { { "2", StatValue{ - IntNumeratorVal: int64ToPtr(2), + IntNumeratorVal: pointerOf(int64(2)), }, }, { "2 / 3", StatValue{ - IntNumeratorVal: int64ToPtr(2), - IntDenominatorVal: int64ToPtr(3), + IntNumeratorVal: pointerOf(int64(2)), + IntDenominatorVal: pointerOf(int64(3)), }, }, { "2 MHz", StatValue{ - IntNumeratorVal: int64ToPtr(2), + IntNumeratorVal: pointerOf(int64(2)), Unit: "MHz", }, }, { "2 / 3 MHz", StatValue{ - IntNumeratorVal: int64ToPtr(2), - IntDenominatorVal: int64ToPtr(3), + IntNumeratorVal: pointerOf(int64(2)), + IntDenominatorVal: pointerOf(int64(3)), Unit: "MHz", }, }, diff --git a/api/quota.go b/api/quota.go index be4e46c7e95..ecfb82e8e31 100644 --- a/api/quota.go +++ b/api/quota.go @@ -126,6 +126,12 @@ type QuotaLimit struct { // useful for once we support GPUs RegionLimit *Resources + // SecureVariablesLimit is the maximum total size of all secure + // variables SecureVariable.EncryptedData. A value of zero is + // treated as unlimited and a negative value is treated as fully + // disallowed. + SecureVariablesLimit *int `mapstructure:"secure_variables_limit" hcl:"secure_variables_limit,optional"` + // Hash is the hash of the object and is used to make replication efficient. Hash []byte } diff --git a/api/resources.go b/api/resources.go index b5ada2d9ec1..43f6bbe86ad 100644 --- a/api/resources.go +++ b/api/resources.go @@ -38,7 +38,7 @@ func (r *Resources) Canonicalize() { // CPU will be set to the default if cores is nil above. // If cpu is nil here then cores has been set and cpu should be 0 if r.CPU == nil { - r.CPU = intToPtr(0) + r.CPU = pointerOf(0) } if r.MemoryMB == nil { @@ -55,9 +55,9 @@ func (r *Resources) Canonicalize() { // and should be kept in sync. func DefaultResources() *Resources { return &Resources{ - CPU: intToPtr(100), - Cores: intToPtr(0), - MemoryMB: intToPtr(300), + CPU: pointerOf(100), + Cores: pointerOf(0), + MemoryMB: pointerOf(300), } } @@ -68,9 +68,9 @@ func DefaultResources() *Resources { // IN nomad/structs/structs.go and should be kept in sync. 
func MinResources() *Resources { return &Resources{ - CPU: intToPtr(1), - Cores: intToPtr(0), - MemoryMB: intToPtr(10), + CPU: pointerOf(1), + Cores: pointerOf(0), + MemoryMB: pointerOf(10), } } @@ -268,7 +268,7 @@ type RequestedDevice struct { func (d *RequestedDevice) Canonicalize() { if d.Count == nil { - d.Count = uint64ToPtr(1) + d.Count = pointerOf(uint64(1)) } for _, a := range d.Affinities { diff --git a/api/resources_test.go b/api/resources_test.go index 2f9904ba6e5..de67c74a0ae 100644 --- a/api/resources_test.go +++ b/api/resources_test.go @@ -23,25 +23,25 @@ func TestResources_Canonicalize(t *testing.T) { { name: "cores", input: &Resources{ - Cores: intToPtr(2), - MemoryMB: intToPtr(1024), + Cores: pointerOf(2), + MemoryMB: pointerOf(1024), }, expected: &Resources{ - CPU: intToPtr(0), - Cores: intToPtr(2), - MemoryMB: intToPtr(1024), + CPU: pointerOf(0), + Cores: pointerOf(2), + MemoryMB: pointerOf(1024), }, }, { name: "cpu", input: &Resources{ - CPU: intToPtr(500), - MemoryMB: intToPtr(1024), + CPU: pointerOf(500), + MemoryMB: pointerOf(1024), }, expected: &Resources{ - CPU: intToPtr(500), - Cores: intToPtr(0), - MemoryMB: intToPtr(1024), + CPU: pointerOf(500), + Cores: pointerOf(0), + MemoryMB: pointerOf(1024), }, }, } diff --git a/api/scaling.go b/api/scaling.go index 2266c895987..32259c9f415 100644 --- a/api/scaling.go +++ b/api/scaling.go @@ -35,7 +35,7 @@ func (s *Scaling) GetPolicy(id string, q *QueryOptions) (*ScalingPolicy, *QueryM func (p *ScalingPolicy) Canonicalize(taskGroupCount int) { if p.Enabled == nil { - p.Enabled = boolToPtr(true) + p.Enabled = pointerOf(true) } if p.Min == nil { var m int64 = int64(taskGroupCount) diff --git a/api/scaling_test.go b/api/scaling_test.go index 0d4a703c6ba..af9dff3b5e9 100644 --- a/api/scaling_test.go +++ b/api/scaling_test.go @@ -24,7 +24,7 @@ func TestScalingPolicies_ListPolicies(t *testing.T) { // Register a job with a scaling policy job := testJob() job.TaskGroups[0].Scaling = &ScalingPolicy{ - Max: int64ToPtr(100), + Max: pointerOf(int64(100)), } _, _, err = jobs.Register(job, nil) require.NoError(err) @@ -77,9 +77,9 @@ func TestScalingPolicies_GetPolicy(t *testing.T) { // Register a job with a scaling policy job := testJob() policy := &ScalingPolicy{ - Enabled: boolToPtr(true), - Min: int64ToPtr(1), - Max: int64ToPtr(1), + Enabled: pointerOf(true), + Min: pointerOf(int64(1)), + Max: pointerOf(int64(1)), Policy: map[string]interface{}{ "key": "value", }, diff --git a/api/sentinel.go b/api/sentinel.go index fdccd9f6b64..74c88dd6384 100644 --- a/api/sentinel.go +++ b/api/sentinel.go @@ -1,6 +1,8 @@ package api -import "fmt" +import ( + "errors" +) // SentinelPolicies is used to query the Sentinel Policy endpoints. 
type SentinelPolicies struct { @@ -25,7 +27,7 @@ func (a *SentinelPolicies) List(q *QueryOptions) ([]*SentinelPolicyListStub, *Qu // Upsert is used to create or update a policy func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*WriteMeta, error) { if policy == nil || policy.Name == "" { - return nil, fmt.Errorf("missing policy name") + return nil, errors.New("missing policy name") } wm, err := a.client.write("/v1/sentinel/policy/"+policy.Name, policy, nil, q) if err != nil { @@ -37,7 +39,7 @@ func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*Wri // Delete is used to delete a policy func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { if policyName == "" { - return nil, fmt.Errorf("missing policy name") + return nil, errors.New("missing policy name") } wm, err := a.client.delete("/v1/sentinel/policy/"+policyName, nil, nil, q) if err != nil { @@ -49,7 +51,7 @@ func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMet // Info is used to query a specific policy func (a *SentinelPolicies) Info(policyName string, q *QueryOptions) (*SentinelPolicy, *QueryMeta, error) { if policyName == "" { - return nil, nil, fmt.Errorf("missing policy name") + return nil, nil, errors.New("missing policy name") } var resp SentinelPolicy wm, err := a.client.query("/v1/sentinel/policy/"+policyName, &resp, q) diff --git a/api/services.go b/api/services.go index 55d2b01c278..8d9b4157bab 100644 --- a/api/services.go +++ b/api/services.go @@ -144,7 +144,7 @@ func (c *CheckRestart) Canonicalize() { } if c.Grace == nil { - c.Grace = timeToPtr(1 * time.Second) + c.Grace = pointerOf(1 * time.Second) } } diff --git a/api/services_test.go b/api/services_test.go index 5de82e17f2a..c33fa429c33 100644 --- a/api/services_test.go +++ b/api/services_test.go @@ -24,8 +24,8 @@ func TestServiceRegistrations_Delete(t *testing.T) { func TestService_Canonicalize(t *testing.T) { testutil.Parallel(t) - j := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + j := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} s := &Service{ TaggedAddresses: make(map[string]string), @@ -45,8 +45,8 @@ func TestService_Canonicalize(t *testing.T) { func TestServiceCheck_Canonicalize(t *testing.T) { testutil.Parallel(t) - j := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + j := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} s := &Service{ Checks: []ServiceCheck{ @@ -64,8 +64,8 @@ func TestServiceCheck_Canonicalize(t *testing.T) { func TestService_Check_PassFail(t *testing.T) { testutil.Parallel(t) - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} t.Run("enforce minimums", func(t *testing.T) { @@ -100,13 +100,13 @@ func TestService_Check_PassFail(t *testing.T) { func TestService_CheckRestart(t *testing.T) { testutil.Parallel(t) - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} service := &Service{ CheckRestart: &CheckRestart{ Limit: 11, - Grace: timeToPtr(11 * time.Second), + Grace: pointerOf(11 * time.Second), IgnoreWarnings: true, }, Checks: []ServiceCheck{ @@ -114,7 +114,7 @@ func 
TestService_CheckRestart(t *testing.T) { Name: "all-set", CheckRestart: &CheckRestart{ Limit: 22, - Grace: timeToPtr(22 * time.Second), + Grace: pointerOf(22 * time.Second), IgnoreWarnings: true, }, }, @@ -122,7 +122,7 @@ func TestService_CheckRestart(t *testing.T) { Name: "some-set", CheckRestart: &CheckRestart{ Limit: 33, - Grace: timeToPtr(33 * time.Second), + Grace: pointerOf(33 * time.Second), }, }, { @@ -148,8 +148,8 @@ func TestService_CheckRestart(t *testing.T) { func TestService_Connect_proxy_settings(t *testing.T) { testutil.Parallel(t) - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} service := &Service{ Connect: &ConsulConnect{ @@ -183,8 +183,8 @@ func TestService_Tags(t *testing.T) { r := require.New(t) // canonicalize does not modify eto or tags - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} service := &Service{ Tags: []string{"a", "b"}, diff --git a/api/tasks.go b/api/tasks.go index 6cdb44da3a3..c8a1c0b3f1b 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -64,6 +64,24 @@ type AllocResourceUsage struct { Timestamp int64 } +// AllocCheckStatus contains the current status of a nomad service discovery check. +type AllocCheckStatus struct { + ID string + Check string + Group string + Mode string + Output string + Service string + Task string + Status string + StatusCode int + Timestamp int64 +} + +// AllocCheckStatuses holds the set of nomad service discovery checks within +// the allocation (including group and task level service checks). +type AllocCheckStatuses map[string]AllocCheckStatus + // RestartPolicy defines how the Nomad client restarts // tasks in a taskgroup when they fail type RestartPolicy struct { @@ -170,13 +188,13 @@ func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *A LTarget: lTarget, RTarget: rTarget, Operand: operand, - Weight: int8ToPtr(weight), + Weight: pointerOf(int8(weight)), } } func (a *Affinity) Canonicalize() { if a.Weight == nil { - a.Weight = int8ToPtr(50) + a.Weight = pointerOf(int8(50)) } } @@ -187,35 +205,35 @@ func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy { // This needs to be in sync with DefaultServiceJobReschedulePolicy // in nomad/structs/structs.go dp = &ReschedulePolicy{ - Delay: timeToPtr(30 * time.Second), - DelayFunction: stringToPtr("exponential"), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Delay: pointerOf(30 * time.Second), + DelayFunction: pointerOf("exponential"), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), - Attempts: intToPtr(0), - Interval: timeToPtr(0), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), } case "batch": // This needs to be in sync with DefaultBatchJobReschedulePolicy // in nomad/structs/structs.go dp = &ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(24 * time.Hour), - Delay: timeToPtr(5 * time.Second), - DelayFunction: stringToPtr("constant"), + Attempts: pointerOf(1), + Interval: pointerOf(24 * time.Hour), + Delay: pointerOf(5 * time.Second), + DelayFunction: pointerOf("constant"), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), } case "system": dp = &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: 
timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), } default: @@ -223,12 +241,12 @@ func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy { // function and we need to ensure a non-nil object is returned so that // the canonicalization runs without panicking. dp = &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), } } return dp @@ -276,14 +294,14 @@ func NewSpreadTarget(value string, percent uint8) *SpreadTarget { func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread { return &Spread{ Attribute: attribute, - Weight: int8ToPtr(weight), + Weight: pointerOf(int8(weight)), SpreadTarget: spreadTargets, } } func (s *Spread) Canonicalize() { if s.Weight == nil { - s.Weight = int8ToPtr(50) + s.Weight = pointerOf(int8(50)) } } @@ -296,21 +314,21 @@ type EphemeralDisk struct { func DefaultEphemeralDisk() *EphemeralDisk { return &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), } } func (e *EphemeralDisk) Canonicalize() { if e.Sticky == nil { - e.Sticky = boolToPtr(false) + e.Sticky = pointerOf(false) } if e.Migrate == nil { - e.Migrate = boolToPtr(false) + e.Migrate = pointerOf(false) } if e.SizeMB == nil { - e.SizeMB = intToPtr(300) + e.SizeMB = pointerOf(300) } } @@ -325,10 +343,10 @@ type MigrateStrategy struct { func DefaultMigrateStrategy() *MigrateStrategy { return &MigrateStrategy{ - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), } } @@ -405,10 +423,10 @@ type VolumeMount struct { func (vm *VolumeMount) Canonicalize() { if vm.PropagationMode == nil { - vm.PropagationMode = stringToPtr(VolumeMountPropagationPrivate) + vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate) } if vm.ReadOnly == nil { - vm.ReadOnly = boolToPtr(false) + vm.ReadOnly = pointerOf(false) } } @@ -439,22 +457,22 @@ type TaskGroup struct { // NewTaskGroup creates a new TaskGroup. 
func NewTaskGroup(name string, count int) *TaskGroup { return &TaskGroup{ - Name: stringToPtr(name), - Count: intToPtr(count), + Name: pointerOf(name), + Count: pointerOf(count), } } // Canonicalize sets defaults and merges settings that should be inherited from the job func (g *TaskGroup) Canonicalize(job *Job) { if g.Name == nil { - g.Name = stringToPtr("") + g.Name = pointerOf("") } if g.Count == nil { if g.Scaling != nil && g.Scaling.Min != nil { - g.Count = intToPtr(int(*g.Scaling.Min)) + g.Count = pointerOf(int(*g.Scaling.Min)) } else { - g.Count = intToPtr(1) + g.Count = pointerOf(1) } } if g.Scaling != nil { @@ -558,10 +576,10 @@ func (g *TaskGroup) Canonicalize(job *Job) { // in nomad/structs/structs.go func defaultServiceJobRestartPolicy() *RestartPolicy { return &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr(RestartPolicyModeFail), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf(RestartPolicyModeFail), } } @@ -569,10 +587,10 @@ func defaultServiceJobRestartPolicy() *RestartPolicy { // in nomad/structs/structs.go func defaultBatchJobRestartPolicy() *RestartPolicy { return &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(3), - Interval: timeToPtr(24 * time.Hour), - Mode: stringToPtr(RestartPolicyModeFail), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(3), + Interval: pointerOf(24 * time.Hour), + Mode: pointerOf(RestartPolicyModeFail), } } @@ -623,17 +641,17 @@ type LogConfig struct { func DefaultLogConfig() *LogConfig { return &LogConfig{ - MaxFiles: intToPtr(10), - MaxFileSizeMB: intToPtr(10), + MaxFiles: pointerOf(10), + MaxFileSizeMB: pointerOf(10), } } func (l *LogConfig) Canonicalize() { if l.MaxFiles == nil { - l.MaxFiles = intToPtr(10) + l.MaxFiles = pointerOf(10) } if l.MaxFileSizeMB == nil { - l.MaxFileSizeMB = intToPtr(10) + l.MaxFileSizeMB = pointerOf(10) } } @@ -694,7 +712,7 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { t.Resources.Canonicalize() if t.KillTimeout == nil { - t.KillTimeout = timeToPtr(5 * time.Second) + t.KillTimeout = pointerOf(5 * time.Second) } if t.LogConfig == nil { t.LogConfig = DefaultLogConfig() @@ -746,11 +764,11 @@ type TaskArtifact struct { func (a *TaskArtifact) Canonicalize() { if a.GetterMode == nil { - a.GetterMode = stringToPtr("any") + a.GetterMode = pointerOf("any") } if a.GetterSource == nil { // Shouldn't be possible, but we don't want to panic - a.GetterSource = stringToPtr("") + a.GetterSource = pointerOf("") } if len(a.GetterOptions) == 0 { a.GetterOptions = nil @@ -768,7 +786,7 @@ func (a *TaskArtifact) Canonicalize() { a.RelativeDest = &dest default: // Default to a directory - a.RelativeDest = stringToPtr("local/") + a.RelativeDest = pointerOf("local/") } } } @@ -791,14 +809,39 @@ func (wc *WaitConfig) Copy() *WaitConfig { return nwc } +type ChangeScript struct { + Command *string `mapstructure:"command" hcl:"command"` + Args []string `mapstructure:"args" hcl:"args,optional"` + Timeout *time.Duration `mapstructure:"timeout" hcl:"timeout,optional"` + FailOnError *bool `mapstructure:"fail_on_error" hcl:"fail_on_error"` +} + +func (ch *ChangeScript) Canonicalize() { + if ch.Command == nil { + ch.Command = pointerOf("") + } + if ch.Args == nil { + ch.Args = []string{} + } + if ch.Timeout == nil { + ch.Timeout = pointerOf(5 * time.Second) + } + if ch.FailOnError == nil { + ch.FailOnError = pointerOf(false) + } +} + 
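
// Illustrative sketch (not part of this change set): one way the new
// ChangeScript block can be attached to a Template within this package,
// mirroring the style of the tests elsewhere in this diff. The function name
// and the command/args/template values are hypothetical examples only.
func exampleTemplateWithChangeScript() *Template {
	tmpl := &Template{
		EmbeddedTmpl: pointerOf("{{ key \"app/config\" }}"),
		DestPath:     pointerOf("local/app.conf"),
		ChangeMode:   pointerOf("script"),
		ChangeScript: &ChangeScript{
			Command: pointerOf("/usr/local/bin/reload.sh"),
			Args:    []string{"-verbose"},
			// Timeout and FailOnError are left nil on purpose; Canonicalize
			// (defined above) fills in the 5s timeout and false defaults.
		},
	}
	tmpl.Canonicalize()
	return tmpl
}
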
type Template struct { SourcePath *string `mapstructure:"source" hcl:"source,optional"` DestPath *string `mapstructure:"destination" hcl:"destination,optional"` EmbeddedTmpl *string `mapstructure:"data" hcl:"data,optional"` ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` + ChangeScript *ChangeScript `mapstructure:"change_script" hcl:"change_script,block"` ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` Splay *time.Duration `mapstructure:"splay" hcl:"splay,optional"` Perms *string `mapstructure:"perms" hcl:"perms,optional"` + Uid *int `mapstructure:"uid" hcl:"uid,optional"` + Gid *int `mapstructure:"gid" hcl:"gid,optional"` LeftDelim *string `mapstructure:"left_delimiter" hcl:"left_delimiter,optional"` RightDelim *string `mapstructure:"right_delimiter" hcl:"right_delimiter,optional"` Envvars *bool `mapstructure:"env" hcl:"env,optional"` @@ -808,46 +851,49 @@ type Template struct { func (tmpl *Template) Canonicalize() { if tmpl.SourcePath == nil { - tmpl.SourcePath = stringToPtr("") + tmpl.SourcePath = pointerOf("") } if tmpl.DestPath == nil { - tmpl.DestPath = stringToPtr("") + tmpl.DestPath = pointerOf("") } if tmpl.EmbeddedTmpl == nil { - tmpl.EmbeddedTmpl = stringToPtr("") + tmpl.EmbeddedTmpl = pointerOf("") } if tmpl.ChangeMode == nil { - tmpl.ChangeMode = stringToPtr("restart") + tmpl.ChangeMode = pointerOf("restart") } if tmpl.ChangeSignal == nil { if *tmpl.ChangeMode == "signal" { - tmpl.ChangeSignal = stringToPtr("SIGHUP") + tmpl.ChangeSignal = pointerOf("SIGHUP") } else { - tmpl.ChangeSignal = stringToPtr("") + tmpl.ChangeSignal = pointerOf("") } } else { sig := *tmpl.ChangeSignal - tmpl.ChangeSignal = stringToPtr(strings.ToUpper(sig)) + tmpl.ChangeSignal = pointerOf(strings.ToUpper(sig)) + } + if tmpl.ChangeScript != nil { + tmpl.ChangeScript.Canonicalize() } if tmpl.Splay == nil { - tmpl.Splay = timeToPtr(5 * time.Second) + tmpl.Splay = pointerOf(5 * time.Second) } if tmpl.Perms == nil { - tmpl.Perms = stringToPtr("0644") + tmpl.Perms = pointerOf("0644") } if tmpl.LeftDelim == nil { - tmpl.LeftDelim = stringToPtr("{{") + tmpl.LeftDelim = pointerOf("{{") } if tmpl.RightDelim == nil { - tmpl.RightDelim = stringToPtr("}}") + tmpl.RightDelim = pointerOf("}}") } if tmpl.Envvars == nil { - tmpl.Envvars = boolToPtr(false) + tmpl.Envvars = pointerOf(false) } //COMPAT(0.12) VaultGrace is deprecated and unused as of Vault 0.5 if tmpl.VaultGrace == nil { - tmpl.VaultGrace = timeToPtr(0) + tmpl.VaultGrace = pointerOf(time.Duration(0)) } } @@ -861,16 +907,16 @@ type Vault struct { func (v *Vault) Canonicalize() { if v.Env == nil { - v.Env = boolToPtr(true) + v.Env = pointerOf(true) } if v.Namespace == nil { - v.Namespace = stringToPtr("") + v.Namespace = pointerOf("") } if v.ChangeMode == nil { - v.ChangeMode = stringToPtr("restart") + v.ChangeMode = pointerOf("restart") } if v.ChangeSignal == nil { - v.ChangeSignal = stringToPtr("SIGHUP") + v.ChangeSignal = pointerOf("SIGHUP") } } @@ -1032,14 +1078,17 @@ type TaskCSIPluginConfig struct { // CSIPluginType instructs Nomad on how to handle processing a plugin Type CSIPluginType `mapstructure:"type" hcl:"type,optional"` - // MountDir is the destination that nomad should mount in its CSI - // directory for the plugin. It will then expect a file called CSISocketName - // to be created by the plugin, and will provide references into - // "MountDir/CSIIntermediaryDirname/VolumeName/AllocID for mounts. - // - // Default is /csi. 
+ // MountDir is the directory (within its container) in which the plugin creates a + // socket (called CSISocketName) for communication with Nomad. Default is /csi. MountDir string `mapstructure:"mount_dir" hcl:"mount_dir,optional"` + // StagePublishBaseDir is the base directory (within its container) in which the plugin + // mounts volumes being staged and bind mounts volumes being published. + // e.g. staging_target_path = {StagePublishBaseDir}/staging/{volume-id}/{usage-mode} + // e.g. target_path = {StagePublishBaseDir}/per-alloc/{alloc-id}/{volume-id}/{usage-mode} + // Default is /local/csi. + StagePublishBaseDir string `mapstructure:"stage_publish_base_dir" hcl:"stage_publish_base_dir,optional"` + // HealthTimeout is the time after which the CSI plugin tasks will be killed // if the CSI Plugin is not healthy. HealthTimeout time.Duration `mapstructure:"health_timeout" hcl:"health_timeout,optional"` @@ -1050,6 +1099,10 @@ func (t *TaskCSIPluginConfig) Canonicalize() { t.MountDir = "/csi" } + if t.StagePublishBaseDir == "" { + t.StagePublishBaseDir = filepath.Join("/local", "csi") + } + if t.HealthTimeout == 0 { t.HealthTimeout = 30 * time.Second } diff --git a/api/tasks_test.go b/api/tasks_test.go index 02e20506a6e..b0312263d0e 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -15,8 +15,8 @@ func TestTaskGroup_NewTaskGroup(t *testing.T) { testutil.Parallel(t) grp := NewTaskGroup("grp1", 2) expect := &TaskGroup{ - Name: stringToPtr("grp1"), - Count: intToPtr(2), + Name: pointerOf("grp1"), + Count: pointerOf(2), } if !reflect.DeepEqual(grp, expect) { t.Fatalf("expect: %#v, got: %#v", expect, grp) @@ -79,13 +79,13 @@ func TestTaskGroup_AddAffinity(t *testing.T) { LTarget: "kernel.version", RTarget: "4.6", Operand: "=", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, { LTarget: "${node.affinity}", RTarget: "dc2", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, } if !reflect.DeepEqual(grp.Affinities, expect) { @@ -143,7 +143,7 @@ func TestTaskGroup_AddSpread(t *testing.T) { expect := []*Spread{ { Attribute: "${meta.rack}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "r1", @@ -153,7 +153,7 @@ func TestTaskGroup_AddSpread(t *testing.T) { }, { Attribute: "${node.datacenter}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "dc1", @@ -263,13 +263,13 @@ func TestTask_Require(t *testing.T) { // Create some require resources resources := &Resources{ - CPU: intToPtr(1250), - MemoryMB: intToPtr(128), - DiskMB: intToPtr(2048), + CPU: pointerOf(1250), + MemoryMB: pointerOf(128), + DiskMB: pointerOf(2048), Networks: []*NetworkResource{ { CIDR: "0.0.0.0/0", - MBits: intToPtr(100), + MBits: pointerOf(100), ReservedPorts: []Port{{"", 80, 0, ""}, {"", 443, 0, ""}}, }, }, @@ -340,13 +340,13 @@ func TestTask_AddAffinity(t *testing.T) { LTarget: "kernel.version", RTarget: "4.6", Operand: "=", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, { LTarget: "${node.datacenter}", RTarget: "dc2", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, } if !reflect.DeepEqual(task.Affinities, expect) { @@ -357,8 +357,8 @@ func TestTask_AddAffinity(t *testing.T) { func TestTask_Artifact(t *testing.T) { testutil.Parallel(t) a := TaskArtifact{ - GetterSource: stringToPtr("http://localhost/foo.txt"), - GetterMode: stringToPtr("file"), + GetterSource: pointerOf("http://localhost/foo.txt"), + GetterMode: pointerOf("file"), GetterHeaders: 
make(map[string]string), GetterOptions: make(map[string]string), } @@ -396,10 +396,10 @@ func TestTask_Canonicalize_TaskLifecycle(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } j := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), } tc.task.Canonicalize(tg, j) require.Equal(t, tc.expected, tc.task.Lifecycle) @@ -429,16 +429,16 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { { name: "all-fields", task: taskWithWait(&WaitConfig{ - Min: timeToPtr(5), - Max: timeToPtr(10), + Min: pointerOf(time.Duration(5)), + Max: pointerOf(time.Duration(10)), }), canonicalized: &WaitConfig{ - Min: timeToPtr(5), - Max: timeToPtr(10), + Min: pointerOf(time.Duration(5)), + Max: pointerOf(time.Duration(10)), }, copied: &WaitConfig{ - Min: timeToPtr(5), - Max: timeToPtr(10), + Min: pointerOf(time.Duration(5)), + Max: pointerOf(time.Duration(10)), }, }, { @@ -456,25 +456,25 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { { name: "min-only", task: taskWithWait(&WaitConfig{ - Min: timeToPtr(5), + Min: pointerOf(time.Duration(5)), }), canonicalized: &WaitConfig{ - Min: timeToPtr(5), + Min: pointerOf(time.Duration(5)), }, copied: &WaitConfig{ - Min: timeToPtr(5), + Min: pointerOf(time.Duration(5)), }, }, { name: "max-only", task: taskWithWait(&WaitConfig{ - Max: timeToPtr(10), + Max: pointerOf(time.Duration(10)), }), canonicalized: &WaitConfig{ - Max: timeToPtr(10), + Max: pointerOf(time.Duration(10)), }, copied: &WaitConfig{ - Max: timeToPtr(10), + Max: pointerOf(time.Duration(10)), }, }, } @@ -482,10 +482,10 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } j := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), } require.Equal(t, tc.copied, tc.task.Templates[0].Wait.Copy()) tc.task.Canonicalize(tg, j) @@ -504,10 +504,10 @@ func TestTask_Canonicalize_Vault(t *testing.T) { name: "empty", input: &Vault{}, expected: &Vault{ - Env: boolToPtr(true), - Namespace: stringToPtr(""), - ChangeMode: stringToPtr("restart"), - ChangeSignal: stringToPtr("SIGHUP"), + Env: pointerOf(true), + Namespace: pointerOf(""), + ChangeMode: pointerOf("restart"), + ChangeSignal: pointerOf("SIGHUP"), }, }, } @@ -525,22 +525,22 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) { testutil.Parallel(t) // Job with an Empty() Update job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), Update: &UpdateStrategy{ - AutoRevert: boolToPtr(false), - AutoPromote: boolToPtr(false), - Canary: intToPtr(0), - HealthCheck: stringToPtr(""), - HealthyDeadline: timeToPtr(0), - ProgressDeadline: timeToPtr(0), - MaxParallel: intToPtr(0), - MinHealthyTime: timeToPtr(0), - Stagger: timeToPtr(0), + AutoRevert: pointerOf(false), + AutoPromote: pointerOf(false), + Canary: pointerOf(0), + HealthCheck: pointerOf(""), + HealthyDeadline: pointerOf(time.Duration(0)), + ProgressDeadline: pointerOf(time.Duration(0)), + MaxParallel: pointerOf(0), + MinHealthyTime: pointerOf(time.Duration(0)), + Stagger: pointerOf(time.Duration(0)), }, } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } tg.Canonicalize(job) assert.NotNil(t, job.Update) @@ -552,15 +552,15 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { require := require.New(t) job := &Job{ - ID: stringToPtr("test"), + 
ID: pointerOf("test"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), Count: nil, Scaling: &ScalingPolicy{ Min: nil, - Max: int64ToPtr(10), + Max: pointerOf(int64(10)), Policy: nil, Enabled: nil, CreateIndex: 0, @@ -578,7 +578,7 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { // count == nil => count = Scaling.Min tg.Count = nil - tg.Scaling.Min = int64ToPtr(5) + tg.Scaling.Min = pointerOf(int64(5)) tg.Canonicalize(job) require.NotNil(tg.Count) require.NotNil(tg.Scaling.Min) @@ -586,7 +586,7 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { require.EqualValues(*tg.Count, *tg.Scaling.Min) // Scaling.Min == nil => Scaling.Min == count - tg.Count = intToPtr(5) + tg.Count = pointerOf(5) tg.Scaling.Min = nil tg.Canonicalize(job) require.NotNil(tg.Count) @@ -595,8 +595,8 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { require.EqualValues(*tg.Scaling.Min, *tg.Count) // both present, both persisted - tg.Count = intToPtr(5) - tg.Scaling.Min = int64ToPtr(1) + tg.Count = pointerOf(5) + tg.Scaling.Min = pointerOf(int64(1)) tg.Canonicalize(job) require.NotNil(tg.Count) require.NotNil(tg.Scaling.Min) @@ -607,32 +607,32 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { func TestTaskGroup_Merge_Update(t *testing.T) { testutil.Parallel(t) job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), Update: &UpdateStrategy{}, } job.Canonicalize() // Merge and canonicalize part of an update stanza tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), Update: &UpdateStrategy{ - AutoRevert: boolToPtr(true), - Canary: intToPtr(5), - HealthCheck: stringToPtr("foo"), + AutoRevert: pointerOf(true), + Canary: pointerOf(5), + HealthCheck: pointerOf("foo"), }, } tg.Canonicalize(job) require.Equal(t, &UpdateStrategy{ - AutoRevert: boolToPtr(true), - AutoPromote: boolToPtr(false), - Canary: intToPtr(5), - HealthCheck: stringToPtr("foo"), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - MaxParallel: intToPtr(1), - MinHealthyTime: timeToPtr(10 * time.Second), - Stagger: timeToPtr(30 * time.Second), + AutoRevert: pointerOf(true), + AutoPromote: pointerOf(false), + Canary: pointerOf(5), + HealthCheck: pointerOf("foo"), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + MaxParallel: pointerOf(1), + MinHealthyTime: pointerOf(10 * time.Second), + Stagger: pointerOf(30 * time.Second), }, tg.Update) } @@ -661,44 +661,44 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { jobMigrate: nil, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), }, }, { desc: "Empty job migrate strategy", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(0), - HealthCheck: stringToPtr(""), - MinHealthyTime: timeToPtr(0), - HealthyDeadline: timeToPtr(0), + MaxParallel: pointerOf(0), + HealthCheck: pointerOf(""), + MinHealthyTime: pointerOf(time.Duration(0)), + HealthyDeadline: pointerOf(time.Duration(0)), }, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(0), - HealthCheck: stringToPtr(""), - MinHealthyTime: timeToPtr(0), - HealthyDeadline: timeToPtr(0), + MaxParallel: pointerOf(0), + 
HealthCheck: pointerOf(""), + MinHealthyTime: pointerOf(time.Duration(0)), + HealthyDeadline: pointerOf(time.Duration(0)), }, }, { desc: "Inherit from job", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { @@ -706,67 +706,67 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { jobType: "service", jobMigrate: nil, taskMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, expected: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { desc: "Merge from job", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(11), + MaxParallel: pointerOf(11), }, taskMigrate: &MigrateStrategy{ - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, expected: &MigrateStrategy{ - MaxParallel: intToPtr(11), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(11), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { desc: "Override from group", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(11), + MaxParallel: pointerOf(11), }, taskMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(5), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(5), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, expected: &MigrateStrategy{ - MaxParallel: intToPtr(5), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(5), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { desc: "Parallel from job, defaulting", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(5), + MaxParallel: pointerOf(5), }, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(5), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), + MaxParallel: pointerOf(5), + HealthCheck: 
pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), }, }, } @@ -774,13 +774,13 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), Migrate: tc.jobMigrate, - Type: stringToPtr(tc.jobType), + Type: pointerOf(tc.jobType), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), Migrate: tc.taskMigrate, } tg.Canonicalize(job) @@ -793,12 +793,12 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { func TestSpread_Canonicalize(t *testing.T) { testutil.Parallel(t) job := &Job{ - ID: stringToPtr("test"), - Type: stringToPtr("batch"), + ID: pointerOf("test"), + Type: pointerOf("batch"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } type testCase struct { desc string @@ -818,7 +818,7 @@ func TestSpread_Canonicalize(t *testing.T) { "Zero spread", &Spread{ Attribute: "test", - Weight: int8ToPtr(0), + Weight: pointerOf(int8(0)), }, 0, }, @@ -826,7 +826,7 @@ func TestSpread_Canonicalize(t *testing.T) { "Non Zero spread", &Spread{ Attribute: "test", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, 100, }, @@ -854,48 +854,48 @@ func Test_NewDefaultReschedulePolicy(t *testing.T) { desc: "service job type", inputJobType: "service", expected: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(30 * time.Second), - DelayFunction: stringToPtr("exponential"), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(30 * time.Second), + DelayFunction: pointerOf("exponential"), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, }, { desc: "batch job type", inputJobType: "batch", expected: &ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(24 * time.Hour), - Delay: timeToPtr(5 * time.Second), - DelayFunction: stringToPtr("constant"), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(1), + Interval: pointerOf(24 * time.Hour), + Delay: pointerOf(5 * time.Second), + DelayFunction: pointerOf("constant"), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, }, { desc: "system job type", inputJobType: "system", expected: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, }, { desc: "unrecognised job type", inputJobType: "unrecognised", expected: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, }, } @@ -912,13 +912,13 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) { testutil.Parallel(t) t.Run("override job consul in group", func(t *testing.T) { job := &Job{ - ID: stringToPtr("job"), - ConsulNamespace: 
stringToPtr("ns1"), + ID: pointerOf("job"), + ConsulNamespace: pointerOf("ns1"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("group"), + Name: pointerOf("group"), Consul: &Consul{Namespace: "ns2"}, } tg.Canonicalize(job) @@ -929,13 +929,13 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) { t.Run("inherit job consul in group", func(t *testing.T) { job := &Job{ - ID: stringToPtr("job"), - ConsulNamespace: stringToPtr("ns1"), + ID: pointerOf("job"), + ConsulNamespace: pointerOf("ns1"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("group"), + Name: pointerOf("group"), Consul: nil, // not set, inherit from job } tg.Canonicalize(job) @@ -946,13 +946,13 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) { t.Run("set in group only", func(t *testing.T) { job := &Job{ - ID: stringToPtr("job"), + ID: pointerOf("job"), ConsulNamespace: nil, } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("group"), + Name: pointerOf("group"), Consul: &Consul{Namespace: "ns2"}, } tg.Canonicalize(job) diff --git a/api/util_test.go b/api/util_test.go index 7e5f2e1b575..efaecdf498e 100644 --- a/api/util_test.go +++ b/api/util_test.go @@ -27,18 +27,18 @@ func testJob() *Job { task := NewTask("task1", "raw_exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointerOf(100), + MemoryMB: pointerOf(256), }). SetLogConfig(&LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointerOf(1), + MaxFileSizeMB: pointerOf(2), }) group := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job := NewBatchJob("job1", "redis", "global", 1). @@ -52,18 +52,18 @@ func testJobWithScalingPolicy() *Job { job := testJob() job.TaskGroups[0].Scaling = &ScalingPolicy{ Policy: map[string]interface{}{}, - Min: int64ToPtr(1), - Max: int64ToPtr(5), - Enabled: boolToPtr(true), + Min: pointerOf(int64(1)), + Max: pointerOf(int64(5)), + Enabled: pointerOf(true), } return job } func testPeriodicJob() *Job { job := testJob().AddPeriodicConfig(&PeriodicConfig{ - Enabled: boolToPtr(true), - Spec: stringToPtr("*/30 * * * *"), - SpecType: stringToPtr("cron"), + Enabled: pointerOf(true), + Spec: pointerOf("*/30 * * * *"), + SpecType: pointerOf("cron"), }) return job } @@ -109,8 +109,8 @@ func testQuotaSpec() *QuotaSpec { { Region: "global", RegionLimit: &Resources{ - CPU: intToPtr(2000), - MemoryMB: intToPtr(2000), + CPU: pointerOf(2000), + MemoryMB: pointerOf(2000), }, }, }, diff --git a/api/utils.go b/api/utils.go index 9e54306f6b6..a8e1c02e456 100644 --- a/api/utils.go +++ b/api/utils.go @@ -3,44 +3,8 @@ package api import ( "strconv" "strings" - "time" ) -// boolToPtr returns the pointer to a boolean -func boolToPtr(b bool) *bool { - return &b -} - -// int8ToPtr returns the pointer to an int8 -func int8ToPtr(i int8) *int8 { - return &i -} - -// intToPtr returns the pointer to an int -func intToPtr(i int) *int { - return &i -} - -// uint64ToPtr returns the pointer to an uint64 -func uint64ToPtr(u uint64) *uint64 { - return &u -} - -// int64ToPtr returns the pointer to a int64 -func int64ToPtr(i int64) *int64 { - return &i -} - -// stringToPtr returns the pointer to a string -func stringToPtr(str string) *string { - return &str -} - -// timeToPtr returns the pointer to a time stamp -func timeToPtr(t time.Duration) *time.Duration { - return &t -} - // formatFloat converts the floating-point number f to a string, // after rounding it to the passed unit. 
// @@ -61,3 +25,8 @@ func formatFloat(f float64, maxPrec int) string { return v[:sublen] } + +// pointerOf returns a pointer to a. +func pointerOf[A any](a A) *A { + return &a +} diff --git a/api/utils_test.go b/api/utils_test.go index 7e0d789bda5..969b1562bb4 100644 --- a/api/utils_test.go +++ b/api/utils_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/api/internal/testutil" + "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -39,3 +40,14 @@ func TestFormatRoundedFloat(t *testing.T) { require.Equal(t, c.expected, formatFloat(c.input, 3)) } } + +func Test_PointerOf(t *testing.T) { + s := "hello" + sPtr := pointerOf(s) + + must.Eq(t, s, *sPtr) + + b := "bye" + sPtr = &b + must.NotEq(t, s, *sPtr) +} diff --git a/client/acl.go b/client/acl.go index 43d994bc4dd..bf666fbaa05 100644 --- a/client/acl.go +++ b/client/acl.go @@ -81,7 +81,7 @@ func (c *Client) ResolveSecretToken(secretID string) (*structs.ACLToken, error) func (c *Client) resolveTokenAndACL(secretID string) (*acl.ACL, *structs.ACLToken, error) { // Fast-path if ACLs are disabled - if !c.config.ACLEnabled { + if !c.GetConfig().ACLEnabled { return nil, nil, nil } defer metrics.MeasureSince([]string{"client", "acl", "resolve_token"}, time.Now()) @@ -127,7 +127,7 @@ func (c *Client) resolveTokenValue(secretID string) (*structs.ACLToken, error) { raw, ok := c.tokenCache.Get(secretID) if ok { cached := raw.(*cachedACLValue) - if cached.Age() <= c.config.ACLTokenTTL { + if cached.Age() <= c.GetConfig().ACLTokenTTL { return cached.Token, nil } } @@ -179,7 +179,7 @@ func (c *Client) resolvePolicies(secretID string, policies []string) ([]*structs // Check if the cached value is valid or expired cached := raw.(*cachedACLValue) - if cached.Age() <= c.config.ACLPolicyTTL { + if cached.Age() <= c.GetConfig().ACLPolicyTTL { out = append(out, cached.Policy) } else { expired = append(expired, cached.Policy) diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index 12993c4985c..6508fbebe1c 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/command/agent/host" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" metrics "github.com/armon/go-metrics" @@ -42,7 +42,7 @@ func (a *Agent) Profile(args *structs.AgentPprofRequest, reply *structs.AgentPpr } // If ACLs are disabled, EnableDebug must be enabled - if aclObj == nil && !a.c.config.EnableDebug { + if aclObj == nil && !a.c.GetConfig().EnableDebug { return structs.ErrPermissionDenied } @@ -89,16 +89,16 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } // Check acl if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(403), encoder) + handleStreamResultError(err, pointer.Of(int64(403)), encoder) return } else if aclObj != nil && !aclObj.AllowAgentRead() { - handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) return } @@ -108,7 +108,7 @@ func (a *Agent) monitor(conn 
io.ReadWriteCloser) { } if logLevel == log.NoLevel { - handleStreamResultError(errors.New("Unknown log level"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("Unknown log level"), pointer.Of(int64(400)), encoder) return } @@ -206,7 +206,7 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return } } @@ -218,7 +218,7 @@ func (a *Agent) Host(args *structs.HostDataRequest, reply *structs.HostDataRespo return err } if (aclObj != nil && !aclObj.AllowAgentRead()) || - (aclObj == nil && !a.c.config.EnableDebug) { + (aclObj == nil && !a.c.GetConfig().EnableDebug) { return structs.ErrPermissionDenied } diff --git a/client/alloc_endpoint.go b/client/alloc_endpoint.go index a0bfc3920e9..52ab8f414e6 100644 --- a/client/alloc_endpoint.go +++ b/client/alloc_endpoint.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" @@ -102,7 +102,7 @@ func (a *Allocations) Restart(args *nstructs.AllocRestartRequest, reply *nstruct return nstructs.ErrPermissionDenied } - return a.c.RestartAllocation(args.AllocID, args.TaskName) + return a.c.RestartAllocation(args.AllocID, args.TaskName, args.AllTasks) } // Stats is used to collect allocation statistics @@ -183,7 +183,7 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e // Decode the arguments var req cstructs.AllocExecRequest if err := decoder.Decode(&req); err != nil { - return helper.Int64ToPtr(500), err + return pointer.Of(int64(500)), err } if a.c.GetConfig().DisableRemoteExec { @@ -191,13 +191,13 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e } if req.AllocID == "" { - return helper.Int64ToPtr(400), allocIDNotPresentErr + return pointer.Of(int64(400)), allocIDNotPresentErr } ar, err := a.c.getAllocRunner(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if nstructs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } return code, err @@ -232,17 +232,17 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e // Validate the arguments if req.Task == "" { - return helper.Int64ToPtr(400), taskNotPresentErr + return pointer.Of(int64(400)), taskNotPresentErr } if len(req.Cmd) == 0 { - return helper.Int64ToPtr(400), errors.New("command is not present") + return pointer.Of(int64(400)), errors.New("command is not present") } capabilities, err := ar.GetTaskDriverCapabilities(req.Task) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if nstructs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } return code, err @@ -258,9 +258,9 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e allocState, err := a.c.GetAllocState(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if nstructs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } return code, err @@ -269,11 +269,11 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder 
*codec.Decoder, e // Check that the task is there taskState := allocState.TaskStates[req.Task] if taskState == nil { - return helper.Int64ToPtr(400), fmt.Errorf("unknown task name %q", req.Task) + return pointer.Of(int64(400)), fmt.Errorf("unknown task name %q", req.Task) } if taskState.StartedAt.IsZero() { - return helper.Int64ToPtr(404), fmt.Errorf("task %q not started yet.", req.Task) + return pointer.Of(int64(404)), fmt.Errorf("task %q not started yet.", req.Task) } ctx, cancel := context.WithCancel(context.Background()) @@ -281,12 +281,12 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e h := ar.GetTaskExecHandler(req.Task) if h == nil { - return helper.Int64ToPtr(404), fmt.Errorf("task %q is not running.", req.Task) + return pointer.Of(int64(404)), fmt.Errorf("task %q is not running.", req.Task) } err = h(ctx, req.Cmd, req.Tty, newExecStream(decoder, encoder)) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) return code, err } diff --git a/client/alloc_endpoint_test.go b/client/alloc_endpoint_test.go index c8f2560d02b..f3c6e3e2bfd 100644 --- a/client/alloc_endpoint_test.go +++ b/client/alloc_endpoint_test.go @@ -68,6 +68,45 @@ func TestAllocations_Restart(t *testing.T) { }) } +func TestAllocations_RestartAllTasks(t *testing.T) { + ci.Parallel(t) + + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + alloc := mock.LifecycleAlloc() + require.Nil(client.addAlloc(alloc, "")) + + // Can't restart all tasks while specifying a task name. + req := &nstructs.AllocRestartRequest{ + AllocID: alloc.ID, + AllTasks: true, + TaskName: "web", + } + var resp nstructs.GenericResponse + err := client.ClientRPC("Allocations.Restart", &req, &resp) + require.Error(err) + + // Good request. + req = &nstructs.AllocRestartRequest{ + AllocID: alloc.ID, + AllTasks: true, + } + + testutil.WaitForResult(func() (bool, error) { + var resp2 nstructs.GenericResponse + err := client.ClientRPC("Allocations.Restart", &req, &resp2) + if err != nil && strings.Contains(err.Error(), "not running") { + return false, err + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %v", err) + }) +} + func TestAllocations_Restart_ACL(t *testing.T) { ci.Parallel(t) require := require.New(t) diff --git a/client/allocdir/alloc_dir_test.go b/client/allocdir/alloc_dir_test.go index b2467483ebb..e8871b4a672 100644 --- a/client/allocdir/alloc_dir_test.go +++ b/client/allocdir/alloc_dir_test.go @@ -82,8 +82,9 @@ func TestAllocDir_BuildAlloc(t *testing.T) { } // HACK: This function is copy/pasted from client.testutil to prevent a test -// import cycle, due to testutil transitively importing allocdir. This -// should be fixed after DriverManager is implemented. +// +// import cycle, due to testutil transitively importing allocdir. This +// should be fixed after DriverManager is implemented. func MountCompatible(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Windows does not support mount") diff --git a/client/allochealth/tracker.go b/client/allochealth/tracker.go index 87820d9ed67..1de1cb12fc0 100644 --- a/client/allochealth/tracker.go +++ b/client/allochealth/tracker.go @@ -258,8 +258,9 @@ func (t *Tracker) setTaskHealth(healthy, terminal bool) { // returns true if health is propagated and no more health monitoring is needed // // todo: this is currently being shared by watchConsulEvents and watchNomadEvents, -// and must be split up if/when we support registering services (and thus checks) -// of different providers. 
+// +// and must be split up if/when we support registering services (and thus checks) +// of different providers. func (t *Tracker) setCheckHealth(healthy bool) bool { t.lock.Lock() defer t.lock.Unlock() diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index 624c3145e9b..1d50c2595bb 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/allocrunner/state" + "github.com/hashicorp/nomad/client/allocrunner/tasklifecycle" "github.com/hashicorp/nomad/client/allocrunner/taskrunner" "github.com/hashicorp/nomad/client/allocwatcher" "github.com/hashicorp/nomad/client/config" @@ -27,8 +28,7 @@ import ( cstate "github.com/hashicorp/nomad/client/state" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/client/vaultclient" - agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/drivers" @@ -172,7 +172,9 @@ type allocRunner struct { // restore. serversContactedCh chan struct{} - taskHookCoordinator *taskHookCoordinator + // taskCoordinator is used to controlled when tasks are allowed to run + // depending on their lifecycle configuration. + taskCoordinator *tasklifecycle.Coordinator shutdownDelayCtx context.Context shutdownDelayCancelFn context.CancelFunc @@ -247,7 +249,7 @@ func NewAllocRunner(config *Config) (*allocRunner, error) { // Create alloc dir ar.allocDir = allocdir.NewAllocDir(ar.logger, config.ClientConfig.AllocDir, alloc.ID) - ar.taskHookCoordinator = newTaskHookCoordinator(ar.logger, tg.Tasks) + ar.taskCoordinator = tasklifecycle.NewCoordinator(ar.logger, tg.Tasks, ar.waitCh) shutdownDelayCtx, shutdownDelayCancel := context.WithCancel(context.Background()) ar.shutdownDelayCtx = shutdownDelayCtx @@ -270,27 +272,27 @@ func NewAllocRunner(config *Config) (*allocRunner, error) { func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error { for _, task := range tasks { trConfig := &taskrunner.Config{ - Alloc: ar.alloc, - ClientConfig: ar.clientConfig, - Task: task, - TaskDir: ar.allocDir.NewTaskDir(task.Name), - Logger: ar.logger, - StateDB: ar.stateDB, - StateUpdater: ar, - DynamicRegistry: ar.dynamicRegistry, - Consul: ar.consulClient, - ConsulProxies: ar.consulProxiesClient, - ConsulSI: ar.sidsClient, - Vault: ar.vaultClient, - DeviceStatsReporter: ar.deviceStatsReporter, - CSIManager: ar.csiManager, - DeviceManager: ar.devicemanager, - DriverManager: ar.driverManager, - ServersContactedCh: ar.serversContactedCh, - StartConditionMetCtx: ar.taskHookCoordinator.startConditionForTask(task), - ShutdownDelayCtx: ar.shutdownDelayCtx, - ServiceRegWrapper: ar.serviceRegWrapper, - Getter: ar.getter, + Alloc: ar.alloc, + ClientConfig: ar.clientConfig, + Task: task, + TaskDir: ar.allocDir.NewTaskDir(task.Name), + Logger: ar.logger, + StateDB: ar.stateDB, + StateUpdater: ar, + DynamicRegistry: ar.dynamicRegistry, + Consul: ar.consulClient, + ConsulProxies: ar.consulProxiesClient, + ConsulSI: ar.sidsClient, + Vault: ar.vaultClient, + DeviceStatsReporter: ar.deviceStatsReporter, + CSIManager: ar.csiManager, + DeviceManager: ar.devicemanager, + DriverManager: ar.driverManager, + ServersContactedCh: ar.serversContactedCh, + 
StartConditionMetCh: ar.taskCoordinator.StartConditionForTask(task), + ShutdownDelayCtx: ar.shutdownDelayCtx, + ServiceRegWrapper: ar.serviceRegWrapper, + Getter: ar.getter, } if ar.cpusetManager != nil { @@ -388,26 +390,12 @@ func (ar *allocRunner) shouldRun() bool { // runTasks is used to run the task runners and block until they exit. func (ar *allocRunner) runTasks() { - // Start all tasks + // Start and wait for all tasks. for _, task := range ar.tasks { go task.Run() } - - // Block on all tasks except poststop tasks for _, task := range ar.tasks { - if !task.IsPoststopTask() { - <-task.WaitCh() - } - } - - // Signal poststop tasks to proceed to main runtime - ar.taskHookCoordinator.StartPoststopTasks() - - // Wait for poststop tasks to finish before proceeding - for _, task := range ar.tasks { - if task.IsPoststopTask() { - <-task.WaitCh() - } + <-task.WaitCh() } } @@ -461,7 +449,7 @@ func (ar *allocRunner) Restore() error { states[tr.Task().Name] = tr.TaskState() } - ar.taskHookCoordinator.taskStateUpdated(states) + ar.taskCoordinator.Restore(states) return nil } @@ -558,45 +546,69 @@ func (ar *allocRunner) handleTaskStateUpdates() { } } - // if all live runners are sidecars - kill alloc - if killEvent == nil && hasSidecars && !hasNonSidecarTasks(liveRunners) { - killEvent = structs.NewTaskEvent(structs.TaskMainDead) - } - - // If there's a kill event set and live runners, kill them - if killEvent != nil && len(liveRunners) > 0 { - - // Log kill reason - switch killEvent.Type { - case structs.TaskLeaderDead: - ar.logger.Debug("leader task dead, destroying all tasks", "leader_task", killTask) - case structs.TaskMainDead: - ar.logger.Debug("main tasks dead, destroying all sidecar tasks") - default: - ar.logger.Debug("task failure, destroying all tasks", "failed_task", killTask) + if len(liveRunners) > 0 { + // if all live runners are sidecars - kill alloc + onlySidecarsRemaining := hasSidecars && !hasNonSidecarTasks(liveRunners) + if killEvent == nil && onlySidecarsRemaining { + killEvent = structs.NewTaskEvent(structs.TaskMainDead) } - // Emit kill event for live runners - for _, tr := range liveRunners { - tr.EmitEvent(killEvent) - } + // If there's a kill event set and live runners, kill them + if killEvent != nil { + + // Log kill reason + switch killEvent.Type { + case structs.TaskLeaderDead: + ar.logger.Debug("leader task dead, destroying all tasks", "leader_task", killTask) + case structs.TaskMainDead: + ar.logger.Debug("main tasks dead, destroying all sidecar tasks") + default: + ar.logger.Debug("task failure, destroying all tasks", "failed_task", killTask) + } + + // Emit kill event for live runners + for _, tr := range liveRunners { + tr.EmitEvent(killEvent) + } - // Kill 'em all - states = ar.killTasks() + // Kill 'em all + states = ar.killTasks() + + // Wait for TaskRunners to exit before continuing. This will + // prevent looping before TaskRunners have transitioned to + // Dead. + for _, tr := range liveRunners { + ar.logger.Info("waiting for task to exit", "task", tr.Task().Name) + select { + case <-tr.WaitCh(): + case <-ar.waitCh: + } + } + } + } else { + // If there are no live runners left kill all non-poststop task + // runners to unblock them from the alloc restart loop. + for _, tr := range ar.tasks { + if tr.IsPoststopTask() { + continue + } - // Wait for TaskRunners to exit before continuing to - // prevent looping before TaskRunners have transitioned - // to Dead. 
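With the switch from `StartConditionMetCtx` to `StartConditionMetCh`, each task runner now receives a plain channel from the lifecycle Coordinator instead of a context. A hypothetical, simplified sketch of the wait a task runner performs before starting; the function and parameter names here are illustrative, not the actual TaskRunner code:

```go
// waitToStart blocks until the Coordinator allows this task's lifecycle stage
// to run, or until the alloc runner shuts down.
func waitToStart(startConditionMetCh, shutdownCh <-chan struct{}) bool {
	select {
	case <-startConditionMetCh:
		// The Coordinator opened the gate for this task's lifecycle stage.
		return true
	case <-shutdownCh:
		// The alloc runner is stopping; do not start the task.
		return false
	}
}
```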
- for _, tr := range liveRunners { - ar.logger.Info("killing task", "task", tr.Task().Name) select { case <-tr.WaitCh(): case <-ar.waitCh: + default: + // Kill task runner without setting an event because the + // task is already dead, it's just waiting in the alloc + // restart loop. + err := tr.Kill(context.TODO(), nil) + if err != nil { + ar.logger.Warn("failed to kill task", "task", tr.Task().Name, "error", err) + } } } } - ar.taskHookCoordinator.taskStateUpdated(states) + ar.taskCoordinator.TaskStateUpdated(states) // Get the client allocation calloc := ar.clientAlloc(states) @@ -609,6 +621,28 @@ func (ar *allocRunner) handleTaskStateUpdates() { } } +// hasNonSidecarTasks returns false if all the passed tasks are sidecar tasks +func hasNonSidecarTasks(tasks []*taskrunner.TaskRunner) bool { + for _, tr := range tasks { + if !tr.IsSidecarTask() { + return true + } + } + + return false +} + +// hasSidecarTasks returns true if any of the passed tasks are sidecar tasks +func hasSidecarTasks(tasks map[string]*taskrunner.TaskRunner) bool { + for _, tr := range tasks { + if tr.IsSidecarTask() { + return true + } + } + + return false +} + // killTasks kills all task runners, leader (if there is one) first. Errors are // logged except taskrunner.ErrTaskNotRunning which is ignored. Task states // after Kill has been called are returned. @@ -637,7 +671,7 @@ func (ar *allocRunner) killTasks() map[string]*structs.TaskState { break } - // Kill the rest non-sidecar or poststop tasks concurrently + // Kill the rest non-sidecar and non-poststop tasks concurrently wg := sync.WaitGroup{} for name, tr := range ar.tasks { // Filter out poststop and sidecar tasks so that they stop after all the other tasks are killed @@ -727,7 +761,7 @@ func (ar *allocRunner) clientAlloc(taskStates map[string]*structs.TaskState) *st if a.ClientStatus == structs.AllocClientStatusFailed && alloc.DeploymentID != "" && !a.DeploymentStatus.HasHealth() { a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } } @@ -1194,19 +1228,37 @@ func (ar *allocRunner) GetTaskEventHandler(taskName string) drivermanager.EventH return nil } -// RestartTask signalls the task runner for the provided task to restart. -func (ar *allocRunner) RestartTask(taskName string, taskEvent *structs.TaskEvent) error { +// Restart satisfies the WorkloadRestarter interface and restarts all tasks +// that are currently running. +func (ar *allocRunner) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error { + return ar.restartTasks(ctx, event, failure, false) +} + +// RestartTask restarts the provided task. +func (ar *allocRunner) RestartTask(taskName string, event *structs.TaskEvent) error { tr, ok := ar.tasks[taskName] if !ok { return fmt.Errorf("Could not find task runner for task: %s", taskName) } - return tr.Restart(context.TODO(), taskEvent, false) + return tr.Restart(context.TODO(), event, false) } -// Restart satisfies the WorkloadRestarter interface restarts all task runners -// concurrently -func (ar *allocRunner) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error { +// RestartRunning restarts all tasks that are currently running. +func (ar *allocRunner) RestartRunning(event *structs.TaskEvent) error { + return ar.restartTasks(context.TODO(), event, false, false) +} + +// RestartAll restarts all tasks in the allocation, including dead ones. They +// will restart following their lifecycle order. 
+func (ar *allocRunner) RestartAll(event *structs.TaskEvent) error { + // Restart the taskCoordinator to allow dead tasks to run again. + ar.taskCoordinator.Restart() + return ar.restartTasks(context.TODO(), event, false, true) +} + +// restartTasks restarts all task runners concurrently. +func (ar *allocRunner) restartTasks(ctx context.Context, event *structs.TaskEvent, failure bool, force bool) error { waitCh := make(chan struct{}) var err *multierror.Error var errMutex sync.Mutex @@ -1219,10 +1271,19 @@ func (ar *allocRunner) Restart(ctx context.Context, event *structs.TaskEvent, fa defer close(waitCh) for tn, tr := range ar.tasks { wg.Add(1) - go func(taskName string, r agentconsul.WorkloadRestarter) { + go func(taskName string, taskRunner *taskrunner.TaskRunner) { defer wg.Done() - e := r.Restart(ctx, event, failure) - if e != nil { + + var e error + if force { + e = taskRunner.ForceRestart(ctx, event.Copy(), failure) + } else { + e = taskRunner.Restart(ctx, event.Copy(), failure) + } + + // Ignore ErrTaskNotRunning errors since tasks that are not + // running are expected to not be restarted. + if e != nil && e != taskrunner.ErrTaskNotRunning { errMutex.Lock() defer errMutex.Unlock() err = multierror.Append(err, fmt.Errorf("failed to restart task %s: %v", taskName, e)) @@ -1240,25 +1301,6 @@ func (ar *allocRunner) Restart(ctx context.Context, event *structs.TaskEvent, fa return err.ErrorOrNil() } -// RestartAll signalls all task runners in the allocation to restart and passes -// a copy of the task event to each restart event. -// Returns any errors in a concatenated form. -func (ar *allocRunner) RestartAll(taskEvent *structs.TaskEvent) error { - var err *multierror.Error - - // run alloc task restart hooks - ar.taskRestartHooks() - - for tn := range ar.tasks { - rerr := ar.RestartTask(tn, taskEvent.Copy()) - if rerr != nil { - err = multierror.Append(err, rerr) - } - } - - return err.ErrorOrNil() -} - // Signal sends a signal request to task runners inside an allocation. If the // taskName is empty, then it is sent to all tasks. 
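The restart surface of `allocRunner` now splits into `RestartTask`, `RestartRunning`, and `RestartAll`, all funneling into `restartTasks`. As a rough illustration of how a caller such as the client Allocations endpoint might choose between them based on an `AllocRestartRequest` (the dispatch function and interface below are assumptions for illustration; only the method names and request fields come from this diff):

```go
package example

import "errors"

// taskEvent stands in for structs.TaskEvent.
type taskEvent struct{ Type string }

// allocRestarter captures the three restart entry points added to allocRunner.
type allocRestarter interface {
	RestartAll(ev *taskEvent) error
	RestartRunning(ev *taskEvent) error
	RestartTask(taskName string, ev *taskEvent) error
}

// dispatchRestart routes a restart request: every task (dead or alive, in
// lifecycle order), a single named task, or only the currently running tasks.
func dispatchRestart(r allocRestarter, allTasks bool, taskName string, ev *taskEvent) error {
	switch {
	case allTasks && taskName != "":
		return errors.New("cannot restart all tasks and a specific task at the same time")
	case allTasks:
		return r.RestartAll(ev)
	case taskName != "":
		return r.RestartTask(taskName, ev)
	default:
		return r.RestartRunning(ev)
	}
}
```

This mirrors the client RPC test earlier in the diff, where a request with both `AllTasks` and a `TaskName` is rejected.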
func (ar *allocRunner) Signal(taskName, signal string) error { diff --git a/client/allocrunner/alloc_runner_test.go b/client/allocrunner/alloc_runner_test.go index 1ff8a59125f..90b94657b3d 100644 --- a/client/allocrunner/alloc_runner_test.go +++ b/client/allocrunner/alloc_runner_test.go @@ -10,8 +10,11 @@ import ( "time" "github.com/hashicorp/consul/api" + multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allochealth" + "github.com/hashicorp/nomad/client/allocrunner/tasklifecycle" + "github.com/hashicorp/nomad/client/allocrunner/taskrunner" "github.com/hashicorp/nomad/client/allocwatcher" "github.com/hashicorp/nomad/client/serviceregistration" regMock "github.com/hashicorp/nomad/client/serviceregistration/mock" @@ -481,6 +484,464 @@ func TestAllocRunner_Lifecycle_Poststop(t *testing.T) { } +func TestAllocRunner_Lifecycle_Restart(t *testing.T) { + ci.Parallel(t) + + // test cases can use this default or override w/ taskDefs param + alloc := mock.LifecycleAllocFromTasks([]mock.LifecycleTaskDef{ + {Name: "main", RunFor: "100s", ExitCode: 0, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }) + alloc.Job.Type = structs.JobTypeService + rp := &structs.RestartPolicy{ + Attempts: 1, + Interval: 10 * time.Minute, + Delay: 1 * time.Nanosecond, + Mode: structs.RestartPolicyModeFail, + } + + ev := &structs.TaskEvent{Type: structs.TaskRestartSignal} + + testCases := []struct { + name string + taskDefs []mock.LifecycleTaskDef + isBatch bool + hasLeader bool + action func(*allocRunner, *structs.Allocation) error + expectedErr string + expectedAfter map[string]structs.TaskState + }{ + { + name: "restart entire allocation", + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartAll(ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 1}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "restart only running tasks", + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartRunning(ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "batch job restart entire allocation", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "100s", ExitCode: 1, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: 
"prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + isBatch: true, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartAll(ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 1}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "batch job restart only running tasks ", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "100s", ExitCode: 1, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + isBatch: true, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartRunning(ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "restart entire allocation with leader", + hasLeader: true, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartAll(ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 1}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "stop from server", + action: func(ar *allocRunner, alloc *structs.Allocation) error { + stopAlloc := alloc.Copy() + stopAlloc.DesiredStatus = structs.AllocDesiredStatusStop + ar.Update(stopAlloc) + return nil + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststop": structs.TaskState{State: "dead", Restarts: 0}, + }, + }, + { + name: "restart main task", 
+ action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartTask("main", ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 0}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "restart leader main task", + hasLeader: true, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartTask("main", ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 0}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "main task fails and restarts once", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "2s", ExitCode: 1, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + time.Sleep(3 * time.Second) // make sure main task has exited + return nil + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststop": structs.TaskState{State: "dead", Restarts: 0}, + }, + }, + { + name: "leader main task fails and restarts once", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "2s", ExitCode: 1, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + hasLeader: true, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + time.Sleep(3 * time.Second) // make sure main task has exited + return nil + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + 
"poststart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststop": structs.TaskState{State: "dead", Restarts: 0}, + }, + }, + { + name: "main stopped unexpectedly and restarts once", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "2s", ExitCode: 0, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + time.Sleep(3 * time.Second) // make sure main task has exited + return nil + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststop": structs.TaskState{State: "dead", Restarts: 0}, + }, + }, + { + name: "leader main stopped unexpectedly and restarts once", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "2s", ExitCode: 0, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + time.Sleep(3 * time.Second) // make sure main task has exited + return nil + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststop": structs.TaskState{State: "dead", Restarts: 0}, + }, + }, + { + name: "failed main task cannot be restarted", + taskDefs: []mock.LifecycleTaskDef{ + {Name: "main", RunFor: "2s", ExitCode: 1, Hook: "", IsSidecar: false}, + {Name: "prestart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "prestart", IsSidecar: false}, + {Name: "prestart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "prestart", IsSidecar: true}, + {Name: "poststart-oneshot", RunFor: "1s", ExitCode: 0, Hook: "poststart", IsSidecar: false}, + {Name: "poststart-sidecar", RunFor: "100s", ExitCode: 0, Hook: "poststart", IsSidecar: true}, + {Name: "poststop", RunFor: "1s", ExitCode: 0, Hook: "poststop", IsSidecar: false}, + }, + action: func(ar *allocRunner, alloc *structs.Allocation) error { + // make sure main task has had a chance to restart once on its + // own and fail again before we try to manually restart it + time.Sleep(5 * time.Second) + return ar.RestartTask("main", ev) + }, + expectedErr: "Task not running", + 
expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "dead", Restarts: 1}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "dead", Restarts: 0}, + "poststop": structs.TaskState{State: "dead", Restarts: 0}, + }, + }, + { + name: "restart prestart-sidecar task", + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartTask("prestart-sidecar", ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 0}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 0}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + { + name: "restart poststart-sidecar task", + action: func(ar *allocRunner, alloc *structs.Allocation) error { + return ar.RestartTask("poststart-sidecar", ev) + }, + expectedAfter: map[string]structs.TaskState{ + "main": structs.TaskState{State: "running", Restarts: 0}, + "prestart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "prestart-sidecar": structs.TaskState{State: "running", Restarts: 0}, + "poststart-oneshot": structs.TaskState{State: "dead", Restarts: 0}, + "poststart-sidecar": structs.TaskState{State: "running", Restarts: 1}, + "poststop": structs.TaskState{State: "pending", Restarts: 0}, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + ci.Parallel(t) + + alloc := alloc.Copy() + alloc.Job.TaskGroups[0].RestartPolicy = rp + if tc.taskDefs != nil { + alloc = mock.LifecycleAllocFromTasks(tc.taskDefs) + alloc.Job.Type = structs.JobTypeService + } + for _, task := range alloc.Job.TaskGroups[0].Tasks { + task.RestartPolicy = rp // tasks inherit the group policy + } + if tc.hasLeader { + for _, task := range alloc.Job.TaskGroups[0].Tasks { + if task.Name == "main" { + task.Leader = true + } + } + } + if tc.isBatch { + alloc.Job.Type = structs.JobTypeBatch + } + + conf, cleanup := testAllocRunnerConfig(t, alloc) + defer cleanup() + ar, err := NewAllocRunner(conf) + require.NoError(t, err) + defer destroy(ar) + go ar.Run() + + upd := conf.StateUpdater.(*MockStateUpdater) + + // assert our "before" states: + // - all one-shot tasks should be dead but not failed + // - all main tasks and sidecars should be running + // - no tasks should have restarted + testutil.WaitForResult(func() (bool, error) { + last := upd.Last() + if last == nil { + return false, fmt.Errorf("no update") + } + if last.ClientStatus != structs.AllocClientStatusRunning { + return false, fmt.Errorf( + "expected alloc to be running not %s", last.ClientStatus) + } + var errs *multierror.Error + + expectedBefore := map[string]string{ + "main": "running", + "prestart-oneshot": "dead", + "prestart-sidecar": "running", + "poststart-oneshot": "dead", + "poststart-sidecar": "running", + "poststop": "pending", + } + + for task, expected := range expectedBefore { + got, ok := last.TaskStates[task] + if !ok { + continue + } + if got.State != expected { + errs = multierror.Append(errs, fmt.Errorf( + "expected initial state of task %q to be %q not %q", + task, expected, got.State)) + } + if got.Restarts != 0 
{ + errs = multierror.Append(errs, fmt.Errorf( + "expected no initial restarts of task %q, not %q", + task, got.Restarts)) + } + if expected == "dead" && got.Failed { + errs = multierror.Append(errs, fmt.Errorf( + "expected ephemeral task %q to be dead but not failed", + task)) + } + + } + if errs.ErrorOrNil() != nil { + return false, errs.ErrorOrNil() + } + return true, nil + }, func(err error) { + require.NoError(t, err, "error waiting for initial state") + }) + + // perform the action + err = tc.action(ar, alloc.Copy()) + if tc.expectedErr != "" { + require.EqualError(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + } + + // assert our "after" states + testutil.WaitForResult(func() (bool, error) { + last := upd.Last() + if last == nil { + return false, fmt.Errorf("no update") + } + var errs *multierror.Error + for task, expected := range tc.expectedAfter { + got, ok := last.TaskStates[task] + if !ok { + errs = multierror.Append(errs, fmt.Errorf( + "no final state found for task %q", task, + )) + } + if got.State != expected.State { + errs = multierror.Append(errs, fmt.Errorf( + "expected final state of task %q to be %q not %q", + task, expected.State, got.State)) + } + if expected.State == "dead" { + if got.FinishedAt.IsZero() || got.StartedAt.IsZero() { + errs = multierror.Append(errs, fmt.Errorf( + "expected final state of task %q to have start and finish time", task)) + } + if len(got.Events) < 2 { + errs = multierror.Append(errs, fmt.Errorf( + "expected final state of task %q to include at least 2 tasks", task)) + } + } + + if got.Restarts != expected.Restarts { + errs = multierror.Append(errs, fmt.Errorf( + "expected final restarts of task %q to be %v not %v", + task, expected.Restarts, got.Restarts)) + } + } + if errs.ErrorOrNil() != nil { + return false, errs.ErrorOrNil() + } + return true, nil + }, func(err error) { + require.NoError(t, err, "error waiting for final state") + }) + }) + } +} + func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) { ci.Parallel(t) @@ -803,28 +1264,44 @@ func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) { ar, err := NewAllocRunner(conf) require.NoError(t, err) - // We should see all tasks with Prestart hooks are not blocked from running: - // i.e. the "init" and "side" task hook coordinator channels are closed - require.Truef(t, isChannelClosed(ar.taskHookCoordinator.startConditionForTask(ar.tasks["init"].Task())), "init channel was open, should be closed") - require.Truef(t, isChannelClosed(ar.taskHookCoordinator.startConditionForTask(ar.tasks["side"].Task())), "side channel was open, should be closed") + go ar.Run() + defer destroy(ar) + + // Wait for the coordinator to transition from the "init" state. + tasklifecycle.WaitNotInitUntil(ar.taskCoordinator, time.Second, func() { + t.Fatalf("task coordinator didn't transition from init in time") + }) - isChannelClosed(ar.taskHookCoordinator.startConditionForTask(ar.tasks["side"].Task())) + // We should see all tasks with Prestart hooks are not blocked from running. 
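The assertions just below use the new `tasklifecycle.RequireTaskAllowed` and `RequireTaskBlocked` helpers in place of the old `isChannelClosed` checks (removed further down in this diff). A plausible core for such a check, assuming it only needs to observe whether the task's start-condition channel is ready without blocking; the real helpers may retry and report richer test failures:

```go
// taskAllowed reports whether a receive on the task's start-condition channel
// would succeed right now, i.e. whether the Coordinator currently lets the
// task run. This mirrors the removed isChannelClosed test helper.
func taskAllowed(startCh <-chan struct{}) bool {
	select {
	case <-startCh:
		return true
	default:
		return false
	}
}
```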
+ tasklifecycle.RequireTaskAllowed(t, ar.taskCoordinator, ar.tasks["init"].Task()) + tasklifecycle.RequireTaskAllowed(t, ar.taskCoordinator, ar.tasks["side"].Task()) + tasklifecycle.RequireTaskBlocked(t, ar.taskCoordinator, ar.tasks["web"].Task()) + tasklifecycle.RequireTaskBlocked(t, ar.taskCoordinator, ar.tasks["poststart"].Task()) - // Mimic client dies while init task running, and client restarts after init task finished + // Mimic client dies while init task running, and client restarts after + // init task finished and web is running. ar.tasks["init"].UpdateState(structs.TaskStateDead, structs.NewTaskEvent(structs.TaskTerminated)) ar.tasks["side"].UpdateState(structs.TaskStateRunning, structs.NewTaskEvent(structs.TaskStarted)) + ar.tasks["web"].UpdateState(structs.TaskStateRunning, structs.NewTaskEvent(structs.TaskStarted)) - // Create a new AllocRunner to test RestoreState and Run + // Create a new AllocRunner to test Restore and Run. ar2, err := NewAllocRunner(conf) require.NoError(t, err) + require.NoError(t, ar2.Restore()) - if err := ar2.Restore(); err != nil { - t.Fatalf("error restoring state: %v", err) - } + go ar2.Run() + defer destroy(ar2) - // We want to see Restore resume execution with correct hook ordering: - // i.e. we should see the "web" main task hook coordinator channel is closed - require.Truef(t, isChannelClosed(ar2.taskHookCoordinator.startConditionForTask(ar.tasks["web"].Task())), "web channel was open, should be closed") + // Wait for the coordinator to transition from the "init" state. + tasklifecycle.WaitNotInitUntil(ar.taskCoordinator, time.Second, func() { + t.Fatalf("task coordinator didn't transition from init in time") + }) + + // Restore resumes execution with correct lifecycle ordering. + tasklifecycle.RequireTaskBlocked(t, ar2.taskCoordinator, ar2.tasks["init"].Task()) + tasklifecycle.RequireTaskAllowed(t, ar2.taskCoordinator, ar2.tasks["side"].Task()) + tasklifecycle.RequireTaskAllowed(t, ar2.taskCoordinator, ar2.tasks["web"].Task()) + tasklifecycle.RequireTaskAllowed(t, ar2.taskCoordinator, ar2.tasks["poststart"].Task()) } func TestAllocRunner_Update_Semantics(t *testing.T) { @@ -1195,7 +1672,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) { ar.Run() defer destroy(ar) - require.Equal(t, structs.AllocClientStatusComplete, ar.AllocState().ClientStatus) + WaitForClientState(t, ar, structs.AllocClientStatusComplete) // Step 2. 
Modify its directory task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1223,7 +1700,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) { ar2.Run() defer destroy(ar2) - require.Equal(t, structs.AllocClientStatusComplete, ar2.AllocState().ClientStatus) + WaitForClientState(t, ar, structs.AllocClientStatusComplete) // Ensure that data from ar was moved to ar2 dataFile = filepath.Join(ar2.allocDir.SharedDir, "data", "data_file") @@ -1811,3 +2288,113 @@ func TestAllocRunner_Lifecycle_Shutdown_Order(t *testing.T) { last = upd.Last() require.Less(t, last.TaskStates[sidecarTask.Name].FinishedAt, last.TaskStates[poststopTask.Name].FinishedAt) } + +func TestHasSidecarTasks(t *testing.T) { + ci.Parallel(t) + + testCases := []struct { + name string + lifecycle []*structs.TaskLifecycleConfig + hasSidecars bool + hasNonsidecars bool + }{ + { + name: "all sidecar - one", + lifecycle: []*structs.TaskLifecycleConfig{ + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: true, + }, + }, + hasSidecars: true, + hasNonsidecars: false, + }, + { + name: "all sidecar - multiple", + lifecycle: []*structs.TaskLifecycleConfig{ + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: true, + }, + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: true, + }, + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: true, + }, + }, + hasSidecars: true, + hasNonsidecars: false, + }, + { + name: "some sidecars, some others", + lifecycle: []*structs.TaskLifecycleConfig{ + nil, + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: false, + }, + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: true, + }, + }, + hasSidecars: true, + hasNonsidecars: true, + }, + { + name: "no sidecars", + lifecycle: []*structs.TaskLifecycleConfig{ + nil, + { + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: false, + }, + nil, + }, + hasSidecars: false, + hasNonsidecars: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create alloc with the given task lifecycle configurations. + alloc := mock.BatchAlloc() + + tasks := []*structs.Task{} + resources := map[string]*structs.AllocatedTaskResources{} + + tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] + + for i, lifecycle := range tc.lifecycle { + task := alloc.Job.TaskGroups[0].Tasks[0].Copy() + task.Name = fmt.Sprintf("task%d", i) + task.Lifecycle = lifecycle + tasks = append(tasks, task) + resources[task.Name] = tr + } + + alloc.Job.TaskGroups[0].Tasks = tasks + alloc.AllocatedResources.Tasks = resources + + // Create alloc runner. + arConf, cleanup := testAllocRunnerConfig(t, alloc) + defer cleanup() + + ar, err := NewAllocRunner(arConf) + require.NoError(t, err) + + require.Equal(t, tc.hasSidecars, hasSidecarTasks(ar.tasks), "sidecars") + + runners := []*taskrunner.TaskRunner{} + for _, r := range ar.tasks { + runners = append(runners, r) + } + require.Equal(t, tc.hasNonsidecars, hasNonSidecarTasks(runners), "non-sidecars") + + }) + } +} diff --git a/client/allocrunner/alloc_runner_unix_test.go b/client/allocrunner/alloc_runner_unix_test.go index ab3c777dd20..0859569101e 100644 --- a/client/allocrunner/alloc_runner_unix_test.go +++ b/client/allocrunner/alloc_runner_unix_test.go @@ -207,18 +207,18 @@ func TestAllocRunner_Restore_CompletedBatch(t *testing.T) { go ar2.Run() defer destroy(ar2) - // AR waitCh must be closed even when task doesn't run again + // AR waitCh must be open as the task waits for a possible alloc restart. 
select { case <-ar2.WaitCh(): - case <-time.After(10 * time.Second): - require.Fail(t, "alloc.waitCh wasn't closed") + require.Fail(t, "alloc.waitCh was closed") + default: } - // TR waitCh must be closed too! + // TR waitCh must be open too! select { case <-ar2.tasks[task.Name].WaitCh(): - case <-time.After(10 * time.Second): - require.Fail(t, "tr.waitCh wasn't closed") + require.Fail(t, "tr.waitCh was closed") + default: } // Assert that events are unmodified, which they would if task re-run diff --git a/client/allocrunner/csi_hook_test.go b/client/allocrunner/csi_hook_test.go index 21d3fc91d7d..bb5362b9537 100644 --- a/client/allocrunner/csi_hook_test.go +++ b/client/allocrunner/csi_hook_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/nomad/client/pluginmanager" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -193,8 +193,8 @@ func TestCSIHook(t *testing.T) { rpcer := mockRPCer{ alloc: alloc, callCounts: callCounts, - hasExistingClaim: helper.BoolToPtr(tc.startsWithClaims), - schedulable: helper.BoolToPtr(!tc.startsUnschedulable), + hasExistingClaim: pointer.Of(tc.startsWithClaims), + schedulable: pointer.Of(!tc.startsUnschedulable), } ar := mockAllocRunner{ res: &cstructs.AllocHookResources{}, @@ -298,8 +298,8 @@ func TestCSIHook_claimVolumesFromAlloc_Validation(t *testing.T) { rpcer := mockRPCer{ alloc: alloc, callCounts: callCounts, - hasExistingClaim: helper.BoolToPtr(false), - schedulable: helper.BoolToPtr(true), + hasExistingClaim: pointer.Of(false), + schedulable: pointer.Of(true), } ar := mockAllocRunner{ diff --git a/client/allocrunner/group_service_hook_test.go b/client/allocrunner/group_service_hook_test.go index 822ae04d72f..e05df8cbc19 100644 --- a/client/allocrunner/group_service_hook_test.go +++ b/client/allocrunner/group_service_hook_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/client/serviceregistration/wrapper" "github.com/hashicorp/nomad/client/taskenv" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -74,7 +74,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) { ci.Parallel(t) alloc := mock.Alloc() - alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(10 * time.Second) + alloc.Job.TaskGroups[0].ShutdownDelay = pointer.Of(10 * time.Second) logger := testlog.HCLogger(t) consulMockClient := regMock.NewServiceRegistrationHandler(logger) @@ -95,7 +95,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) { require.NoError(t, h.Prerun()) // Incease shutdown Delay - alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(15 * time.Second) + alloc.Job.TaskGroups[0].ShutdownDelay = pointer.Of(15 * time.Second) req := &interfaces.RunnerUpdateRequest{Alloc: alloc} require.NoError(t, h.Update(req)) diff --git a/client/allocrunner/task_hook_coordinator.go b/client/allocrunner/task_hook_coordinator.go deleted file mode 100644 index a056fa3eb83..00000000000 --- a/client/allocrunner/task_hook_coordinator.go +++ /dev/null @@ -1,199 +0,0 @@ -package allocrunner - -import ( - "context" - - "github.com/hashicorp/go-hclog" - 
"github.com/hashicorp/nomad/client/allocrunner/taskrunner" - "github.com/hashicorp/nomad/nomad/structs" -) - -// TaskHookCoordinator helps coordinate when mainTasks start tasks can launch -// namely after all Prestart Tasks have run, and after all BlockUntilCompleted have completed -type taskHookCoordinator struct { - logger hclog.Logger - - // constant for quickly starting all prestart tasks - closedCh chan struct{} - - // Each context is used to gate task runners launching the tasks. A task - // runner waits until the context associated its lifecycle context is - // done/cancelled. - mainTaskCtx context.Context - mainTaskCtxCancel func() - - poststartTaskCtx context.Context - poststartTaskCtxCancel func() - poststopTaskCtx context.Context - poststopTaskCtxCancel context.CancelFunc - - prestartSidecar map[string]struct{} - prestartEphemeral map[string]struct{} - mainTasksRunning map[string]struct{} // poststop: main tasks running -> finished - mainTasksPending map[string]struct{} // poststart: main tasks pending -> running -} - -func newTaskHookCoordinator(logger hclog.Logger, tasks []*structs.Task) *taskHookCoordinator { - closedCh := make(chan struct{}) - close(closedCh) - - mainTaskCtx, mainCancelFn := context.WithCancel(context.Background()) - poststartTaskCtx, poststartCancelFn := context.WithCancel(context.Background()) - poststopTaskCtx, poststopTaskCancelFn := context.WithCancel(context.Background()) - - c := &taskHookCoordinator{ - logger: logger, - closedCh: closedCh, - mainTaskCtx: mainTaskCtx, - mainTaskCtxCancel: mainCancelFn, - prestartSidecar: map[string]struct{}{}, - prestartEphemeral: map[string]struct{}{}, - mainTasksRunning: map[string]struct{}{}, - mainTasksPending: map[string]struct{}{}, - poststartTaskCtx: poststartTaskCtx, - poststartTaskCtxCancel: poststartCancelFn, - poststopTaskCtx: poststopTaskCtx, - poststopTaskCtxCancel: poststopTaskCancelFn, - } - c.setTasks(tasks) - return c -} - -func (c *taskHookCoordinator) setTasks(tasks []*structs.Task) { - for _, task := range tasks { - - if task.Lifecycle == nil { - c.mainTasksPending[task.Name] = struct{}{} - c.mainTasksRunning[task.Name] = struct{}{} - continue - } - - switch task.Lifecycle.Hook { - case structs.TaskLifecycleHookPrestart: - if task.Lifecycle.Sidecar { - c.prestartSidecar[task.Name] = struct{}{} - } else { - c.prestartEphemeral[task.Name] = struct{}{} - } - case structs.TaskLifecycleHookPoststart: - // Poststart hooks don't need to be tracked. - case structs.TaskLifecycleHookPoststop: - // Poststop hooks don't need to be tracked. 
- default: - c.logger.Error("invalid lifecycle hook", "task", task.Name, "hook", task.Lifecycle.Hook) - } - } - - if !c.hasPrestartTasks() { - c.mainTaskCtxCancel() - } -} - -func (c *taskHookCoordinator) hasPrestartTasks() bool { - return len(c.prestartSidecar)+len(c.prestartEphemeral) > 0 -} - -func (c *taskHookCoordinator) hasRunningMainTasks() bool { - return len(c.mainTasksRunning) > 0 -} - -func (c *taskHookCoordinator) hasPendingMainTasks() bool { - return len(c.mainTasksPending) > 0 -} - -func (c *taskHookCoordinator) startConditionForTask(task *structs.Task) <-chan struct{} { - if task.Lifecycle == nil { - return c.mainTaskCtx.Done() - } - - switch task.Lifecycle.Hook { - case structs.TaskLifecycleHookPrestart: - // Prestart tasks start without checking status of other tasks - return c.closedCh - case structs.TaskLifecycleHookPoststart: - return c.poststartTaskCtx.Done() - case structs.TaskLifecycleHookPoststop: - return c.poststopTaskCtx.Done() - default: - // it should never have a lifecycle stanza w/o a hook, so report an error but allow the task to start normally - c.logger.Error("invalid lifecycle hook", "task", task.Name, "hook", task.Lifecycle.Hook) - return c.mainTaskCtx.Done() - } -} - -// This is not thread safe! This must only be called from one thread per alloc runner. -func (c *taskHookCoordinator) taskStateUpdated(states map[string]*structs.TaskState) { - for task := range c.prestartSidecar { - st := states[task] - if st == nil || st.StartedAt.IsZero() { - continue - } - - delete(c.prestartSidecar, task) - } - - for task := range c.prestartEphemeral { - st := states[task] - if st == nil || !st.Successful() { - continue - } - - delete(c.prestartEphemeral, task) - } - - for task := range c.mainTasksRunning { - st := states[task] - - if st == nil || st.State != structs.TaskStateDead { - continue - } - - delete(c.mainTasksRunning, task) - } - - for task := range c.mainTasksPending { - st := states[task] - if st == nil || st.StartedAt.IsZero() { - continue - } - - delete(c.mainTasksPending, task) - } - - if !c.hasPrestartTasks() { - c.mainTaskCtxCancel() - } - - if !c.hasPendingMainTasks() { - c.poststartTaskCtxCancel() - } - if !c.hasRunningMainTasks() { - c.poststopTaskCtxCancel() - } -} - -func (c *taskHookCoordinator) StartPoststopTasks() { - c.poststopTaskCtxCancel() -} - -// hasNonSidecarTasks returns false if all the passed tasks are sidecar tasks -func hasNonSidecarTasks(tasks []*taskrunner.TaskRunner) bool { - for _, tr := range tasks { - if !tr.IsSidecarTask() { - return true - } - } - - return false -} - -// hasSidecarTasks returns true if any of the passed tasks are sidecar tasks -func hasSidecarTasks(tasks map[string]*taskrunner.TaskRunner) bool { - for _, tr := range tasks { - if tr.IsSidecarTask() { - return true - } - } - - return false -} diff --git a/client/allocrunner/task_hook_coordinator_test.go b/client/allocrunner/task_hook_coordinator_test.go deleted file mode 100644 index 7399acdabb8..00000000000 --- a/client/allocrunner/task_hook_coordinator_test.go +++ /dev/null @@ -1,381 +0,0 @@ -package allocrunner - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/client/allocrunner/taskrunner" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" -) - -func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) { - ci.Parallel(t) - - alloc := mock.Alloc() - tasks := 
alloc.Job.TaskGroups[0].Tasks - task := tasks[0] - logger := testlog.HCLogger(t) - - coord := newTaskHookCoordinator(logger, tasks) - - ch := coord.startConditionForTask(task) - - require.Truef(t, isChannelClosed(ch), "%s channel was open, should be closed", task.Name) -} - -func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) { - ci.Parallel(t) - - logger := testlog.HCLogger(t) - - alloc := mock.LifecycleAlloc() - tasks := alloc.Job.TaskGroups[0].Tasks - - mainTask := tasks[0] - sideTask := tasks[1] - initTask := tasks[2] - - coord := newTaskHookCoordinator(logger, tasks) - initCh := coord.startConditionForTask(initTask) - sideCh := coord.startConditionForTask(sideTask) - mainCh := coord.startConditionForTask(mainTask) - - require.Truef(t, isChannelClosed(initCh), "%s channel was open, should be closed", initTask.Name) - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) -} - -func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) { - ci.Parallel(t) - - logger := testlog.HCLogger(t) - - alloc := mock.LifecycleAlloc() - tasks := alloc.Job.TaskGroups[0].Tasks - - mainTask := tasks[0] - sideTask := tasks[1] - initTask := tasks[2] - - coord := newTaskHookCoordinator(logger, tasks) - initCh := coord.startConditionForTask(initTask) - sideCh := coord.startConditionForTask(sideTask) - mainCh := coord.startConditionForTask(mainTask) - - require.Truef(t, isChannelClosed(initCh), "%s channel was open, should be closed", initTask.Name) - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) - - states := map[string]*structs.TaskState{ - mainTask.Name: { - State: structs.TaskStatePending, - Failed: false, - }, - initTask.Name: { - State: structs.TaskStateDead, - Failed: false, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - sideTask.Name: { - State: structs.TaskStateRunning, - Failed: false, - StartedAt: time.Now(), - }, - } - - coord.taskStateUpdated(states) - - require.Truef(t, isChannelClosed(initCh), "%s channel was open, should be closed", initTask.Name) - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Truef(t, isChannelClosed(mainCh), "%s channel was open, should be closed", mainTask.Name) -} - -func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) { - ci.Parallel(t) - - logger := testlog.HCLogger(t) - - alloc := mock.LifecycleAlloc() - alloc.Job = mock.VariableLifecycleJob(structs.Resources{CPU: 100, MemoryMB: 256}, 1, 2, 0) - tasks := alloc.Job.TaskGroups[0].Tasks - - mainTask := tasks[0] - init1Task := tasks[1] - init2Task := tasks[2] - - coord := newTaskHookCoordinator(logger, tasks) - mainCh := coord.startConditionForTask(mainTask) - init1Ch := coord.startConditionForTask(init1Task) - init2Ch := coord.startConditionForTask(init2Task) - - require.Truef(t, isChannelClosed(init1Ch), "%s channel was open, should be closed", init1Task.Name) - require.Truef(t, isChannelClosed(init2Ch), "%s channel was open, should be closed", init2Task.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) - - states := map[string]*structs.TaskState{ - mainTask.Name: { - State: structs.TaskStatePending, - Failed: false, - }, - init1Task.Name: { - State: 
structs.TaskStateDead, - Failed: false, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - init2Task.Name: { - State: structs.TaskStateDead, - Failed: false, - StartedAt: time.Now(), - }, - } - - coord.taskStateUpdated(states) - - require.Truef(t, isChannelClosed(init1Ch), "%s channel was open, should be closed", init1Task.Name) - require.Truef(t, isChannelClosed(init2Ch), "%s channel was open, should be closed", init2Task.Name) - require.Truef(t, isChannelClosed(mainCh), "%s channel was open, should be closed", mainTask.Name) -} - -func TestTaskHookCoordinator_FailedInitTask(t *testing.T) { - ci.Parallel(t) - - logger := testlog.HCLogger(t) - - alloc := mock.LifecycleAlloc() - alloc.Job = mock.VariableLifecycleJob(structs.Resources{CPU: 100, MemoryMB: 256}, 1, 2, 0) - tasks := alloc.Job.TaskGroups[0].Tasks - - mainTask := tasks[0] - init1Task := tasks[1] - init2Task := tasks[2] - - coord := newTaskHookCoordinator(logger, tasks) - mainCh := coord.startConditionForTask(mainTask) - init1Ch := coord.startConditionForTask(init1Task) - init2Ch := coord.startConditionForTask(init2Task) - - require.Truef(t, isChannelClosed(init1Ch), "%s channel was open, should be closed", init1Task.Name) - require.Truef(t, isChannelClosed(init2Ch), "%s channel was open, should be closed", init2Task.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) - - states := map[string]*structs.TaskState{ - mainTask.Name: { - State: structs.TaskStatePending, - Failed: false, - }, - init1Task.Name: { - State: structs.TaskStateDead, - Failed: false, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - init2Task.Name: { - State: structs.TaskStateDead, - Failed: true, - StartedAt: time.Now(), - }, - } - - coord.taskStateUpdated(states) - - require.Truef(t, isChannelClosed(init1Ch), "%s channel was open, should be closed", init1Task.Name) - require.Truef(t, isChannelClosed(init2Ch), "%s channel was open, should be closed", init2Task.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) -} - -func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) { - ci.Parallel(t) - - logger := testlog.HCLogger(t) - - alloc := mock.LifecycleAlloc() - tasks := alloc.Job.TaskGroups[0].Tasks - - mainTask := tasks[0] - sideTask := tasks[1] - initTask := tasks[2] - - coord := newTaskHookCoordinator(logger, tasks) - initCh := coord.startConditionForTask(initTask) - sideCh := coord.startConditionForTask(sideTask) - mainCh := coord.startConditionForTask(mainTask) - - require.Truef(t, isChannelClosed(initCh), "%s channel was open, should be closed", initTask.Name) - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) - - states := map[string]*structs.TaskState{ - mainTask.Name: { - State: structs.TaskStatePending, - Failed: false, - }, - initTask.Name: { - State: structs.TaskStateDead, - Failed: false, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - sideTask.Name: { - State: structs.TaskStatePending, - Failed: false, - }, - } - - coord.taskStateUpdated(states) - - require.Truef(t, isChannelClosed(initCh), "%s channel was open, should be closed", initTask.Name) - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) -} - -func 
TestTaskHookCoordinator_PoststartStartsAfterMain(t *testing.T) { - ci.Parallel(t) - - logger := testlog.HCLogger(t) - - alloc := mock.LifecycleAlloc() - tasks := alloc.Job.TaskGroups[0].Tasks - - mainTask := tasks[0] - sideTask := tasks[1] - postTask := tasks[2] - - // Make the the third task a poststart hook - postTask.Lifecycle.Hook = structs.TaskLifecycleHookPoststart - - coord := newTaskHookCoordinator(logger, tasks) - postCh := coord.startConditionForTask(postTask) - sideCh := coord.startConditionForTask(sideTask) - mainCh := coord.startConditionForTask(mainTask) - - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", mainTask.Name) - require.Falsef(t, isChannelClosed(mainCh), "%s channel was closed, should be open", postTask.Name) - - states := map[string]*structs.TaskState{ - postTask.Name: { - State: structs.TaskStatePending, - Failed: false, - }, - mainTask.Name: { - State: structs.TaskStateRunning, - Failed: false, - StartedAt: time.Now(), - }, - sideTask.Name: { - State: structs.TaskStateRunning, - Failed: false, - StartedAt: time.Now(), - }, - } - - coord.taskStateUpdated(states) - - require.Truef(t, isChannelClosed(postCh), "%s channel was open, should be closed", postTask.Name) - require.Truef(t, isChannelClosed(sideCh), "%s channel was open, should be closed", sideTask.Name) - require.Truef(t, isChannelClosed(mainCh), "%s channel was open, should be closed", mainTask.Name) -} - -func isChannelClosed(ch <-chan struct{}) bool { - select { - case <-ch: - return true - default: - return false - } -} - -func TestHasSidecarTasks(t *testing.T) { - ci.Parallel(t) - - falseV, trueV := false, true - - cases := []struct { - name string - // nil if main task, false if non-sidecar hook, true if sidecar hook - indicators []*bool - - hasSidecars bool - hasNonsidecars bool - }{ - { - name: "all sidecar - one", - indicators: []*bool{&trueV}, - hasSidecars: true, - hasNonsidecars: false, - }, - { - name: "all sidecar - multiple", - indicators: []*bool{&trueV, &trueV, &trueV}, - hasSidecars: true, - hasNonsidecars: false, - }, - { - name: "some sidecars, some others", - indicators: []*bool{nil, &falseV, &trueV}, - hasSidecars: true, - hasNonsidecars: true, - }, - { - name: "no sidecars", - indicators: []*bool{nil, &falseV, nil}, - hasSidecars: false, - hasNonsidecars: true, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - alloc := allocWithSidecarIndicators(c.indicators) - arConf, cleanup := testAllocRunnerConfig(t, alloc) - defer cleanup() - - ar, err := NewAllocRunner(arConf) - require.NoError(t, err) - - require.Equal(t, c.hasSidecars, hasSidecarTasks(ar.tasks), "sidecars") - - runners := []*taskrunner.TaskRunner{} - for _, r := range ar.tasks { - runners = append(runners, r) - } - require.Equal(t, c.hasNonsidecars, hasNonSidecarTasks(runners), "non-sidecars") - - }) - } -} - -func allocWithSidecarIndicators(indicators []*bool) *structs.Allocation { - alloc := mock.BatchAlloc() - - tasks := []*structs.Task{} - resources := map[string]*structs.AllocatedTaskResources{} - - tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] - - for i, indicator := range indicators { - task := alloc.Job.TaskGroups[0].Tasks[0].Copy() - task.Name = fmt.Sprintf("task%d", i) - if indicator != nil { - task.Lifecycle = &structs.TaskLifecycleConfig{ - Hook: structs.TaskLifecycleHookPrestart, - Sidecar: *indicator, - } - } - tasks = 
append(tasks, task)
-		resources[task.Name] = tr
-	}
-
-	alloc.Job.TaskGroups[0].Tasks = tasks
-
-	alloc.AllocatedResources.Tasks = resources
-	return alloc
-
-}
diff --git a/client/allocrunner/tasklifecycle/coordinator.go b/client/allocrunner/tasklifecycle/coordinator.go
new file mode 100644
index 00000000000..90a8e7fc097
--- /dev/null
+++ b/client/allocrunner/tasklifecycle/coordinator.go
@@ -0,0 +1,427 @@
+package tasklifecycle
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/nomad/nomad/structs"
+)
+
+// coordinatorState represents a state of the task lifecycle Coordinator FSM.
+type coordinatorState uint8
+
+const (
+	coordinatorStateInit coordinatorState = iota
+	coordinatorStatePrestart
+	coordinatorStateMain
+	coordinatorStatePoststart
+	coordinatorStateWaitAlloc
+	coordinatorStatePoststop
+)
+
+func (s coordinatorState) String() string {
+	switch s {
+	case coordinatorStateInit:
+		return "init"
+	case coordinatorStatePrestart:
+		return "prestart"
+	case coordinatorStateMain:
+		return "main"
+	case coordinatorStatePoststart:
+		return "poststart"
+	case coordinatorStateWaitAlloc:
+		return "wait_alloc"
+	case coordinatorStatePoststop:
+		return "poststop"
+	}
+	panic(fmt.Sprintf("Unexpected task coordinator state %d", s))
+}
+
+// lifecycleStage represents a lifecycle configuration used for task
+// coordination.
+//
+// Not all possible combinations of hook X sidecar are defined, only the ones
+// that are relevant for coordinating task initialization order. For example, a
+// main task with sidecar set to `true` starts at the same time as a
+// non-sidecar main task, so there is no need to treat them differently.
+type lifecycleStage uint8
+
+const (
+	// lifecycleStagePrestartEphemeral are tasks with the "prestart" hook and
+	// sidecar set to "false".
+	lifecycleStagePrestartEphemeral lifecycleStage = iota
+
+	// lifecycleStagePrestartSidecar are tasks with the "prestart" hook and
+	// sidecar set to "true".
+	lifecycleStagePrestartSidecar
+
+	// lifecycleStageMain are tasks without a lifecycle or a lifecycle with an
+	// empty hook value.
+	lifecycleStageMain
+
+	// lifecycleStagePoststartEphemeral are tasks with the "poststart" hook and
+	// sidecar set to "false".
+	lifecycleStagePoststartEphemeral
+
+	// lifecycleStagePoststartSidecar are tasks with the "poststart" hook and
+	// sidecar set to "true".
+	lifecycleStagePoststartSidecar
+
+	// lifecycleStagePoststop are tasks with the "poststop" hook.
+	lifecycleStagePoststop
+)
+
+// Coordinator controls when tasks with a given lifecycle configuration are
+// allowed to start and run.
+//
+// It behaves like a finite state machine where each state transition blocks or
+// allows some task lifecycle types to run.
+type Coordinator struct {
+	logger hclog.Logger
+
+	// tasksByLifecycle is an index used to group and quickly access tasks by
+	// their lifecycle stage.
+	tasksByLifecycle map[lifecycleStage][]string
+
+	// currentState is the current state of the FSM. It must only be accessed
+	// while holding the lock.
+	currentState     coordinatorState
+	currentStateLock sync.RWMutex
+
+	// gates store the gates that control each task lifecycle stage.
+	gates map[lifecycleStage]*Gate
+}
+
+// NewCoordinator returns a new Coordinator with all tasks initially blocked.
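The Coordinator above stores one `Gate` per lifecycle stage; only `NewGate(shutdownCh)` and `WaitCh()` appear in this diff. The sketch below shows one way such a gate could be implemented so it can be opened and re-closed repeatedly; `Open`, `Close`, and the internals are assumptions rather than the actual tasklifecycle API:

```go
// Gate blocks or releases receivers of WaitCh. While open, any receive on
// WaitCh is satisfied; while closed, receivers block until the gate opens or
// the shutdown channel fires.
type Gate struct {
	updateCh chan bool
	waitCh   chan struct{}
}

// NewGate returns a closed Gate whose internal loop stops when shutdownCh closes.
func NewGate(shutdownCh <-chan struct{}) *Gate {
	g := &Gate{
		updateCh: make(chan bool),
		waitCh:   make(chan struct{}),
	}
	go g.run(shutdownCh)
	return g
}

// run services waiters: while open it keeps satisfying receives on waitCh,
// while closed it only reacts to state updates or shutdown.
func (g *Gate) run(shutdownCh <-chan struct{}) {
	open := false
	for {
		if open {
			select {
			case g.waitCh <- struct{}{}: // release one waiter
			case open = <-g.updateCh:
			case <-shutdownCh:
				return
			}
		} else {
			select {
			case open = <-g.updateCh:
			case <-shutdownCh:
				return
			}
		}
	}
}

// WaitCh is the channel task runners block on until their stage may run.
func (g *Gate) WaitCh() <-chan struct{} { return g.waitCh }

// Open releases waiters; Close blocks future waiters.
func (g *Gate) Open()  { g.updateCh <- true }
func (g *Gate) Close() { g.updateCh <- false }
```

Note that once shutdownCh fires, `Open` and `Close` in this sketch would block; a production version has to guard that path.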
+func NewCoordinator(logger hclog.Logger, tasks []*structs.Task, shutdownCh <-chan struct{}) *Coordinator { + c := &Coordinator{ + logger: logger.Named("task_coordinator"), + tasksByLifecycle: indexTasksByLifecycle(tasks), + gates: make(map[lifecycleStage]*Gate), + } + + for lifecycle := range c.tasksByLifecycle { + c.gates[lifecycle] = NewGate(shutdownCh) + } + + c.enterStateLocked(coordinatorStateInit) + return c +} + +// Restart sets the Coordinator state back to "init" and is used to coordinate +// a full alloc restart. Since all tasks will run again they need to be pending +// before they are allowed to proceed. +func (c *Coordinator) Restart() { + c.currentStateLock.Lock() + defer c.currentStateLock.Unlock() + c.enterStateLocked(coordinatorStateInit) +} + +// Restore is used to set the Coordinator FSM to the correct state when an +// alloc is restored. Must be called before the allocrunner is running. +func (c *Coordinator) Restore(states map[string]*structs.TaskState) { + // Skip the "init" state when restoring since the tasks were likely already + // running, causing the Coordinator to be stuck waiting for them to be + // "pending". + c.enterStateLocked(coordinatorStatePrestart) + c.TaskStateUpdated(states) +} + +// StartConditionForTask returns a channel that is unblocked when the task is +// allowed to run. +func (c *Coordinator) StartConditionForTask(task *structs.Task) <-chan struct{} { + lifecycle := taskLifecycleStage(task) + return c.gates[lifecycle].WaitCh() +} + +// TaskStateUpdated notifies that a task state has changed. This may cause the +// Coordinator to transition to another state. +func (c *Coordinator) TaskStateUpdated(states map[string]*structs.TaskState) { + c.currentStateLock.Lock() + defer c.currentStateLock.Unlock() + + // We may be able to move directly through some states (for example, when + // an alloc doesn't have any prestart task we can skip the prestart state), + // so loop until we stabilize. + // This is also important when restoring an alloc since we need to find the + // state where FSM was last positioned. + for { + nextState := c.nextStateLocked(states) + if nextState == c.currentState { + return + } + + c.enterStateLocked(nextState) + } +} + +// nextStateLocked returns the state the FSM should transition to given its +// current internal state and the received states of the tasks. +// The currentStateLock must be held before calling this method. +func (c *Coordinator) nextStateLocked(states map[string]*structs.TaskState) coordinatorState { + + // coordinatorStatePoststop is the terminal state of the FSM, and can be + // reached at any time. 
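In the common case the switch below walks the FSM linearly through init → prestart → main → poststart → wait_alloc → poststop, with each is*Done predicate gating the next transition; the isAllocDone short-circuit immediately below is what allows a jump straight to poststop from any state once every non-poststop task is dead.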
+ if c.isAllocDone(states) { + return coordinatorStatePoststop + } + + switch c.currentState { + case coordinatorStateInit: + if !c.isInitDone(states) { + return coordinatorStateInit + } + return coordinatorStatePrestart + + case coordinatorStatePrestart: + if !c.isPrestartDone(states) { + return coordinatorStatePrestart + } + return coordinatorStateMain + + case coordinatorStateMain: + if !c.isMainDone(states) { + return coordinatorStateMain + } + return coordinatorStatePoststart + + case coordinatorStatePoststart: + if !c.isPoststartDone(states) { + return coordinatorStatePoststart + } + return coordinatorStateWaitAlloc + + case coordinatorStateWaitAlloc: + if !c.isAllocDone(states) { + return coordinatorStateWaitAlloc + } + return coordinatorStatePoststop + + case coordinatorStatePoststop: + return coordinatorStatePoststop + } + + // If the code reaches here it's a programming error, since the switch + // statement should cover all possible states and return the next state. + panic(fmt.Sprintf("unexpected state %s", c.currentState)) +} + +// enterStateLocked updates the current state of the Coordinator FSM and +// executes any action necessary for the state transition. +// The currentStateLock must be held before calling this method. +func (c *Coordinator) enterStateLocked(state coordinatorState) { + c.logger.Trace("state transition", "from", c.currentState, "to", state) + + switch state { + case coordinatorStateInit: + c.block(lifecycleStagePrestartEphemeral) + c.block(lifecycleStagePrestartSidecar) + c.block(lifecycleStageMain) + c.block(lifecycleStagePoststartEphemeral) + c.block(lifecycleStagePoststartSidecar) + c.block(lifecycleStagePoststop) + + case coordinatorStatePrestart: + c.block(lifecycleStageMain) + c.block(lifecycleStagePoststartEphemeral) + c.block(lifecycleStagePoststartSidecar) + c.block(lifecycleStagePoststop) + + c.allow(lifecycleStagePrestartEphemeral) + c.allow(lifecycleStagePrestartSidecar) + + case coordinatorStateMain: + c.block(lifecycleStagePrestartEphemeral) + c.block(lifecycleStagePoststartEphemeral) + c.block(lifecycleStagePoststartSidecar) + c.block(lifecycleStagePoststop) + + c.allow(lifecycleStagePrestartSidecar) + c.allow(lifecycleStageMain) + + case coordinatorStatePoststart: + c.block(lifecycleStagePrestartEphemeral) + c.block(lifecycleStagePoststop) + + c.allow(lifecycleStagePrestartSidecar) + c.allow(lifecycleStageMain) + c.allow(lifecycleStagePoststartEphemeral) + c.allow(lifecycleStagePoststartSidecar) + + case coordinatorStateWaitAlloc: + c.block(lifecycleStagePrestartEphemeral) + c.block(lifecycleStagePoststartEphemeral) + c.block(lifecycleStagePoststop) + + c.allow(lifecycleStagePrestartSidecar) + c.allow(lifecycleStageMain) + c.allow(lifecycleStagePoststartSidecar) + + case coordinatorStatePoststop: + c.block(lifecycleStagePrestartEphemeral) + c.block(lifecycleStagePrestartSidecar) + c.block(lifecycleStageMain) + c.block(lifecycleStagePoststartEphemeral) + c.block(lifecycleStagePoststartSidecar) + + c.allow(lifecycleStagePoststop) + } + + c.currentState = state +} + +// isInitDone returns true when the following conditions are met: +// - all tasks are in the "pending" state. +func (c *Coordinator) isInitDone(states map[string]*structs.TaskState) bool { + for _, task := range states { + if task.State != structs.TaskStatePending { + return false + } + } + return true +} + +// isPrestartDone returns true when the following conditions are met: +// - there is at least one prestart task +// - all ephemeral prestart tasks are successful. 
+// - no ephemeral prestart task has failed. +// - all prestart sidecar tasks are running. +func (c *Coordinator) isPrestartDone(states map[string]*structs.TaskState) bool { + if !c.hasPrestart() { + return true + } + + for _, task := range c.tasksByLifecycle[lifecycleStagePrestartEphemeral] { + if !states[task].Successful() { + return false + } + } + for _, task := range c.tasksByLifecycle[lifecycleStagePrestartSidecar] { + if states[task].State != structs.TaskStateRunning { + return false + } + } + return true +} + +// isMainDone returns true when the following conditions are met: +// - there is at least one main task. +// - all main tasks are no longer "pending". +func (c *Coordinator) isMainDone(states map[string]*structs.TaskState) bool { + if !c.hasMain() { + return true + } + + for _, task := range c.tasksByLifecycle[lifecycleStageMain] { + if states[task].State == structs.TaskStatePending { + return false + } + } + return true +} + +// isPoststartDone returns true when the following conditions are met: +// - there is at least one poststart task. +// - all ephemeral poststart tasks are in the "dead" state. +func (c *Coordinator) isPoststartDone(states map[string]*structs.TaskState) bool { + if !c.hasPoststart() { + return true + } + + for _, task := range c.tasksByLifecycle[lifecycleStagePoststartEphemeral] { + if states[task].State != structs.TaskStateDead { + return false + } + } + return true +} + +// isAllocDone returns true when the following conditions are met: +// - all non-poststop tasks are in the "dead" state. +func (c *Coordinator) isAllocDone(states map[string]*structs.TaskState) bool { + for lifecycle, tasks := range c.tasksByLifecycle { + if lifecycle == lifecycleStagePoststop { + continue + } + + for _, task := range tasks { + if states[task].State != structs.TaskStateDead { + return false + } + } + } + return true +} + +func (c *Coordinator) hasPrestart() bool { + return len(c.tasksByLifecycle[lifecycleStagePrestartEphemeral])+ + len(c.tasksByLifecycle[lifecycleStagePrestartSidecar]) > 0 +} + +func (c *Coordinator) hasMain() bool { + return len(c.tasksByLifecycle[lifecycleStageMain]) > 0 +} + +func (c *Coordinator) hasPoststart() bool { + return len(c.tasksByLifecycle[lifecycleStagePoststartEphemeral])+ + len(c.tasksByLifecycle[lifecycleStagePoststartSidecar]) > 0 +} + +func (c *Coordinator) hasPoststop() bool { + return len(c.tasksByLifecycle[lifecycleStagePoststop]) > 0 +} + +// block is used to block the execution of tasks in the given lifecycle stage. +func (c *Coordinator) block(lifecycle lifecycleStage) { + gate := c.gates[lifecycle] + if gate != nil { + gate.Close() + } +} + +// allows is used to allow the execution of tasks in the given lifecycle stage. +func (c *Coordinator) allow(lifecycle lifecycleStage) { + gate := c.gates[lifecycle] + if gate != nil { + gate.Open() + } +} + +// indexTasksByLifecycle generates a map that groups tasks by their lifecycle +// configuration. This makes it easier to retrieve tasks by these groups or to +// determine if a task has a certain lifecycle configuration. +func indexTasksByLifecycle(tasks []*structs.Task) map[lifecycleStage][]string { + index := make(map[lifecycleStage][]string) + + for _, task := range tasks { + lifecycle := taskLifecycleStage(task) + + if _, ok := index[lifecycle]; !ok { + index[lifecycle] = []string{} + } + index[lifecycle] = append(index[lifecycle], task.Name) + } + + return index +} + +// taskLifecycleStage returns the relevant lifecycle stage for a given task. 
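The stage returned by taskLifecycleStage (defined next) follows directly from the task's lifecycle block. An illustrative in-package test sketch of the expected classification, not part of this change:

```go
package tasklifecycle

import (
	"testing"

	"github.com/hashicorp/nomad/nomad/structs"
)

func TestTaskLifecycleStage_Sketch(t *testing.T) {
	sidecar := &structs.Task{
		Name: "sidecar",
		Lifecycle: &structs.TaskLifecycleConfig{
			Hook:    structs.TaskLifecycleHookPrestart,
			Sidecar: true,
		},
	}
	post := &structs.Task{
		Name:      "post",
		Lifecycle: &structs.TaskLifecycleConfig{Hook: structs.TaskLifecycleHookPoststop},
	}
	main := &structs.Task{Name: "main"} // no lifecycle block

	if got := taskLifecycleStage(sidecar); got != lifecycleStagePrestartSidecar {
		t.Fatalf("expected prestart sidecar stage, got %d", got)
	}
	if got := taskLifecycleStage(post); got != lifecycleStagePoststop {
		t.Fatalf("expected poststop stage, got %d", got)
	}
	if got := taskLifecycleStage(main); got != lifecycleStageMain {
		t.Fatalf("expected main stage, got %d", got)
	}
}
```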
+func taskLifecycleStage(task *structs.Task) lifecycleStage { + if task.IsPrestart() { + if task.Lifecycle.Sidecar { + return lifecycleStagePrestartSidecar + } + return lifecycleStagePrestartEphemeral + } else if task.IsPoststart() { + if task.Lifecycle.Sidecar { + return lifecycleStagePoststartSidecar + } + return lifecycleStagePoststartEphemeral + } else if task.IsPoststop() { + return lifecycleStagePoststop + } + + // Assume task is "main" by default. + return lifecycleStageMain +} diff --git a/client/allocrunner/tasklifecycle/coordinator_test.go b/client/allocrunner/tasklifecycle/coordinator_test.go new file mode 100644 index 00000000000..3f86dcc99f3 --- /dev/null +++ b/client/allocrunner/tasklifecycle/coordinator_test.go @@ -0,0 +1,560 @@ +package tasklifecycle + +import ( + "testing" + "time" + + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" +) + +func TestCoordinator_OnlyMainApp(t *testing.T) { + ci.Parallel(t) + + alloc := mock.Alloc() + tasks := alloc.Job.TaskGroups[0].Tasks + task := tasks[0] + logger := testlog.HCLogger(t) + + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + coord := NewCoordinator(logger, tasks, shutdownCh) + + // Tasks starts blocked. + RequireTaskBlocked(t, coord, task) + + // When main is pending it's allowed to run. + states := map[string]*structs.TaskState{ + task.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, task) + + // After main is running, main tasks are still allowed to run. + states = map[string]*structs.TaskState{ + task.Name: { + State: structs.TaskStateRunning, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, task) +} + +func TestCoordinator_PrestartRunsBeforeMain(t *testing.T) { + ci.Parallel(t) + + logger := testlog.HCLogger(t) + + alloc := mock.LifecycleAlloc() + tasks := alloc.Job.TaskGroups[0].Tasks + + mainTask := tasks[0] + sideTask := tasks[1] + initTask := tasks[2] + + // Only use the tasks that we care about. + tasks = []*structs.Task{mainTask, sideTask, initTask} + + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + coord := NewCoordinator(logger, tasks, shutdownCh) + + // All tasks start blocked. + RequireTaskBlocked(t, coord, initTask) + RequireTaskBlocked(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + + // Set initial state, prestart tasks are allowed to run. + states := map[string]*structs.TaskState{ + initTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + sideTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, initTask) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + + // Sidecar task is running, main is blocked. + states = map[string]*structs.TaskState{ + initTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + sideTask.Name: { + State: structs.TaskStateRunning, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, initTask) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + + // Init task is running, main is blocked. 
+ states = map[string]*structs.TaskState{ + initTask.Name: { + State: structs.TaskStateRunning, + Failed: false, + }, + sideTask.Name: { + State: structs.TaskStateRunning, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, initTask) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + + // Init task is done, main is now allowed to run. + states = map[string]*structs.TaskState{ + initTask.Name: { + State: structs.TaskStateDead, + Failed: false, + }, + sideTask.Name: { + State: structs.TaskStateRunning, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskBlocked(t, coord, initTask) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskAllowed(t, coord, mainTask) +} + +func TestCoordinator_MainRunsAfterManyInitTasks(t *testing.T) { + ci.Parallel(t) + + logger := testlog.HCLogger(t) + + alloc := mock.LifecycleAlloc() + alloc.Job = mock.VariableLifecycleJob(structs.Resources{CPU: 100, MemoryMB: 256}, 1, 2, 0) + tasks := alloc.Job.TaskGroups[0].Tasks + + mainTask := tasks[0] + init1Task := tasks[1] + init2Task := tasks[2] + + // Only use the tasks that we care about. + tasks = []*structs.Task{mainTask, init1Task, init2Task} + + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + coord := NewCoordinator(logger, tasks, shutdownCh) + + // All tasks start blocked. + RequireTaskBlocked(t, coord, init1Task) + RequireTaskBlocked(t, coord, init2Task) + RequireTaskBlocked(t, coord, mainTask) + + // Set initial state, prestart tasks are allowed to run, main is blocked. + states := map[string]*structs.TaskState{ + init1Task.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + init2Task.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, init1Task) + RequireTaskAllowed(t, coord, init2Task) + RequireTaskBlocked(t, coord, mainTask) + + // Init tasks complete, main is allowed to run. + states = map[string]*structs.TaskState{ + init1Task.Name: { + State: structs.TaskStateDead, + Failed: false, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + init2Task.Name: { + State: structs.TaskStateDead, + Failed: false, + StartedAt: time.Now(), + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskBlocked(t, coord, init1Task) + RequireTaskBlocked(t, coord, init2Task) + RequireTaskAllowed(t, coord, mainTask) +} + +func TestCoordinator_FailedInitTask(t *testing.T) { + ci.Parallel(t) + + logger := testlog.HCLogger(t) + + alloc := mock.LifecycleAlloc() + alloc.Job = mock.VariableLifecycleJob(structs.Resources{CPU: 100, MemoryMB: 256}, 1, 2, 0) + tasks := alloc.Job.TaskGroups[0].Tasks + + mainTask := tasks[0] + init1Task := tasks[1] + init2Task := tasks[2] + + // Only use the tasks that we care about. + tasks = []*structs.Task{mainTask, init1Task, init2Task} + + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + coord := NewCoordinator(logger, tasks, shutdownCh) + + // All tasks start blocked. + RequireTaskBlocked(t, coord, init1Task) + RequireTaskBlocked(t, coord, init2Task) + RequireTaskBlocked(t, coord, mainTask) + + // Set initial state, prestart tasks are allowed to run, main is blocked. 
+ states := map[string]*structs.TaskState{ + init1Task.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + init2Task.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, init1Task) + RequireTaskAllowed(t, coord, init2Task) + RequireTaskBlocked(t, coord, mainTask) + + // Init task dies, main is still blocked. + states = map[string]*structs.TaskState{ + init1Task.Name: { + State: structs.TaskStateDead, + Failed: false, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + init2Task.Name: { + State: structs.TaskStateDead, + Failed: true, + StartedAt: time.Now(), + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, init1Task) + RequireTaskAllowed(t, coord, init2Task) + RequireTaskBlocked(t, coord, mainTask) +} + +func TestCoordinator_SidecarNeverStarts(t *testing.T) { + ci.Parallel(t) + + logger := testlog.HCLogger(t) + + alloc := mock.LifecycleAlloc() + tasks := alloc.Job.TaskGroups[0].Tasks + + mainTask := tasks[0] + sideTask := tasks[1] + initTask := tasks[2] + + // Only use the tasks that we care about. + tasks = []*structs.Task{mainTask, sideTask, initTask} + + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + coord := NewCoordinator(logger, tasks, shutdownCh) + + // All tasks start blocked. + RequireTaskBlocked(t, coord, initTask) + RequireTaskBlocked(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + + // Set initial state, prestart tasks are allowed to run, main is blocked. + states := map[string]*structs.TaskState{ + initTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + sideTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, initTask) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + + // Init completes, but sidecar not yet. + states = map[string]*structs.TaskState{ + initTask.Name: { + State: structs.TaskStateDead, + Failed: false, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + sideTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, initTask) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) +} + +func TestCoordinator_PoststartStartsAfterMain(t *testing.T) { + ci.Parallel(t) + + logger := testlog.HCLogger(t) + + alloc := mock.LifecycleAlloc() + tasks := alloc.Job.TaskGroups[0].Tasks + + mainTask := tasks[0] + sideTask := tasks[1] + postTask := tasks[2] + + // Only use the tasks that we care about. + tasks = []*structs.Task{mainTask, sideTask, postTask} + + // Make the the third task is a poststart hook + postTask.Lifecycle.Hook = structs.TaskLifecycleHookPoststart + + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + coord := NewCoordinator(logger, tasks, shutdownCh) + + // All tasks start blocked. + RequireTaskBlocked(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + RequireTaskBlocked(t, coord, postTask) + + // Set initial state, prestart tasks are allowed to run, main and poststart + // are blocked. 
+ states := map[string]*structs.TaskState{ + sideTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + mainTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + postTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskBlocked(t, coord, mainTask) + RequireTaskBlocked(t, coord, postTask) + + // Sidecar and main running, poststart allowed to run. + states = map[string]*structs.TaskState{ + sideTask.Name: { + State: structs.TaskStateRunning, + Failed: false, + StartedAt: time.Now(), + }, + mainTask.Name: { + State: structs.TaskStateRunning, + Failed: false, + StartedAt: time.Now(), + }, + postTask.Name: { + State: structs.TaskStatePending, + Failed: false, + }, + } + coord.TaskStateUpdated(states) + RequireTaskAllowed(t, coord, sideTask) + RequireTaskAllowed(t, coord, mainTask) + RequireTaskAllowed(t, coord, postTask) +} + +func TestCoordinator_Restore(t *testing.T) { + ci.Parallel(t) + + task := mock.Job().TaskGroups[0].Tasks[0] + + preEphemeral := task.Copy() + preEphemeral.Name = "pre_ephemeral" + preEphemeral.Lifecycle = &structs.TaskLifecycleConfig{ + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: false, + } + + preSide := task.Copy() + preSide.Name = "pre_side" + preSide.Lifecycle = &structs.TaskLifecycleConfig{ + Hook: structs.TaskLifecycleHookPrestart, + Sidecar: true, + } + + main := task.Copy() + main.Name = "main" + main.Lifecycle = nil + + postEphemeral := task.Copy() + postEphemeral.Name = "post_ephemeral" + postEphemeral.Lifecycle = &structs.TaskLifecycleConfig{ + Hook: structs.TaskLifecycleHookPoststart, + Sidecar: false, + } + + postSide := task.Copy() + postSide.Name = "post_side" + postSide.Lifecycle = &structs.TaskLifecycleConfig{ + Hook: structs.TaskLifecycleHookPoststart, + Sidecar: true, + } + + poststop := task.Copy() + poststop.Name = "poststop" + poststop.Lifecycle = &structs.TaskLifecycleConfig{ + Hook: structs.TaskLifecycleHookPoststop, + Sidecar: false, + } + + testCases := []struct { + name string + tasks []*structs.Task + tasksState map[string]*structs.TaskState + testFn func(*testing.T, *Coordinator) + }{ + { + name: "prestart ephemeral running", + tasks: []*structs.Task{preEphemeral, preSide, main}, + tasksState: map[string]*structs.TaskState{ + preEphemeral.Name: {State: structs.TaskStateRunning}, + preSide.Name: {State: structs.TaskStateRunning}, + main.Name: {State: structs.TaskStatePending}, + }, + testFn: func(t *testing.T, c *Coordinator) { + RequireTaskBlocked(t, c, main) + + RequireTaskAllowed(t, c, preEphemeral) + RequireTaskAllowed(t, c, preSide) + }, + }, + { + name: "prestart ephemeral complete", + tasks: []*structs.Task{preEphemeral, preSide, main}, + tasksState: map[string]*structs.TaskState{ + preEphemeral.Name: {State: structs.TaskStateDead}, + preSide.Name: {State: structs.TaskStateRunning}, + main.Name: {State: structs.TaskStatePending}, + }, + testFn: func(t *testing.T, c *Coordinator) { + RequireTaskBlocked(t, c, preEphemeral) + + RequireTaskAllowed(t, c, preSide) + RequireTaskAllowed(t, c, main) + }, + }, + { + name: "main running", + tasks: []*structs.Task{main}, + tasksState: map[string]*structs.TaskState{ + main.Name: {State: structs.TaskStateRunning}, + }, + testFn: func(t *testing.T, c *Coordinator) { + RequireTaskAllowed(t, c, main) + }, + }, + { + name: "poststart with sidecar", + tasks: []*structs.Task{main, postEphemeral, postSide}, + tasksState: map[string]*structs.TaskState{ + 
main.Name: {State: structs.TaskStateRunning}, + postEphemeral.Name: {State: structs.TaskStateDead}, + postSide.Name: {State: structs.TaskStateRunning}, + }, + testFn: func(t *testing.T, c *Coordinator) { + RequireTaskBlocked(t, c, postEphemeral) + + RequireTaskAllowed(t, c, main) + RequireTaskAllowed(t, c, postSide) + }, + }, + { + name: "poststop running", + tasks: []*structs.Task{main, poststop}, + tasksState: map[string]*structs.TaskState{ + main.Name: {State: structs.TaskStateDead}, + poststop.Name: {State: structs.TaskStateRunning}, + }, + testFn: func(t *testing.T, c *Coordinator) { + RequireTaskBlocked(t, c, main) + + RequireTaskAllowed(t, c, poststop) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + + c := NewCoordinator(testlog.HCLogger(t), tc.tasks, shutdownCh) + c.Restore(tc.tasksState) + tc.testFn(t, c) + }) + } +} diff --git a/client/allocrunner/tasklifecycle/doc.go b/client/allocrunner/tasklifecycle/doc.go new file mode 100644 index 00000000000..95d806fe7ff --- /dev/null +++ b/client/allocrunner/tasklifecycle/doc.go @@ -0,0 +1,92 @@ +/* +Package tasklifecycle manages the execution order of tasks based on their +lifecycle configuration. Its main structs are the Coordinator and the Gate. + +The Coordinator is used by an allocRunner to signal if a taskRunner is allowed +to start or not. It does so using a set of Gates, each for a given task +lifecycle configuration. + +The Gate provides a channel that can be used to block its listener on demand. +This is done by calling the Open() and Close() methods in the Gate which will +cause activate or deactivate a producer at the other end of the channel. + +The allocRunner feeds task state updates to the Coordinator that then uses this +information to determine which Gates it should open or close. Each Gate is +connected to a taskRunner with a matching lifecycle configuration. + +In the diagrams below, a solid line from a Gate indicates that it's open +(active), while a dashed line indicates that it's closed (inactive). A +taskRunner connected to an open Gate is allowed to run, while one that is +connected to a closed Gate is blocked. + +The Open/Close control line represents the Coordinator calling the Open() and +Close() methods of the Gates. + +In this state, the Coordinator is allowing prestart tasks to run, while +blocking the main tasks. + + ┌────────┐ + │ ALLOC │ + │ RUNNER │ + └───┬────┘ + │ + Task state + │ + ┌────────────▼────────────┐ + │Current state: │ + │Prestart │ ┌─────────────┐ + │ │ │ TASK RUNNER │ + │ ┌───────────────────┼─────────┤ (Prestart) │ + │ │ │ └─────────────┘ + │ │ │ + │ │ │ ┌─────────────┐ + │ │ COORDINATOR │ │ TASK RUNNER │ + │ │ ┌─ ─ ─┼─ ─ ─ ─┬╶┤ (Main) │ + │ │ ╷ │ ╷ └─────────────┘ + │ │ ╷ │ ╷ + │ │ ╷ │ ╷ ┌─────────────┐ + │ Prestart Main │ ╷ │ TASK RUNNER │ + └─────┬─┬───────────┬─┬───┘ └╶┤ (Main) │ + │ │Open/ ╷ │Open/ └─────────────┘ + │ │Close ╷ │Close + ┌──┴─▼─┐ ┌──┴─▼─┐ + │ GATE │ │ GATE │ + └──────┘ └──────┘ + +When the prestart task completes, the allocRunner will send a new batch of task +states to the Coordinator that will cause it to transition to a state where it +will close the Gate for prestart tasks, blocking their execution, and will open +the Gate for main tasks, allowing them to start. 
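On the task runner side, being "connected to a Gate" simply means selecting on the channel the Coordinator hands out for the task, alongside the runner's own kill and shutdown signals. A minimal sketch of such a listener; the channel names are stand-ins, and the TaskRunner's MAIN loop later in this changeset does the same with startConditionMetCh:

```go
// waitForStart blocks until the Coordinator allows the task to run, or until
// the task runner is killed or the client shuts down. startCh would be the
// channel returned by Coordinator.StartConditionForTask.
func waitForStart(startCh, killCh, shutdownCh <-chan struct{}) bool {
	select {
	case <-startCh:
		return true // allowed to start the task
	case <-killCh:
		return false
	case <-shutdownCh:
		return false
	}
}
```

The next diagram shows the resulting "Main" state, with the prestart Gate closed and the main Gate open.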
+ + ┌────────┐ + │ ALLOC │ + │ RUNNER │ + └───┬────┘ + │ + Task state + │ + ┌────────────▼────────────┐ + │Current state: │ + │Main │ ┌─────────────┐ + │ │ │ TASK RUNNER │ + │ ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┼─ ─ ─ ─ ─┤ (Prestart) │ + │ ╷ │ └─────────────┘ + │ ╷ │ + │ ╷ │ ┌─────────────┐ + │ ╷ COORDINATOR │ │ TASK RUNNER │ + │ ╷ ┌─────┼───────┬─┤ (Main) │ + │ ╷ │ │ │ └─────────────┘ + │ ╷ │ │ │ + │ ╷ │ │ │ ┌─────────────┐ + │ Prestart Main │ │ │ TASK RUNNER │ + └─────┼─┬───────────┬─┬───┘ └─┤ (Main) │ + ╷ │Open/ │ │Open/ └─────────────┘ + ╷ │Close │ │Close + ┌──┴─▼─┐ ┌──┴─▼─┐ + │ GATE │ │ GATE │ + └──────┘ └──────┘ + +Diagram source: +https://asciiflow.com/#/share/eJyrVspLzE1VssorzcnRUcpJrEwtUrJSqo5RqohRsjI0MDTViVGqBDKNLA2ArJLUihIgJ0ZJAQYeTdmDB8XE5CGrVHD08fF3BjPRZYJC%2Ffxcg7DIEGk6VDWyUEhicbZCcUliSSp2hfgNR6BpxCmDmelcWlSUmlcCsdkKm62%2BiZmo7kEOCOK8jtVmrGZiMVchxDHYGzXEYSpIspVUpKAREOQaHOIYFKKpgGkvjcIDp8kk2t7zaEoDcWgCmsnO%2Fv5BLp5%2BjiH%2BQVhNbkKLjyY8LtNFAyDdCgoavo6efppQ0%2FDorkETrQGypxDtrxmkmEyiK8iJ24CiVGAeKyqBGgPNVWjmYk%2FrVE7X8LhBiwtEcQRSBcT%2B%2Bs4KyK5D4pOewlFMRglfuDy6vmkoLoaL1yDLwXUquDuGuCogq4aLYDd9CnbT0V2uVKtUCwCqNQgp) +*/ +package tasklifecycle diff --git a/client/allocrunner/tasklifecycle/gate.go b/client/allocrunner/tasklifecycle/gate.go new file mode 100644 index 00000000000..02d96b29763 --- /dev/null +++ b/client/allocrunner/tasklifecycle/gate.go @@ -0,0 +1,87 @@ +package tasklifecycle + +const ( + gateClosed = false + gateOpened = true +) + +// Gate is used by the Coordinator to block or allow tasks from running. +// +// It provides a channel that taskRunners listens on to determine when they are +// allowed to run. The Gate has an infinite loop that is either feeding this +// channel (therefore allowing listeners to proceed) or not doing anything +// (causing listeners to block an wait). +// +// The Coordinator uses the Gate Open() and Close() methods to control this +// producer loop. +type Gate struct { + sendCh chan struct{} + updateCh chan bool + shutdownCh <-chan struct{} +} + +// NewGate returns a new Gate that is initially closed. The Gate should not be +// used after the shutdownCh is closed. +func NewGate(shutdownCh <-chan struct{}) *Gate { + g := &Gate{ + sendCh: make(chan struct{}), + updateCh: make(chan bool), + shutdownCh: shutdownCh, + } + go g.run(gateClosed) + + return g +} + +// WaitCh returns a channel that the listener must block on before starting its +// task. +// +// Callers must also check the state of the shutdownCh used to create the Gate +// to avoid blocking indefinitely. +func (g *Gate) WaitCh() <-chan struct{} { + return g.sendCh +} + +// Open is used to allow listeners to proceed. +// If the gate shutdownCh channel is closed, this method is a no-op so callers +// should check its state. +func (g *Gate) Open() { + select { + case <-g.shutdownCh: + case g.updateCh <- gateOpened: + } +} + +// Close is used to block listeners from proceeding. +// if the gate shutdownch channel is closed, this method is a no-op so callers +// should check its state. +func (g *Gate) Close() { + select { + case <-g.shutdownCh: + case g.updateCh <- gateClosed: + } +} + +// run starts the infinite loop that feeds the channel if the Gate is opened. +func (g *Gate) run(initState bool) { + isOpen := initState + for { + if isOpen { + select { + // Feed channel if the gate is open. 
+ case g.sendCh <- struct{}{}: + case <-g.shutdownCh: + return + case isOpen = <-g.updateCh: + continue + } + } else { + select { + case <-g.shutdownCh: + return + case isOpen = <-g.updateCh: + continue + } + } + } +} diff --git a/client/allocrunner/tasklifecycle/gate_test.go b/client/allocrunner/tasklifecycle/gate_test.go new file mode 100644 index 00000000000..3b77fadca91 --- /dev/null +++ b/client/allocrunner/tasklifecycle/gate_test.go @@ -0,0 +1,135 @@ +package tasklifecycle + +import ( + "testing" + "time" + + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper" +) + +func TestGate(t *testing.T) { + ci.Parallel(t) + + testCases := []struct { + name string + test func(*testing.T, *Gate) + }{ + { + name: "starts blocked", + test: func(t *testing.T, g *Gate) { + requireChannelBlocking(t, g.WaitCh(), "wait") + }, + }, + { + name: "block", + test: func(t *testing.T, g *Gate) { + g.Close() + requireChannelBlocking(t, g.WaitCh(), "wait") + }, + }, + { + name: "allow", + test: func(t *testing.T, g *Gate) { + g.Open() + requireChannelPassing(t, g.WaitCh(), "wait") + }, + }, + { + name: "block twice", + test: func(t *testing.T, g *Gate) { + g.Close() + g.Close() + requireChannelBlocking(t, g.WaitCh(), "wait") + }, + }, + { + name: "allow twice", + test: func(t *testing.T, g *Gate) { + g.Open() + g.Open() + requireChannelPassing(t, g.WaitCh(), "wait") + }, + }, + { + name: "allow block allow", + test: func(t *testing.T, g *Gate) { + g.Open() + requireChannelPassing(t, g.WaitCh(), "first allow") + g.Close() + requireChannelBlocking(t, g.WaitCh(), "block") + g.Open() + requireChannelPassing(t, g.WaitCh(), "second allow") + }, + }, + { + name: "concurrent access", + test: func(t *testing.T, g *Gate) { + x := 100 + go func() { + for i := 0; i < x; i++ { + g.Open() + } + }() + go func() { + for i := 0; i < x/10; i++ { + g.Close() + } + }() + requireChannelPassing(t, g.WaitCh(), "gate should be open") + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + shutdownCh := make(chan struct{}) + defer close(shutdownCh) + + g := NewGate(shutdownCh) + tc.test(t, g) + }) + } +} + +// TestGate_shutdown tests a gate with a closed shutdown channel. +func TestGate_shutdown(t *testing.T) { + ci.Parallel(t) + + // Create a Gate with a closed shutdownCh. + shutdownCh := make(chan struct{}) + close(shutdownCh) + + g := NewGate(shutdownCh) + + // Test that Open() and Close() doesn't block forever. + openCh := make(chan struct{}) + closeCh := make(chan struct{}) + + go func() { + g.Open() + close(openCh) + }() + go func() { + g.Close() + close(closeCh) + }() + + timer, stop := helper.NewSafeTimer(time.Second) + defer stop() + + select { + case <-openCh: + case <-timer.C: + t.Fatalf("timeout waiting for gate operations") + } + + select { + case <-closeCh: + case <-timer.C: + t.Fatalf("timeout waiting for gate operations") + } + + // A Gate with a shutdownCh should be closed. 
+ requireChannelBlocking(t, g.WaitCh(), "gate should be closed") +} diff --git a/client/allocrunner/tasklifecycle/testing.go b/client/allocrunner/tasklifecycle/testing.go new file mode 100644 index 00000000000..7e7ee0eb18f --- /dev/null +++ b/client/allocrunner/tasklifecycle/testing.go @@ -0,0 +1,56 @@ +package tasklifecycle + +import ( + "time" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + testing "github.com/mitchellh/go-testing-interface" +) + +func RequireTaskBlocked(t testing.T, c *Coordinator, task *structs.Task) { + ch := c.StartConditionForTask(task) + requireChannelBlocking(t, ch, task.Name) +} + +func RequireTaskAllowed(t testing.T, c *Coordinator, task *structs.Task) { + ch := c.StartConditionForTask(task) + requireChannelPassing(t, ch, task.Name) +} + +func WaitNotInitUntil(c *Coordinator, until time.Duration, errorFunc func()) { + testutil.WaitForResultUntil(until, + func() (bool, error) { + c.currentStateLock.RLock() + defer c.currentStateLock.RUnlock() + return c.currentState != coordinatorStateInit, nil + }, + func(_ error) { + errorFunc() + }) +} + +func requireChannelPassing(t testing.T, ch <-chan struct{}, name string) { + testutil.WaitForResult(func() (bool, error) { + return !isChannelBlocking(ch), nil + }, func(_ error) { + t.Fatalf("%s channel was blocking, should be passing", name) + }) +} + +func requireChannelBlocking(t testing.T, ch <-chan struct{}, name string) { + testutil.WaitForResult(func() (bool, error) { + return isChannelBlocking(ch), nil + }, func(_ error) { + t.Fatalf("%s channel was passing, should be blocking", name) + }) +} + +func isChannelBlocking(ch <-chan struct{}) bool { + select { + case <-ch: + return false + default: + return true + } +} diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go index 0718db1c823..9f40bd442f8 100644 --- a/client/allocrunner/taskrunner/connect_native_hook_test.go +++ b/client/allocrunner/taskrunner/connect_native_hook_test.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -478,8 +478,8 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) { // TLS config consumed by native application ShareSSL: shareSSL, - EnableSSL: helper.BoolToPtr(true), - VerifySSL: helper.BoolToPtr(true), + EnableSSL: pointer.Of(true), + VerifySSL: pointer.Of(true), CAFile: fakeCert, CertFile: fakeCert, KeyFile: fakeCert, @@ -528,7 +528,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) { // so make sure an unset value turns the feature on. 
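The hunks in these test files also swap the typed helper.BoolToPtr for the generic pointer.Of. If that helper follows the usual shape of a generics-based pointer constructor, it is roughly the following; this is a sketch, not the repository's exact source:

```go
// Package pointer provides a generic replacement for the old typed
// helper.BoolToPtr / StringToPtr style constructors.
package pointer

// Of returns a pointer to a copy of the given value.
func Of[T any](v T) *T {
	return &v
}
```

With it, pointer.Of(true) yields a *bool, so the call sites in these tests read the same regardless of the pointed-to type.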
t.Run("share_ssl is true", func(t *testing.T) { - try(t, helper.BoolToPtr(true)) + try(t, pointer.Of(true)) }) t.Run("share_ssl is nil", func(t *testing.T) { @@ -596,9 +596,9 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) { Addr: consulConfig.Address, // TLS config consumed by native application - ShareSSL: helper.BoolToPtr(true), - EnableSSL: helper.BoolToPtr(true), - VerifySSL: helper.BoolToPtr(true), + ShareSSL: pointer.Of(true), + EnableSSL: pointer.Of(true), + VerifySSL: pointer.Of(true), CAFile: fakeCert, CertFile: fakeCert, KeyFile: fakeCert, diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook.go index 144a4a1607e..3cc9e4fd4d1 100644 --- a/client/allocrunner/taskrunner/envoy_bootstrap_hook.go +++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook.go @@ -152,8 +152,8 @@ func newEnvoyBootstrapHook(c *envoyBootstrapHookConfig) *envoyBootstrapHook { } // getConsulNamespace will resolve the Consul namespace, choosing between -// - agent config (low precedence) -// - task group config (high precedence) +// - agent config (low precedence) +// - task group config (high precedence) func (h *envoyBootstrapHook) getConsulNamespace() string { var namespace string if h.consulConfig.Namespace != "" { diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go index c729db41a96..70ebe47b59b 100644 --- a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go @@ -23,8 +23,8 @@ import ( "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/args" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -95,8 +95,8 @@ func TestEnvoyBootstrapHook_decodeTriState(t *testing.T) { ci.Parallel(t) require.Equal(t, "", decodeTriState(nil)) - require.Equal(t, "true", decodeTriState(helper.BoolToPtr(true))) - require.Equal(t, "false", decodeTriState(helper.BoolToPtr(false))) + require.Equal(t, "true", decodeTriState(pointer.Of(true))) + require.Equal(t, "false", decodeTriState(pointer.Of(false))) } var ( diff --git a/client/allocrunner/taskrunner/envoy_version_hook.go b/client/allocrunner/taskrunner/envoy_version_hook.go index 3501e27252a..ef958736d96 100644 --- a/client/allocrunner/taskrunner/envoy_version_hook.go +++ b/client/allocrunner/taskrunner/envoy_version_hook.go @@ -181,9 +181,9 @@ func (h *envoyVersionHook) tweakImage(configured string, supported map[string][] // semver sanitizes the envoy version string coming from Consul into the format // used by the Envoy project when publishing images (i.e. proper semver). This // resulting string value does NOT contain the 'v' prefix for 2 reasons: -// 1) the version library does not include the 'v' -// 2) its plausible unofficial images use the 3 numbers without the prefix for -// tagging their own images +// 1. the version library does not include the 'v' +// 2. 
its plausible unofficial images use the 3 numbers without the prefix for +// tagging their own images func semver(chosen string) (string, error) { v, err := version.NewVersion(chosen) if err != nil { diff --git a/client/allocrunner/taskrunner/lifecycle.go b/client/allocrunner/taskrunner/lifecycle.go index b812156a846..90c3d37189a 100644 --- a/client/allocrunner/taskrunner/lifecycle.go +++ b/client/allocrunner/taskrunner/lifecycle.go @@ -6,28 +6,103 @@ import ( "github.com/hashicorp/nomad/nomad/structs" ) -// Restart a task. Returns immediately if no task is running. Blocks until -// existing task exits or passed-in context is canceled. +// Restart restarts a task that is already running. Returns an error if the +// task is not running. Blocks until existing task exits or passed-in context +// is canceled. func (tr *TaskRunner) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error { - tr.logger.Trace("Restart requested", "failure", failure) + tr.logger.Trace("Restart requested", "failure", failure, "event", event.GoString()) - // Grab the handle - handle := tr.getDriverHandle() + taskState := tr.TaskState() + if taskState == nil { + return ErrTaskNotRunning + } - // Check it is running - if handle == nil { + switch taskState.State { + case structs.TaskStatePending, structs.TaskStateDead: + return ErrTaskNotRunning + } + + return tr.restartImpl(ctx, event, failure) +} + +// ForceRestart restarts a task that is already running or reruns it if dead. +// Returns an error if the task is not able to rerun. Blocks until existing +// task exits or passed-in context is canceled. +// +// Callers must restart the AllocRuner taskCoordinator beforehand to make sure +// the task will be able to run again. +func (tr *TaskRunner) ForceRestart(ctx context.Context, event *structs.TaskEvent, failure bool) error { + tr.logger.Trace("Force restart requested", "failure", failure, "event", event.GoString()) + + taskState := tr.TaskState() + if taskState == nil { + return ErrTaskNotRunning + } + + tr.stateLock.Lock() + localState := tr.localState.Copy() + tr.stateLock.Unlock() + + if localState == nil { + return ErrTaskNotRunning + } + + switch taskState.State { + case structs.TaskStatePending: + return ErrTaskNotRunning + + case structs.TaskStateDead: + // Tasks that are in the "dead" state are only allowed to restart if + // their Run() method is still active. + if localState.RunComplete { + return ErrTaskNotRunning + } + } + + return tr.restartImpl(ctx, event, failure) +} + +// restartImpl implements to task restart process. +// +// It should never be called directly as it doesn't verify if the task state +// allows for a restart. +func (tr *TaskRunner) restartImpl(ctx context.Context, event *structs.TaskEvent, failure bool) error { + + // Check if the task is able to restart based on its state and the type of + // restart event that was triggered. + taskState := tr.TaskState() + if taskState == nil { return ErrTaskNotRunning } // Emit the event since it may take a long time to kill tr.EmitEvent(event) - // Run the pre-kill hooks prior to restarting the task - tr.preKill() - // Tell the restart tracker that a restart triggered the exit tr.restartTracker.SetRestartTriggered(failure) + // Signal a restart to unblock tasks that are in the "dead" state, but + // don't block since the channel is buffered. Only one signal is enough to + // notify the tr.Run() loop. + // The channel must be signaled after SetRestartTriggered is called so the + // tr.Run() loop runs again. 
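This non-blocking send on a one-slot buffered channel is the same trigger-coalescing idiom the task runner already uses for triggerUpdateCh: any number of signals raised while the run loop is busy collapse into a single pending wake-up. Stripped to its essentials:

```go
package main

func main() {
	// Capacity 1: at most one wake-up can be pending at a time.
	restartCh := make(chan struct{}, 1)

	notify := func() {
		select {
		case restartCh <- struct{}{}:
		default: // a wake-up is already pending; dropping this one is fine
		}
	}

	notify()
	notify() // coalesced with the first signal; does not block
	<-restartCh
}
```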
+ if taskState.State == structs.TaskStateDead { + select { + case tr.restartCh <- struct{}{}: + default: + } + } + + // Grab the handle to see if the task is still running and needs to be + // killed. + handle := tr.getDriverHandle() + if handle == nil { + return nil + } + + // Run the pre-kill hooks prior to restarting the task + tr.preKill() + // Grab a handle to the wait channel that will timeout with context cancelation // _before_ killing the task. waitCh, err := handle.WaitCh(ctx) @@ -69,14 +144,17 @@ func (tr *TaskRunner) Signal(event *structs.TaskEvent, s string) error { // Kill a task. Blocks until task exits or context is canceled. State is set to // dead. func (tr *TaskRunner) Kill(ctx context.Context, event *structs.TaskEvent) error { - tr.logger.Trace("Kill requested", "event_type", event.Type, "event_reason", event.KillReason) + tr.logger.Trace("Kill requested") // Cancel the task runner to break out of restart delay or the main run // loop. tr.killCtxCancel() // Emit kill event - tr.EmitEvent(event) + if event != nil { + tr.logger.Trace("Kill event", "event_type", event.Type, "event_reason", event.KillReason) + tr.EmitEvent(event) + } select { case <-tr.WaitCh(): diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 4696bc53f38..a21c632f8da 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -22,11 +22,11 @@ import ( // to their requisite plugin manager. // // It provides a few things to a plugin task running inside Nomad. These are: -// * A mount to the `csi_plugin.mount_dir` where the plugin will create its csi.sock -// * A mount to `local/csi` that node plugins will use to stage volume mounts. -// * When the task has started, it starts a loop of attempting to connect to the -// plugin, to perform initial fingerprinting of the plugins capabilities before -// notifying the plugin manager of the plugin. +// - A mount to the `csi_plugin.mount_dir` where the plugin will create its csi.sock +// - A mount to `local/csi` that node plugins will use to stage volume mounts. +// - When the task has started, it starts a loop of attempting to connect to the +// plugin, to perform initial fingerprinting of the plugins capabilities before +// notifying the plugin manager of the plugin. type csiPluginSupervisorHook struct { logger hclog.Logger alloc *structs.Allocation @@ -81,7 +81,7 @@ var _ interfaces.TaskStopHook = &csiPluginSupervisorHook{} // Per-allocation directories of unix domain sockets used to communicate // with the CSI plugin. Nomad creates the directory and the plugin creates // the socket file. This directory is bind-mounted to the -// csi_plugin.mount_config dir in the plugin task. +// csi_plugin.mount_dir in the plugin task. // // {plugin-type}/{plugin-id}/ // staging/ @@ -103,6 +103,16 @@ func newCSIPluginSupervisorHook(config *csiPluginSupervisorHookConfig) *csiPlugi socketMountPoint := filepath.Join(config.clientStateDirPath, "csi", "plugins", config.runner.Alloc().ID) + // In v1.3.0, Nomad started instructing CSI plugins to stage and publish + // within /local/csi. Plugins deployed after the introduction of + // StagePublishBaseDir default to StagePublishBaseDir = /local/csi. However, + // plugins deployed between v1.3.0 and the introduction of + // StagePublishBaseDir have StagePublishBaseDir = "". Default to /local/csi here + // to avoid breaking plugins that aren't redeployed. 
+ if task.CSIPluginConfig.StagePublishBaseDir == "" { + task.CSIPluginConfig.StagePublishBaseDir = filepath.Join("/local", "csi") + } + if task.CSIPluginConfig.HealthTimeout == 0 { task.CSIPluginConfig.HealthTimeout = 30 * time.Second } @@ -157,8 +167,7 @@ func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, } // where the staging and per-alloc directories will be mounted volumeStagingMounts := &drivers.MountConfig{ - // TODO(tgross): add this TaskPath to the CSIPluginConfig as well - TaskPath: "/local/csi", + TaskPath: h.task.CSIPluginConfig.StagePublishBaseDir, HostPath: h.mountPoint, Readonly: false, PropagationMode: "bidirectional", @@ -238,13 +247,13 @@ func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.Tas // the passed in context is terminated. // // The supervisor works by: -// - Initially waiting for the plugin to become available. This loop is expensive -// and may do things like create new gRPC Clients on every iteration. -// - After receiving an initial healthy status, it will inform the plugin catalog -// of the plugin, registering it with the plugins fingerprinted capabilities. -// - We then perform a more lightweight check, simply probing the plugin on a less -// frequent interval to ensure it is still alive, emitting task events when this -// status changes. +// - Initially waiting for the plugin to become available. This loop is expensive +// and may do things like create new gRPC Clients on every iteration. +// - After receiving an initial healthy status, it will inform the plugin catalog +// of the plugin, registering it with the plugins fingerprinted capabilities. +// - We then perform a more lightweight check, simply probing the plugin on a less +// frequent interval to ensure it is still alive, emitting task events when this +// status changes. // // Deeper fingerprinting of the plugin is implemented by the csimanager. func (h *csiPluginSupervisorHook) ensureSupervisorLoop(ctx context.Context) { @@ -360,7 +369,7 @@ func (h *csiPluginSupervisorHook) registerPlugin(client csi.CSIPlugin, socketPat Options: map[string]string{ "Provider": info.Name, // vendor name "MountPoint": h.mountPoint, - "ContainerMountPoint": "/local/csi", + "ContainerMountPoint": h.task.CSIPluginConfig.StagePublishBaseDir, }, } } diff --git a/client/allocrunner/taskrunner/remotetask_hook.go b/client/allocrunner/taskrunner/remotetask_hook.go index 5a8ac03d12a..4fd9a63769b 100644 --- a/client/allocrunner/taskrunner/remotetask_hook.go +++ b/client/allocrunner/taskrunner/remotetask_hook.go @@ -32,10 +32,10 @@ func (h *remoteTaskHook) Name() string { } // Prestart performs 2 remote task driver related tasks: -// 1. If there is no local handle, see if there is a handle propagated from a -// previous alloc to be restored. -// 2. If the alloc is lost make sure the task signal is set to detach instead -// of kill. +// 1. If there is no local handle, see if there is a handle propagated from a +// previous alloc to be restored. +// 2. If the alloc is lost make sure the task signal is set to detach instead +// of kill. 
func (h *remoteTaskHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { if h.tr.getDriverHandle() != nil { // Driver handle already exists so don't try to load remote diff --git a/client/allocrunner/taskrunner/restarts/restarts.go b/client/allocrunner/taskrunner/restarts/restarts.go index a117e8d49d1..0e39e471524 100644 --- a/client/allocrunner/taskrunner/restarts/restarts.go +++ b/client/allocrunner/taskrunner/restarts/restarts.go @@ -139,11 +139,11 @@ func (r *RestartTracker) GetCount() int { // GetState returns the tasks next state given the set exit code and start // error. One of the following states are returned: -// * TaskRestarting - Task should be restarted -// * TaskNotRestarting - Task should not be restarted and has exceeded its -// restart policy. -// * TaskTerminated - Task has terminated successfully and does not need a -// restart. +// - TaskRestarting - Task should be restarted +// - TaskNotRestarting - Task should not be restarted and has exceeded its +// restart policy. +// - TaskTerminated - Task has terminated successfully and does not need a +// restart. // // If TaskRestarting is returned, the duration is how long to wait until // starting the task again. diff --git a/client/allocrunner/taskrunner/sids_hook_test.go b/client/allocrunner/taskrunner/sids_hook_test.go index d5951ed017e..c8f7657417c 100644 --- a/client/allocrunner/taskrunner/sids_hook_test.go +++ b/client/allocrunner/taskrunner/sids_hook_test.go @@ -22,7 +22,6 @@ import ( "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" ) @@ -297,11 +296,7 @@ func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) { go tr.Run() // wait for task runner to finish running - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - r.Fail("timed out waiting for task runner") - } + testWaitForTaskToDie(t, tr) // assert task exited un-successfully finalState := tr.TaskState() diff --git a/client/allocrunner/taskrunner/state/state.go b/client/allocrunner/taskrunner/state/state.go index 5f83c476c80..a4bc26b0f0c 100644 --- a/client/allocrunner/taskrunner/state/state.go +++ b/client/allocrunner/taskrunner/state/state.go @@ -16,6 +16,11 @@ type LocalState struct { // TaskHandle is the handle used to reattach to the task during recovery TaskHandle *drivers.TaskHandle + + // RunComplete is set to true when the TaskRunner.Run() method finishes. + // It is used to distinguish between a dead task that could be restarted + // and one that will never run again. + RunComplete bool } func NewLocalState() *LocalState { @@ -52,6 +57,7 @@ func (s *LocalState) Copy() *LocalState { Hooks: make(map[string]*HookState, len(s.Hooks)), DriverNetwork: s.DriverNetwork.Copy(), TaskHandle: s.TaskHandle.Copy(), + RunComplete: s.RunComplete, } // Copy the hook state diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index 981998b4f32..445cf044038 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -62,6 +62,11 @@ const ( // updates have come in since the last one was handled, we only need to // handle the last one. triggerUpdateChCap = 1 + + // restartChCap is the capacity for the restartCh used for triggering task + // restarts. 
It should be exactly 1 as even if multiple restarts have come + // we only need to handle the last one. + restartChCap = 1 ) type TaskRunner struct { @@ -95,6 +100,9 @@ type TaskRunner struct { // stateDB is for persisting localState and taskState stateDB cstate.StateDB + // restartCh is used to signal that the task should restart. + restartCh chan struct{} + // shutdownCtx is used to exit the TaskRunner *without* affecting task state. shutdownCtx context.Context @@ -233,8 +241,8 @@ type TaskRunner struct { // GetClientAllocs has been called in case of a failed restore. serversContactedCh <-chan struct{} - // startConditionMetCtx is done when TR should start the task - startConditionMetCtx <-chan struct{} + // startConditionMetCh signals the TaskRunner when it should start the task + startConditionMetCh <-chan struct{} // waitOnServers defaults to false but will be set true if a restore // fails and the Run method should wait until serversContactedCh is @@ -304,8 +312,8 @@ type Config struct { // servers succeeds and allocs are synced. ServersContactedCh chan struct{} - // startConditionMetCtx is done when TR should start the task - StartConditionMetCtx <-chan struct{} + // StartConditionMetCh signals the TaskRunner when it should start the task + StartConditionMetCh <-chan struct{} // ShutdownDelayCtx is a context from the alloc runner which will // tell us to exit early from shutdown_delay @@ -367,6 +375,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { shutdownCtx: trCtx, shutdownCtxCancel: trCancel, triggerUpdateCh: make(chan struct{}, triggerUpdateChCap), + restartCh: make(chan struct{}, restartChCap), waitCh: make(chan struct{}), csiManager: config.CSIManager, cpusetCgroupPathGetter: config.CpusetCgroupPathGetter, @@ -374,7 +383,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { driverManager: config.DriverManager, maxEvents: defaultMaxEvents, serversContactedCh: config.ServersContactedCh, - startConditionMetCtx: config.StartConditionMetCtx, + startConditionMetCh: config.StartConditionMetCh, shutdownDelayCtx: config.ShutdownDelayCtx, shutdownDelayCancelFn: config.ShutdownDelayCancelFn, serviceRegWrapper: config.ServiceRegWrapper, @@ -506,20 +515,25 @@ func (tr *TaskRunner) Run() { tr.stateLock.RLock() dead := tr.state.State == structs.TaskStateDead + runComplete := tr.localState.RunComplete tr.stateLock.RUnlock() - // if restoring a dead task, ensure that task is cleared and all post hooks - // are called without additional state updates + // If restoring a dead task, ensure the task is cleared and, if the local + // state indicates that the previous Run() call is complete, execute all + // post stop hooks and exit early, otherwise proceed until the + // ALLOC_RESTART loop skipping MAIN since the task is dead. if dead { // do cleanup functions without emitting any additional events/work // to handle cases where we restored a dead task where client terminated // after task finished before completing post-run actions. 
tr.clearDriverHandle() tr.stateUpdater.TaskStateUpdated() - if err := tr.stop(); err != nil { - tr.logger.Error("stop failed on terminal task", "error", err) + if runComplete { + if err := tr.stop(); err != nil { + tr.logger.Error("stop failed on terminal task", "error", err) + } + return } - return } // Updates are handled asynchronously with the other hooks but each @@ -541,27 +555,27 @@ func (tr *TaskRunner) Run() { } } - select { - case <-tr.startConditionMetCtx: - tr.logger.Debug("lifecycle start condition has been met, proceeding") - // yay proceed - case <-tr.killCtx.Done(): - case <-tr.shutdownCtx.Done(): - return - } + // Set the initial task state. + tr.stateUpdater.TaskStateUpdated() timer, stop := helper.NewSafeTimer(0) // timer duration calculated JIT defer stop() MAIN: for !tr.shouldShutdown() { + if dead { + break + } + select { case <-tr.killCtx.Done(): break MAIN case <-tr.shutdownCtx.Done(): // TaskRunner was told to exit immediately return - default: + case <-tr.startConditionMetCh: + tr.logger.Debug("lifecycle start condition has been met, proceeding") + // yay proceed } // Run the prestart hooks @@ -671,6 +685,38 @@ MAIN: // Mark the task as dead tr.UpdateState(structs.TaskStateDead, nil) + // Wait here in case the allocation is restarted. Poststop tasks will never + // run again so skip them to avoid blocking forever. + if !tr.Task().IsPoststop() { + ALLOC_RESTART: + // Run in a loop to handle cases where restartCh is triggered but the + // task runner doesn't need to restart. + for { + select { + case <-tr.killCtx.Done(): + break ALLOC_RESTART + case <-tr.shutdownCtx.Done(): + return + case <-tr.restartCh: + // Restart without delay since the task is not running anymore. + restart, _ := tr.shouldRestart() + if restart { + // Set runner as not dead to allow the MAIN loop to run. + dead = false + goto MAIN + } + } + } + } + + tr.stateLock.Lock() + tr.localState.RunComplete = true + err := tr.stateDB.PutTaskRunnerLocalState(tr.allocID, tr.taskName, tr.localState) + if err != nil { + tr.logger.Warn("error persisting task state on run loop exit", "error", err) + } + tr.stateLock.Unlock() + // Run the stop hooks if err := tr.stop(); err != nil { tr.logger.Error("stop failed", "error", err) @@ -1197,8 +1243,10 @@ func (tr *TaskRunner) UpdateState(state string, event *structs.TaskEvent) { tr.stateLock.Lock() defer tr.stateLock.Unlock() + tr.logger.Trace("setting task state", "state", state) + if event != nil { - tr.logger.Trace("setting task state", "state", state, "event", event.Type) + tr.logger.Trace("appending task event", "state", state, "event", event.Type) // Append the event tr.appendEvent(event) @@ -1413,7 +1461,7 @@ func (tr *TaskRunner) UpdateStats(ru *cstructs.TaskResourceUsage) { } } -//TODO Remove Backwardscompat or use tr.Alloc()? +// TODO Remove Backwardscompat or use tr.Alloc()? func (tr *TaskRunner) setGaugeForMemory(ru *cstructs.TaskResourceUsage) { alloc := tr.Alloc() var allocatedMem float32 @@ -1445,7 +1493,7 @@ func (tr *TaskRunner) setGaugeForMemory(ru *cstructs.TaskResourceUsage) { } } -//TODO Remove Backwardscompat or use tr.Alloc()? +// TODO Remove Backwardscompat or use tr.Alloc()? 
func (tr *TaskRunner) setGaugeForCPU(ru *cstructs.TaskResourceUsage) { alloc := tr.Alloc() var allocatedCPU float32 diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go index 845ae4e1ce1..6c9eda48f70 100644 --- a/client/allocrunner/taskrunner/task_runner_test.go +++ b/client/allocrunner/taskrunner/task_runner_test.go @@ -31,7 +31,7 @@ import ( agentconsul "github.com/hashicorp/nomad/command/agent/consul" mockdriver "github.com/hashicorp/nomad/drivers/mock" "github.com/hashicorp/nomad/drivers/rawexec" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -118,7 +118,7 @@ func testTaskRunnerConfig(t *testing.T, alloc *structs.Allocation, taskName stri shutdownDelayCtx, shutdownDelayCancelFn := context.WithCancel(context.Background()) - // Create a closed channel to mock TaskHookCoordinator.startConditionForTask. + // Create a closed channel to mock TaskCoordinator.startConditionForTask. // Closed channel indicates this task is not blocked on prestart hooks. closedCh := make(chan struct{}) close(closedCh) @@ -142,7 +142,7 @@ func testTaskRunnerConfig(t *testing.T, alloc *structs.Allocation, taskName stri DeviceManager: devicemanager.NoopMockManager(), DriverManager: drivermanager.TestDriverManager(t), ServersContactedCh: make(chan struct{}), - StartConditionMetCtx: closedCh, + StartConditionMetCh: closedCh, ShutdownDelayCtx: shutdownDelayCtx, ShutdownDelayCancelFn: shutdownDelayCancelFn, ServiceRegWrapper: wrapperMock, @@ -335,7 +335,7 @@ func TestTaskRunner_Restore_Running(t *testing.T) { defer newTR.Kill(context.Background(), structs.NewTaskEvent("cleanup")) // Wait for new task runner to exit when the process does - <-newTR.WaitCh() + testWaitForTaskToDie(t, newTR) // Assert that the process was only started once started := 0 @@ -349,6 +349,87 @@ func TestTaskRunner_Restore_Running(t *testing.T) { assert.Equal(t, 1, started) } +// TestTaskRunner_Restore_Dead asserts that restoring a dead task will place it +// back in the correct state. If the task was waiting for an alloc restart it +// must be able to be restarted after restore, otherwise a restart must fail. +func TestTaskRunner_Restore_Dead(t *testing.T) { + ci.Parallel(t) + + alloc := mock.BatchAlloc() + alloc.Job.TaskGroups[0].Count = 1 + task := alloc.Job.TaskGroups[0].Tasks[0] + task.Driver = "mock_driver" + task.Config = map[string]interface{}{ + "run_for": "2s", + } + conf, cleanup := testTaskRunnerConfig(t, alloc, task.Name) + conf.StateDB = cstate.NewMemDB(conf.Logger) // "persist" state between task runners + defer cleanup() + + // Run the first TaskRunner + origTR, err := NewTaskRunner(conf) + require.NoError(t, err) + go origTR.Run() + defer origTR.Kill(context.Background(), structs.NewTaskEvent("cleanup")) + + // Wait for it to be dead + testWaitForTaskToDie(t, origTR) + + // Cause TR to exit without shutting down task + origTR.Shutdown() + + // Start a new TaskRunner and do the Restore + newTR, err := NewTaskRunner(conf) + require.NoError(t, err) + require.NoError(t, newTR.Restore()) + + go newTR.Run() + defer newTR.Kill(context.Background(), structs.NewTaskEvent("cleanup")) + + // Verify that the TaskRunner is still active since it was recovered after + // a forced shutdown. 
+ select { + case <-newTR.WaitCh(): + require.Fail(t, "WaitCh is not blocking") + default: + } + + // Verify that we can restart task. + // Retry a few times as the newTR.Run() may not have started yet. + testutil.WaitForResult(func() (bool, error) { + ev := &structs.TaskEvent{Type: structs.TaskRestartSignal} + err = newTR.ForceRestart(context.Background(), ev, false) + return err == nil, err + }, func(err error) { + require.NoError(t, err) + }) + testWaitForTaskToStart(t, newTR) + + // Kill task to verify that it's restored as dead and not able to restart. + newTR.Kill(context.Background(), nil) + testutil.WaitForResult(func() (bool, error) { + select { + case <-newTR.WaitCh(): + return true, nil + default: + return false, fmt.Errorf("task still running") + } + }, func(err error) { + require.NoError(t, err) + }) + + newTR2, err := NewTaskRunner(conf) + require.NoError(t, err) + require.NoError(t, newTR2.Restore()) + + go newTR2.Run() + defer newTR2.Kill(context.Background(), structs.NewTaskEvent("cleanup")) + + ev := &structs.TaskEvent{Type: structs.TaskRestartSignal} + err = newTR2.ForceRestart(context.Background(), ev, false) + require.Equal(t, err, ErrTaskNotRunning) +} + // setupRestoreFailureTest starts a service, shuts down the task runner, and // kills the task before restarting a new TaskRunner. The new TaskRunner is // returned once it is running and waiting in pending along with a cleanup @@ -603,11 +684,7 @@ func TestTaskRunner_TaskEnv_Interpolated(t *testing.T) { defer cleanup() // Wait for task to complete - select { - case <-tr.WaitCh(): - case <-time.After(3 * time.Second): - require.Fail("timeout waiting for task to exit") - } + testWaitForTaskToDie(t, tr) // Get the mock driver plugin driverPlugin, err := conf.DriverManager.Dispense(mockdriver.PluginID.Name) @@ -654,7 +731,9 @@ func TestTaskRunner_TaskEnv_Chroot(t *testing.T) { go tr.Run() defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup")) - // Wait for task to exit + // Wait for task to exit and kill the task runner to run the stop hooks. + testWaitForTaskToDie(t, tr) + tr.Kill(context.Background(), structs.NewTaskEvent("kill")) timeout := 15 * time.Second if testutil.IsCI() { timeout = 120 * time.Second @@ -703,7 +782,9 @@ func TestTaskRunner_TaskEnv_Image(t *testing.T) { tr, conf, cleanup := runTestTaskRunner(t, alloc, task.Name) defer cleanup() - // Wait for task to exit + // Wait for task to exit and kill task runner to run the stop hooks. + testWaitForTaskToDie(t, tr) + tr.Kill(context.Background(), structs.NewTaskEvent("kill")) select { case <-tr.WaitCh(): case <-time.After(15 * time.Second): @@ -750,7 +831,9 @@ func TestTaskRunner_TaskEnv_None(t *testing.T) { %s `, root, taskDir, taskDir, os.Getenv("PATH")) - // Wait for task to exit + // Wait for task to exit and kill the task runner to run the stop hooks. 
+ testWaitForTaskToDie(t, tr) + tr.Kill(context.Background(), structs.NewTaskEvent("kill")) select { case <-tr.WaitCh(): case <-time.After(15 * time.Second): @@ -818,10 +901,7 @@ func TestTaskRunner_DevicePropogation(t *testing.T) { defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup")) // Wait for task to complete - select { - case <-tr.WaitCh(): - case <-time.After(3 * time.Second): - } + testWaitForTaskToDie(t, tr) // Get the mock driver plugin driverPlugin, err := conf.DriverManager.Dispense(mockdriver.PluginID.Name) @@ -1065,7 +1145,7 @@ func TestTaskRunner_NoShutdownDelay(t *testing.T) { maxTimeToFailDuration := time.Duration(testutil.TestMultiplier()) * time.Second alloc := mock.Alloc() - alloc.DesiredTransition = structs.DesiredTransition{NoShutdownDelay: helper.BoolToPtr(true)} + alloc.DesiredTransition = structs.DesiredTransition{NoShutdownDelay: pointer.Of(true)} task := alloc.Job.TaskGroups[0].Tasks[0] task.Services[0].Tags = []string{"tag1"} task.Services = task.Services[:1] // only need 1 for this test @@ -1328,11 +1408,7 @@ func TestTaskRunner_CheckWatcher_Restart(t *testing.T) { // Wait until the task exits. Don't simply wait for it to run as it may // get restarted and terminated before the test is able to observe it // running. - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - require.Fail(t, "timeout") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() actualEvents := make([]string, len(state.Events)) @@ -1421,11 +1497,7 @@ func TestTaskRunner_BlockForSIDSToken(t *testing.T) { // task runner should exit now that it has been unblocked and it is a batch // job with a zero sleep time - select { - case <-tr.WaitCh(): - case <-time.After(15 * time.Second * time.Duration(testutil.TestMultiplier())): - r.Fail("timed out waiting for batch task to exist") - } + testWaitForTaskToDie(t, tr) // assert task exited successfully finalState := tr.TaskState() @@ -1478,11 +1550,7 @@ func TestTaskRunner_DeriveSIToken_Retry(t *testing.T) { go tr.Run() // assert task runner blocks on SI token - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - r.Fail("timed out waiting for task runner") - } + testWaitForTaskToDie(t, tr) // assert task exited successfully finalState := tr.TaskState() @@ -1598,11 +1666,7 @@ func TestTaskRunner_BlockForVaultToken(t *testing.T) { // TR should exit now that it's unblocked by vault as its a batch job // with 0 sleeping. 
- select { - case <-tr.WaitCh(): - case <-time.After(15 * time.Second * time.Duration(testutil.TestMultiplier())): - require.Fail(t, "timed out waiting for batch task to exit") - } + testWaitForTaskToDie(t, tr) // Assert task exited successfully finalState := tr.TaskState() @@ -1615,6 +1679,14 @@ func TestTaskRunner_BlockForVaultToken(t *testing.T) { require.NoError(t, err) require.Equal(t, token, string(data)) + // Kill task runner to trigger stop hooks + tr.Kill(context.Background(), structs.NewTaskEvent("kill")) + select { + case <-tr.WaitCh(): + case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): + require.Fail(t, "timed out waiting for task runner to exit") + } + // Check the token was revoked testutil.WaitForResult(func() (bool, error) { if len(vaultClient.StoppedTokens()) != 1 { @@ -1661,17 +1733,21 @@ func TestTaskRunner_DeriveToken_Retry(t *testing.T) { defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup")) go tr.Run() - // Wait for TR to exit and check its state + // Wait for TR to die and check its state + testWaitForTaskToDie(t, tr) + + state := tr.TaskState() + require.Equal(t, structs.TaskStateDead, state.State) + require.False(t, state.Failed) + + // Kill task runner to trigger stop hooks + tr.Kill(context.Background(), structs.NewTaskEvent("kill")) select { case <-tr.WaitCh(): case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): require.Fail(t, "timed out waiting for task runner to exit") } - state := tr.TaskState() - require.Equal(t, structs.TaskStateDead, state.State) - require.False(t, state.Failed) - require.Equal(t, 1, count) // Check that the token is on disk @@ -1771,11 +1847,7 @@ func TestTaskRunner_Download_ChrootExec(t *testing.T) { defer cleanup() // Wait for task to run and exit - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - require.Fail(t, "timed out waiting for task runner to exit") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -1816,11 +1888,7 @@ func TestTaskRunner_Download_RawExec(t *testing.T) { defer cleanup() // Wait for task to run and exit - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - require.Fail(t, "timed out waiting for task runner to exit") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -1851,11 +1919,7 @@ func TestTaskRunner_Download_List(t *testing.T) { defer cleanup() // Wait for task to run and exit - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - require.Fail(t, "timed out waiting for task runner to exit") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -1902,11 +1966,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) { tr, _, cleanup := runTestTaskRunner(t, alloc, task.Name) defer cleanup() - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - require.Fail(t, "timed out waiting for task to exit") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -2100,6 +2160,8 @@ func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) { case <-time.After(1 * time.Second): } + require.Equal(t, structs.TaskStatePending, tr.TaskState().State) + // Send a signal and 
restart err = tr.Signal(structs.NewTaskEvent("don't panic"), "QUIT") require.EqualError(t, err, ErrTaskNotRunning.Error()) @@ -2110,12 +2172,7 @@ func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) { // Unblock and let it finish waitCh <- struct{}{} - - select { - case <-tr.WaitCh(): - case <-time.After(10 * time.Second): - require.Fail(t, "timed out waiting for task to complete") - } + testWaitForTaskToDie(t, tr) // Assert the task ran and never restarted state := tr.TaskState() @@ -2153,11 +2210,7 @@ func TestTaskRunner_Run_RecoverableStartError(t *testing.T) { tr, _, cleanup := runTestTaskRunner(t, alloc, task.Name) defer cleanup() - select { - case <-tr.WaitCh(): - case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): - require.Fail(t, "timed out waiting for task to exit") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -2202,11 +2255,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) { go tr.Run() // Wait for task to run and exit - select { - case <-tr.WaitCh(): - case <-time.After(15 * time.Second * time.Duration(testutil.TestMultiplier())): - require.Fail(t, "timed out waiting for task runner to exit") - } + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -2536,7 +2585,9 @@ func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) { tr, err := NewTaskRunner(conf) require.NoError(t, err) defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup")) - tr.Run() + go tr.Run() + + testWaitForTaskToDie(t, tr) state := tr.TaskState() require.Equal(t, structs.TaskStateDead, state.State) @@ -2562,7 +2613,17 @@ func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) { func testWaitForTaskToStart(t *testing.T, tr *TaskRunner) { testutil.WaitForResult(func() (bool, error) { ts := tr.TaskState() - return ts.State == structs.TaskStateRunning, fmt.Errorf("%v", ts.State) + return ts.State == structs.TaskStateRunning, fmt.Errorf("expected task to be running, got %v", ts.State) + }, func(err error) { + require.NoError(t, err) + }) +} + +// testWaitForTaskToDie waits for the task to die or fails the test +func testWaitForTaskToDie(t *testing.T, tr *TaskRunner) { + testutil.WaitForResult(func() (bool, error) { + ts := tr.TaskState() + return ts.State == structs.TaskStateDead, fmt.Errorf("expected task to be dead, got %v", ts.State) }, func(err error) { require.NoError(t, err) }) diff --git a/client/allocrunner/taskrunner/template/template.go b/client/allocrunner/taskrunner/template/template.go index 37c3f1da6eb..10d45ccb73e 100644 --- a/client/allocrunner/taskrunner/template/template.go +++ b/client/allocrunner/taskrunner/template/template.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -54,6 +54,10 @@ type TaskTemplateManager struct { // runner is the consul-template runner runner *manager.Runner + // handle is used to execute scripts + handle interfaces.ScriptExecutor + handleLock sync.Mutex + // signals is a lookup map from the string representation of a signal to its // actual signal signals map[string]os.Signal @@ -192,6 +196,14 @@ func (tm *TaskTemplateManager) Stop() { } } +// SetDriverHandle sets the executor +func (tm *TaskTemplateManager) 
SetDriverHandle(executor interfaces.ScriptExecutor) { + tm.handleLock.Lock() + defer tm.handleLock.Unlock() + tm.handle = executor + +} + // run is the long lived loop that handles errors and templates being rendered func (tm *TaskTemplateManager) run() { // Runner is nil if there are no templates @@ -392,6 +404,7 @@ func (tm *TaskTemplateManager) onTemplateRendered(handledRenders map[string]time var handling []string signals := make(map[string]struct{}) + scripts := []*structs.ChangeScript{} restart := false var splay time.Duration @@ -436,6 +449,8 @@ func (tm *TaskTemplateManager) onTemplateRendered(handledRenders map[string]time signals[tmpl.ChangeSignal] = struct{}{} case structs.TemplateChangeModeRestart: restart = true + case structs.TemplateChangeModeScript: + scripts = append(scripts, tmpl.ChangeScript) case structs.TemplateChangeModeNoop: continue } @@ -494,6 +509,72 @@ func (tm *TaskTemplateManager) onTemplateRendered(handledRenders map[string]time } } + // process script execution concurrently + var wg sync.WaitGroup + for _, script := range scripts { + wg.Add(1) + go tm.processScript(script, &wg) + } + wg.Wait() +} + +// handleScriptError is a helper function that emits a task hook failure event +// and, if fail_on_error is set, kills the task +func (tm *TaskTemplateManager) handleScriptError(script *structs.ChangeScript, msg string) { + ev := structs.NewTaskEvent(structs.TaskHookFailed).SetDisplayMessage(msg) + tm.config.Events.EmitEvent(ev) + + if script.FailOnError { + tm.config.Lifecycle.Kill(context.Background(), + structs.NewTaskEvent(structs.TaskKilling). + SetFailsTask(). + SetDisplayMessage("Template script failed, task is being killed")) + } +} + +// processScript executes a template change_mode script and handles any errors +func (tm *TaskTemplateManager) processScript(script *structs.ChangeScript, wg *sync.WaitGroup) { + defer wg.Done() + + if tm.handle == nil { + failureMsg := fmt.Sprintf( + "Template failed to run script %v with arguments %v because the task driver doesn't support the exec operation", + script.Command, + script.Args, + ) + tm.handleScriptError(script, failureMsg) + return + } + _, exitCode, err := tm.handle.Exec(script.Timeout, script.Command, script.Args) + if err != nil { + failureMsg := fmt.Sprintf( + "Template failed to run script %v with arguments %v on change: %v. Exit code: %v", + script.Command, + script.Args, + err, + exitCode, + ) + tm.handleScriptError(script, failureMsg) + return + } + if exitCode != 0 { + failureMsg := fmt.Sprintf( + "Template ran script %v with arguments %v on change but it exited with code: %v", + script.Command, + script.Args, + exitCode, + ) + tm.handleScriptError(script, failureMsg) + return + } + tm.config.Events.EmitEvent(structs.NewTaskEvent(structs.TaskHookMessage). + SetDisplayMessage( + fmt.Sprintf( + "Template successfully ran script %v with arguments: %v. Exit code: %v", + script.Command, + script.Args, + exitCode, + ))) } // allTemplatesNoop returns whether all the managed templates have change mode noop.
@@ -611,7 +692,7 @@ func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[*ctconf.Templa } ct.Wait = &ctconf.WaitConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Min: tmpl.Wait.Min, Max: tmpl.Wait.Max, } @@ -626,6 +707,14 @@ func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[*ctconf.Templa m := os.FileMode(v) ct.Perms = &m } + // Set ownership + if tmpl.Uid != nil && *tmpl.Uid >= 0 { + ct.Uid = tmpl.Uid + } + if tmpl.Gid != nil && *tmpl.Gid >= 0 { + ct.Gid = tmpl.Gid + } + ct.Finalize() ctmpls[ct] = tmpl @@ -717,7 +806,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, if cc.ConsulConfig.EnableSSL != nil && *cc.ConsulConfig.EnableSSL { verify := cc.ConsulConfig.VerifySSL != nil && *cc.ConsulConfig.VerifySSL conf.Consul.SSL = &ctconf.SSLConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Verify: &verify, Cert: &cc.ConsulConfig.CertFile, Key: &cc.ConsulConfig.KeyFile, @@ -732,7 +821,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, } conf.Consul.Auth = &ctconf.AuthConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Username: &parts[0], Password: &parts[1], } @@ -761,7 +850,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, // Set up the Vault config // Always set these to ensure nothing is picked up from the environment emptyStr := "" - conf.Vault.RenewToken = helper.BoolToPtr(false) + conf.Vault.RenewToken = pointer.Of(false) conf.Vault.Token = &emptyStr if cc.VaultConfig != nil && cc.VaultConfig.IsEnabled() { conf.Vault.Address = &cc.VaultConfig.Addr @@ -780,7 +869,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, skipVerify := cc.VaultConfig.TLSSkipVerify != nil && *cc.VaultConfig.TLSSkipVerify verify := !skipVerify conf.Vault.SSL = &ctconf.SSLConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Verify: &verify, Cert: &cc.VaultConfig.TLSCertFile, Key: &cc.VaultConfig.TLSKeyFile, @@ -790,8 +879,8 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, } } else { conf.Vault.SSL = &ctconf.SSLConfig{ - Enabled: helper.BoolToPtr(false), - Verify: helper.BoolToPtr(false), + Enabled: pointer.Of(false), + Verify: pointer.Of(false), Cert: &emptyStr, Key: &emptyStr, CaCert: &emptyStr, @@ -817,6 +906,17 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, conf.Nomad.Namespace = &config.NomadNamespace conf.Nomad.Transport.CustomDialer = cc.TemplateDialer conf.Nomad.Token = &config.NomadToken + if cc.TemplateConfig != nil && cc.TemplateConfig.NomadRetry != nil { + // Set the user-specified Nomad RetryConfig + var err error + if err = cc.TemplateConfig.NomadRetry.Validate(); err != nil { + return nil, err + } + conf.Nomad.Retry, err = cc.TemplateConfig.NomadRetry.ToConsulTemplate() + if err != nil { + return nil, err + } + } conf.Finalize() return conf, nil diff --git a/client/allocrunner/taskrunner/template/template_test.go b/client/allocrunner/taskrunner/template/template_test.go index 0221cfe1ca5..e4d7ca69ebd 100644 --- a/client/allocrunner/taskrunner/template/template_test.go +++ b/client/allocrunner/taskrunner/template/template_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "os/user" "path/filepath" @@ -16,6 +15,7 @@ import ( "strconv" "strings" "sync" + "syscall" "testing" "time" @@ -25,7 +25,7 @@ import ( "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" - "github.com/hashicorp/nomad/helper" + 
"github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -33,6 +33,7 @@ import ( sconfig "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/testutil" "github.com/kr/pretty" + "github.com/shoenig/test/must" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -121,6 +122,16 @@ func (m *MockTaskHooks) EmitEvent(event *structs.TaskEvent) { func (m *MockTaskHooks) SetState(state string, event *structs.TaskEvent) {} +// mockExecutor implements script executor interface +type mockExecutor struct { + DesiredExit int + DesiredErr error +} + +func (m *mockExecutor) Exec(timeout time.Duration, cmd string, args []string) ([]byte, int, error) { + return []byte{}, m.DesiredExit, m.DesiredErr +} + // testHarness is used to test the TaskTemplateManager by spinning up // Consul/Vault as needed type testHarness struct { @@ -154,7 +165,7 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b TemplateConfig: &config.ClientTemplateConfig{ FunctionDenylist: config.DefaultTemplateFunctionDenylist, DisableSandbox: false, - ConsulRetry: &config.RetryConfig{Backoff: helper.TimeToPtr(10 * time.Millisecond)}, + ConsulRetry: &config.RetryConfig{Backoff: pointer.Of(10 * time.Millisecond)}, }}, emitRate: DefaultMaxTemplateEventRate, } @@ -211,7 +222,6 @@ func (h *testHarness) startWithErr() error { EnvBuilder: h.envBuilder, MaxTemplateEventRate: h.emitRate, }) - return err } @@ -379,7 +389,7 @@ func TestTaskTemplateManager_InvalidConfig(t *testing.T) { func TestTaskTemplateManager_HostPath(t *testing.T) { ci.Parallel(t) // Make a template that will render immediately and write it to a tmp file - f, err := ioutil.TempFile("", "") + f, err := os.CreateTemp("", "") if err != nil { t.Fatalf("Bad: %v", err) } @@ -415,7 +425,7 @@ func TestTaskTemplateManager_HostPath(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -492,7 +502,7 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -512,6 +522,8 @@ func TestTaskTemplateManager_Permissions(t *testing.T) { DestPath: file, ChangeMode: structs.TemplateChangeModeNoop, Perms: "777", + Uid: pointer.Of(503), + Gid: pointer.Of(20), } harness := newTestHarness(t, []*structs.Template{template}, false, false) @@ -535,6 +547,13 @@ func TestTaskTemplateManager_Permissions(t *testing.T) { if m := fi.Mode(); m != os.ModePerm { t.Fatalf("Got mode %v; want %v", m, os.ModePerm) } + + sys := fi.Sys() + uid := pointer.Of(int(sys.(*syscall.Stat_t).Uid)) + gid := pointer.Of(int(sys.(*syscall.Stat_t).Gid)) + + must.Eq(t, template.Uid, uid) + must.Eq(t, template.Gid, gid) } func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) { @@ -562,7 +581,7 @@ func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -587,7 +606,7 @@ func 
TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) { // Write the contents path := filepath.Join(harness.taskDir, file) - if err := ioutil.WriteFile(path, []byte(content), 0777); err != nil { + if err := os.WriteFile(path, []byte(content), 0777); err != nil { t.Fatalf("Failed to write data: %v", err) } @@ -603,7 +622,7 @@ func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) { // Check the file is there path = filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -649,7 +668,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -699,7 +718,7 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -744,7 +763,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { // Check that the static file has been rendered path := filepath.Join(harness.taskDir, staticFile) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -765,7 +784,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { // Check the consul file is there path = filepath.Join(harness.taskDir, consulFile) - raw, err = ioutil.ReadFile(path) + raw, err = os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -817,7 +836,7 @@ func TestTaskTemplateManager_FirstRender_Restored(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) require.NoError(err, "Failed to read rendered template from %q", path) require.Equal(content, string(raw), "Unexpected template data; got %s, want %q", raw, content) @@ -911,7 +930,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -933,7 +952,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { // Check the file has been updated path = filepath.Join(harness.taskDir, file) - raw, err = ioutil.ReadFile(path) + raw, err = os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -1023,7 +1042,7 @@ OUTER: // Check the files have been updated path := filepath.Join(harness.taskDir, file1) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -1033,7 +1052,7 @@ OUTER: } path = filepath.Join(harness.taskDir, file2) - raw, err = ioutil.ReadFile(path) + raw, err = os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -1097,7 +1116,7 @@ OUTER: // Check the files have been updated path := filepath.Join(harness.taskDir, file1) - raw, err := 
ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -1132,7 +1151,7 @@ func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) { // Check the file is there actual := fmt.Sprintf("%s.tmpl", harness.node.ID) path := filepath.Join(harness.taskDir, actual) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { t.Fatalf("Failed to read rendered template from %q: %v", path, err) } @@ -1190,6 +1209,168 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) { require.Contains(harness.mockHooks.KillEvent.DisplayMessage, "failed to send signals") } +func TestTaskTemplateManager_ScriptExecution(t *testing.T) { + ci.Parallel(t) + + // Make a template that renders based on a key in Consul and triggers script + key1 := "bam" + key2 := "bar" + content1_1 := "cat" + content1_2 := "dog" + t1 := &structs.Template{ + EmbeddedTmpl: ` +FOO={{key "bam"}} +`, + DestPath: "test.env", + ChangeMode: structs.TemplateChangeModeScript, + ChangeScript: &structs.ChangeScript{ + Command: "/bin/foo", + Args: []string{}, + Timeout: 5 * time.Second, + FailOnError: false, + }, + Envvars: true, + } + t2 := &structs.Template{ + EmbeddedTmpl: ` +BAR={{key "bar"}} +`, + DestPath: "test2.env", + ChangeMode: structs.TemplateChangeModeScript, + ChangeScript: &structs.ChangeScript{ + Command: "/bin/foo", + Args: []string{}, + Timeout: 5 * time.Second, + FailOnError: false, + }, + Envvars: true, + } + + me := mockExecutor{DesiredExit: 0, DesiredErr: nil} + harness := newTestHarness(t, []*structs.Template{t1, t2}, true, false) + harness.start(t) + harness.manager.SetDriverHandle(&me) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + require.Fail(t, "Task unblock should not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the key to Consul + harness.consul.SetKV(t, key1, []byte(content1_1)) + harness.consul.SetKV(t, key2, []byte(content1_1)) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + require.Fail(t, "Task unblock should have been called") + } + + // Update the keys in Consul + harness.consul.SetKV(t, key1, []byte(content1_2)) + + // Wait for restart + timeout := time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second) +OUTER: + for { + select { + case <-harness.mockHooks.RestartCh: + require.Fail(t, "restart not expected") + case ev := <-harness.mockHooks.EmitEventCh: + if strings.Contains(ev.DisplayMessage, t1.ChangeScript.Command) { + break OUTER + } + case <-harness.mockHooks.SignalCh: + require.Fail(t, "signal not expected") + case <-timeout: + require.Fail(t, "should have received an event") + } + } +} + +// TestTaskTemplateManager_ScriptExecutionFailTask tests whether we fail the +// task upon script execution failure if that's how it's configured. 
+func TestTaskTemplateManager_ScriptExecutionFailTask(t *testing.T) { + ci.Parallel(t) + require := require.New(t) + + // Make a template that renders based on a key in Consul and triggers script + key1 := "bam" + key2 := "bar" + content1_1 := "cat" + content1_2 := "dog" + t1 := &structs.Template{ + EmbeddedTmpl: ` +FOO={{key "bam"}} +`, + DestPath: "test.env", + ChangeMode: structs.TemplateChangeModeScript, + ChangeScript: &structs.ChangeScript{ + Command: "/bin/foo", + Args: []string{}, + Timeout: 5 * time.Second, + FailOnError: true, + }, + Envvars: true, + } + t2 := &structs.Template{ + EmbeddedTmpl: ` +BAR={{key "bar"}} +`, + DestPath: "test2.env", + ChangeMode: structs.TemplateChangeModeScript, + ChangeScript: &structs.ChangeScript{ + Command: "/bin/foo", + Args: []string{}, + Timeout: 5 * time.Second, + FailOnError: false, + }, + Envvars: true, + } + + me := mockExecutor{DesiredExit: 1, DesiredErr: fmt.Errorf("Script failed")} + harness := newTestHarness(t, []*structs.Template{t1, t2}, true, false) + harness.start(t) + harness.manager.SetDriverHandle(&me) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + require.Fail("Task unblock should not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the key to Consul + harness.consul.SetKV(t, key1, []byte(content1_1)) + harness.consul.SetKV(t, key2, []byte(content1_1)) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + require.Fail("Task unblock should have been called") + } + + // Update the keys in Consul + harness.consul.SetKV(t, key1, []byte(content1_2)) + + // Wait for kill channel + select { + case <-harness.mockHooks.KillCh: + break + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + require.Fail("Should have received a kill event: %+v", harness.mockHooks) + } + + require.NotNil(harness.mockHooks.KillEvent) + require.Contains(harness.mockHooks.KillEvent.DisplayMessage, "task is being killed") +} + // TestTaskTemplateManager_FiltersProcessEnvVars asserts that we only render // environment variables found in task env-vars and not read the nomad host // process environment variables.
nomad host process environment variables @@ -1230,7 +1411,7 @@ TEST_ENV_NOT_FOUND: {{env "` + testenv + `_NOTFOUND" }}` // Check the file is there path := filepath.Join(harness.taskDir, file) - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) require.NoError(t, err) require.Equal(t, expected, string(raw)) @@ -1286,7 +1467,7 @@ func TestTaskTemplateManager_Env_Missing(t *testing.T) { d := t.TempDir() // Fake writing the file so we don't have to run the whole template manager - err := ioutil.WriteFile(filepath.Join(d, "exists.env"), []byte("FOO=bar\n"), 0644) + err := os.WriteFile(filepath.Join(d, "exists.env"), []byte("FOO=bar\n"), 0644) if err != nil { t.Fatalf("error writing template file: %v", err) } @@ -1319,7 +1500,7 @@ func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) { d := t.TempDir() // Fake writing the file so we don't have to run the whole template manager - err := ioutil.WriteFile(filepath.Join(d, "exists.env"), []byte("FOO=bar\n"), 0644) + err := os.WriteFile(filepath.Join(d, "exists.env"), []byte("FOO=bar\n"), 0644) if err != nil { t.Fatalf("error writing template file: %v", err) } @@ -1354,11 +1535,11 @@ func TestTaskTemplateManager_Env_Multi(t *testing.T) { d := t.TempDir() // Fake writing the files so we don't have to run the whole template manager - err := ioutil.WriteFile(filepath.Join(d, "zzz.env"), []byte("FOO=bar\nSHARED=nope\n"), 0644) + err := os.WriteFile(filepath.Join(d, "zzz.env"), []byte("FOO=bar\nSHARED=nope\n"), 0644) if err != nil { t.Fatalf("error writing template file 1: %v", err) } - err = ioutil.WriteFile(filepath.Join(d, "aaa.env"), []byte("BAR=foo\nSHARED=yup\n"), 0644) + err = os.WriteFile(filepath.Join(d, "aaa.env"), []byte("BAR=foo\nSHARED=yup\n"), 0644) if err != nil { t.Fatalf("error writing template file 2: %v", err) } @@ -1478,7 +1659,7 @@ func TestTaskTemplateManager_Config_ServerName(t *testing.T) { c := config.DefaultConfig() c.Node = mock.Node() c.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Addr: "https://localhost/", TLSServerName: "notlocalhost", } @@ -1506,7 +1687,7 @@ func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) { c := config.DefaultConfig() c.Node = mock.Node() c.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Addr: "https://localhost/", TLSServerName: "notlocalhost", Namespace: testNS, @@ -1537,7 +1718,7 @@ func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) { c := config.DefaultConfig() c.Node = mock.Node() c.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Addr: "https://localhost/", TLSServerName: "notlocalhost", Namespace: testNS, @@ -1923,7 +2104,7 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { clientConfig.Node = mock.Node() clientConfig.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Namespace: testNS, } @@ -1933,29 +2114,30 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { // helper to reduce boilerplate waitConfig := &config.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } // helper to reduce boilerplate retryConfig := &config.RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), - MaxBackoff: helper.TimeToPtr(20 * time.Second), + Attempts: 
pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), + MaxBackoff: pointer.Of(20 * time.Second), } - clientConfig.TemplateConfig.MaxStale = helper.TimeToPtr(5 * time.Second) - clientConfig.TemplateConfig.BlockQueryWaitTime = helper.TimeToPtr(60 * time.Second) + clientConfig.TemplateConfig.MaxStale = pointer.Of(5 * time.Second) + clientConfig.TemplateConfig.BlockQueryWaitTime = pointer.Of(60 * time.Second) clientConfig.TemplateConfig.Wait = waitConfig.Copy() clientConfig.TemplateConfig.ConsulRetry = retryConfig.Copy() clientConfig.TemplateConfig.VaultRetry = retryConfig.Copy() + clientConfig.TemplateConfig.NomadRetry = retryConfig.Copy() alloc := mock.Alloc() allocWithOverride := mock.Alloc() allocWithOverride.Job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(2 * time.Second), - Max: helper.TimeToPtr(12 * time.Second), + Min: pointer.Of(2 * time.Second), + Max: pointer.Of(12 * time.Second), }, }, } @@ -1970,11 +2152,12 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { { "basic-wait-config", &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), + NomadRetry: retryConfig.Copy(), }, &TaskTemplateManagerConfig{ ClientConfig: clientConfig, @@ -1983,29 +2166,31 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &config.Config{ TemplateConfig: &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), + NomadRetry: retryConfig.Copy(), }, }, &templateconfig.TemplateConfig{ Wait: &templateconfig.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, { "template-override", &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), + NomadRetry: retryConfig.Copy(), }, &TaskTemplateManagerConfig{ ClientConfig: clientConfig, @@ -2014,33 +2199,35 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &config.Config{ TemplateConfig: &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), + NomadRetry: retryConfig.Copy(), }, }, &templateconfig.TemplateConfig{ Wait: &templateconfig.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(2 * time.Second), - Max: helper.TimeToPtr(12 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(2 * time.Second), + Max: pointer.Of(12 * time.Second), }, }, }, { "bounds-override", 
&config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), WaitBounds: &config.WaitConfig{ - Min: helper.TimeToPtr(3 * time.Second), - Max: helper.TimeToPtr(11 * time.Second), + Min: pointer.Of(3 * time.Second), + Max: pointer.Of(11 * time.Second), }, ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), + NomadRetry: retryConfig.Copy(), }, &TaskTemplateManagerConfig{ ClientConfig: clientConfig, @@ -2049,30 +2236,31 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { Templates: []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(2 * time.Second), - Max: helper.TimeToPtr(12 * time.Second), + Min: pointer.Of(2 * time.Second), + Max: pointer.Of(12 * time.Second), }, }, }, }, &config.Config{ TemplateConfig: &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), WaitBounds: &config.WaitConfig{ - Min: helper.TimeToPtr(3 * time.Second), - Max: helper.TimeToPtr(11 * time.Second), + Min: pointer.Of(3 * time.Second), + Max: pointer.Of(11 * time.Second), }, ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), + NomadRetry: retryConfig.Copy(), }, }, &templateconfig.TemplateConfig{ Wait: &templateconfig.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(3 * time.Second), - Max: helper.TimeToPtr(11 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(3 * time.Second), + Max: pointer.Of(11 * time.Second), }, }, }, @@ -2106,6 +2294,12 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { require.Equal(t, *_case.ExpectedRunnerConfig.TemplateConfig.VaultRetry.Attempts, *runnerConfig.Vault.Retry.Attempts) require.Equal(t, *_case.ExpectedRunnerConfig.TemplateConfig.VaultRetry.Backoff, *runnerConfig.Vault.Retry.Backoff) require.Equal(t, *_case.ExpectedRunnerConfig.TemplateConfig.VaultRetry.MaxBackoff, *runnerConfig.Vault.Retry.MaxBackoff) + // Nomad Retry + require.NotNil(t, runnerConfig.Nomad) + require.NotNil(t, runnerConfig.Nomad.Retry) + require.Equal(t, *_case.ExpectedRunnerConfig.TemplateConfig.NomadRetry.Attempts, *runnerConfig.Nomad.Retry.Attempts) + require.Equal(t, *_case.ExpectedRunnerConfig.TemplateConfig.NomadRetry.Backoff, *runnerConfig.Nomad.Retry.Backoff) + require.Equal(t, *_case.ExpectedRunnerConfig.TemplateConfig.NomadRetry.MaxBackoff, *runnerConfig.Nomad.Retry.MaxBackoff) // Test that wait_bounds are enforced for _, tmpl := range *runnerConfig.Templates { @@ -2135,8 +2329,8 @@ func TestTaskTemplateManager_Template_Wait_Set(t *testing.T) { Templates: []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, @@ -2185,7 +2379,7 @@ func TestTaskTemplateManager_writeToFile_Disabled(t *testing.T) { // Check the file is not there path := filepath.Join(harness.taskDir, file) - _, err := ioutil.ReadFile(path) + _, err := os.ReadFile(path) require.Error(t, err) } @@ -2238,13 +2432,13 @@ func TestTaskTemplateManager_writeToFile(t *testing.T) { // Check the templated file is there path := filepath.Join(harness.taskDir, file) - r, err := 
ioutil.ReadFile(path) + r, err := os.ReadFile(path) require.NoError(t, err) require.True(t, bytes.HasSuffix(r, []byte("...done\n")), string(r)) // Check that writeToFile was allowed path = filepath.Join(harness.taskDir, "writetofile.out") - r, err = ioutil.ReadFile(path) + r, err = os.ReadFile(path) require.NoError(t, err) require.Equal(t, "hello", string(r)) } diff --git a/client/allocrunner/taskrunner/template_hook.go b/client/allocrunner/taskrunner/template_hook.go index cdc6ee16f7b..4e14fcc43f2 100644 --- a/client/allocrunner/taskrunner/template_hook.go +++ b/client/allocrunner/taskrunner/template_hook.go @@ -115,6 +115,19 @@ func (h *templateHook) Prestart(ctx context.Context, req *interfaces.TaskPrestar return nil } +func (h *templateHook) Poststart(ctx context.Context, req *interfaces.TaskPoststartRequest, resp *interfaces.TaskPoststartResponse) error { + if req.DriverExec != nil { + h.templateManager.SetDriverHandle(req.DriverExec) + } else { + for _, template := range h.config.templates { + if template.ChangeMode == structs.TemplateChangeModeScript { + return fmt.Errorf("template has change mode set to 'script' but the driver it uses does not provide exec capability") + } + } + } + return nil +} + func (h *templateHook) newManager() (unblock chan struct{}, err error) { unblock = make(chan struct{}) m, err := template.NewTaskTemplateManager(&template.TaskTemplateManagerConfig{ diff --git a/client/allocrunner/testing.go b/client/allocrunner/testing.go index 6f2fd7b03df..44e3eb52479 100644 --- a/client/allocrunner/testing.go +++ b/client/allocrunner/testing.go @@ -4,6 +4,7 @@ package allocrunner import ( + "fmt" "sync" "testing" @@ -20,6 +21,7 @@ import ( "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/client/vaultclient" "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) @@ -104,3 +106,13 @@ func TestAllocRunnerFromAlloc(t *testing.T, alloc *structs.Allocation) (*allocRu return ar, cleanup } + +func WaitForClientState(t *testing.T, ar *allocRunner, state string) { + testutil.WaitForResult(func() (bool, error) { + got := ar.AllocState().ClientStatus + return got == state, + fmt.Errorf("expected alloc runner to be in state %s, got %s", state, got) + }, func(err error) { + require.NoError(t, err) + }) +} diff --git a/client/client.go b/client/client.go index 5d2194d3a8e..9e8253132ee 100644 --- a/client/client.go +++ b/client/client.go @@ -47,6 +47,7 @@ import ( "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/envoy" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/pool" hstats "github.com/hashicorp/nomad/helper/stats" "github.com/hashicorp/nomad/helper/tlsutil" @@ -136,7 +137,7 @@ type ClientStatsReporter interface { } // AllocRunner is the interface implemented by the core alloc runner. -//TODO Create via factory to allow testing Client with mock AllocRunners. +// TODO Create via factory to allow testing Client with mock AllocRunners. 
type AllocRunner interface { Alloc() *structs.Allocation AllocState() *arstate.State @@ -159,6 +160,7 @@ type AllocRunner interface { PersistState() error RestartTask(taskName string, taskEvent *structs.TaskEvent) error + RestartRunning(taskEvent *structs.TaskEvent) error RestartAll(taskEvent *structs.TaskEvent) error Reconnect(update *structs.Allocation) error @@ -170,15 +172,22 @@ type AllocRunner interface { // are expected to register as a schedule-able node to the servers, and to // run allocations as determined by the servers. type Client struct { - config *config.Config - start time.Time + start time.Time // stateDB is used to efficiently store client state. stateDB state.StateDB - // configCopy is a copy that should be passed to alloc-runners. - configCopy *config.Config - configLock sync.RWMutex + // config must only be accessed with lock held. To update the config, use the + // Client.UpdateConfig() helper. If you need more fine grained control use + // the following pattern: + // + // c.configLock.Lock() + // newConfig := c.config.Copy() + // // + // c.config = newConfig + // c.configLock.Unlock() + config *config.Config + configLock sync.Mutex logger hclog.InterceptLogger rpcLogger hclog.Logger @@ -432,14 +441,8 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie return nil, fmt.Errorf("node setup failed: %v", err) } - // Store the config copy before restoring state but after it has been - // initialized. - c.configLock.Lock() - c.configCopy = c.config.Copy() - c.configLock.Unlock() - fingerprintManager := NewFingerprintManager( - c.configCopy.PluginSingletonLoader, c.GetConfig, c.configCopy.Node, + cfg.PluginSingletonLoader, c.GetConfig, cfg.Node, c.shutdownCh, c.updateNodeFromFingerprint, c.logger) c.pluginManagers = pluginmanager.New(c.logger) @@ -468,8 +471,8 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie // Setup the driver manager driverConfig := &drivermanager.Config{ Logger: c.logger, - Loader: c.configCopy.PluginSingletonLoader, - PluginConfig: c.configCopy.NomadPluginConfig(), + Loader: cfg.PluginSingletonLoader, + PluginConfig: cfg.NomadPluginConfig(), Updater: c.batchNodeUpdates.updateNodeFromDriver, EventHandlerFactory: c.GetTaskEventHandler, State: c.stateDB, @@ -483,10 +486,10 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie // Setup the device manager devConfig := &devicemanager.Config{ Logger: c.logger, - Loader: c.configCopy.PluginSingletonLoader, - PluginConfig: c.configCopy.NomadPluginConfig(), + Loader: cfg.PluginSingletonLoader, + PluginConfig: cfg.NomadPluginConfig(), Updater: c.batchNodeUpdates.updateNodeFromDevices, - StatsInterval: c.configCopy.StatsCollectionInterval, + StatsInterval: cfg.StatsCollectionInterval, State: c.stateDB, } devManager := devicemanager.New(devConfig) @@ -512,7 +515,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie go c.heartbeatStop.watch() // Add the stats collector - statsCollector := stats.NewHostStatsCollector(c.logger, c.config.AllocDir, c.devicemanager.AllStats) + statsCollector := stats.NewHostStatsCollector(c.logger, c.GetConfig().AllocDir, c.devicemanager.AllStats) c.hostStatsCollector = statsCollector // Add the garbage collector @@ -528,16 +531,14 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie go c.garbageCollector.Run() // Set the preconfigured list of static servers - c.configLock.RLock() - if len(c.configCopy.Servers) > 0 { - if _, err := 
c.setServersImpl(c.configCopy.Servers, true); err != nil { + if len(cfg.Servers) > 0 { + if _, err := c.setServersImpl(cfg.Servers, true); err != nil { logger.Warn("none of the configured servers are valid", "error", err) } } - c.configLock.RUnlock() // Setup Consul discovery if enabled - if c.configCopy.ConsulConfig.ClientAutoJoin != nil && *c.configCopy.ConsulConfig.ClientAutoJoin { + if cfg.ConsulConfig.ClientAutoJoin != nil && *cfg.ConsulConfig.ClientAutoJoin { c.shutdownGroup.Go(c.consulDiscovery) if c.servers.NumServers() == 0 { // No configured servers; trigger discovery manually @@ -571,7 +572,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie "The safest way to proceed is to manually stop running task processes "+ "and remove Nomad's state and alloc directories before "+ "restarting. Lost allocations will be rescheduled.", - "state_dir", c.config.StateDir, "alloc_dir", c.config.AllocDir) + "state_dir", cfg.StateDir, "alloc_dir", cfg.AllocDir) logger.Error("Corrupt state is often caused by a bug. Please " + "report as much information as possible to " + "https://github.com/hashicorp/nomad/issues") @@ -605,8 +606,9 @@ func (c *Client) Ready() <-chan struct{} { // needed before we begin starting its various components. func (c *Client) init() error { // Ensure the state dir exists if we have one - if c.config.StateDir != "" { - if err := os.MkdirAll(c.config.StateDir, 0700); err != nil { + conf := c.GetConfig() + if conf.StateDir != "" { + if err := os.MkdirAll(conf.StateDir, 0700); err != nil { return fmt.Errorf("failed creating state dir: %s", err) } @@ -622,12 +624,14 @@ func (c *Client) init() error { return fmt.Errorf("failed to find temporary directory for the StateDir: %v", err) } - c.config.StateDir = p + conf = c.UpdateConfig(func(c *config.Config) { + c.StateDir = p + }) } - c.logger.Info("using state directory", "state_dir", c.config.StateDir) + c.logger.Info("using state directory", "state_dir", conf.StateDir) // Open the state database - db, err := c.config.StateDBFactory(c.logger, c.config.StateDir) + db, err := conf.StateDBFactory(c.logger, conf.StateDir) if err != nil { return fmt.Errorf("failed to open state database: %v", err) } @@ -645,8 +649,8 @@ func (c *Client) init() error { c.stateDB = db // Ensure the alloc dir exists if we have one - if c.config.AllocDir != "" { - if err := os.MkdirAll(c.config.AllocDir, 0711); err != nil { + if conf.AllocDir != "" { + if err := os.MkdirAll(conf.AllocDir, 0711); err != nil { return fmt.Errorf("failed creating alloc dir: %s", err) } } else { @@ -666,30 +670,36 @@ func (c *Client) init() error { return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err) } - c.config.AllocDir = p + conf = c.UpdateConfig(func(c *config.Config) { + c.AllocDir = p + }) } - c.logger.Info("using alloc directory", "alloc_dir", c.config.AllocDir) + c.logger.Info("using alloc directory", "alloc_dir", conf.AllocDir) reserved := "" - if c.config.Node != nil && c.config.Node.ReservedResources != nil { + if conf.Node != nil && conf.Node.ReservedResources != nil { // Node should always be non-nil due to initialization in the // agent package, but don't risk a panic just for a long line. 
- reserved = c.config.Node.ReservedResources.Networks.ReservedHostPorts + reserved = conf.Node.ReservedResources.Networks.ReservedHostPorts } c.logger.Info("using dynamic ports", - "min", c.config.MinDynamicPort, - "max", c.config.MaxDynamicPort, + "min", conf.MinDynamicPort, + "max", conf.MaxDynamicPort, "reserved", reserved, ) // Ensure cgroups are created on linux platform if runtime.GOOS == "linux" && c.cpusetManager != nil { // use the client configuration for reservable_cores if set - cores := c.config.ReservableCores + cores := conf.ReservableCores if len(cores) == 0 { // otherwise lookup the effective cores from the parent cgroup - cores, _ = cgutil.GetCPUsFromCgroup(c.config.CgroupParent) + cores, err = cgutil.GetCPUsFromCgroup(conf.CgroupParent) + if err != nil { + c.logger.Warn("failed to lookup cpuset from cgroup parent, and not set as reservable_cores", "parent", conf.CgroupParent) + // will continue with a disabled cpuset manager + } } if cpuErr := c.cpusetManager.Init(cores); cpuErr != nil { // If the client cannot initialize the cgroup then reserved cores will not be reported and the cpuset manager @@ -728,9 +738,9 @@ func (c *Client) reloadTLSConnections(newConfig *nconfig.TLSConfig) error { // Keep the client configuration up to date as we use configuration values to // decide on what type of connections to accept - c.configLock.Lock() - c.config.TLSConfig = newConfig - c.configLock.Unlock() + c.UpdateConfig(func(c *config.Config) { + c.TLSConfig = newConfig + }) c.connPool.ReloadTLS(tlsWrap) @@ -739,7 +749,8 @@ func (c *Client) reloadTLSConnections(newConfig *nconfig.TLSConfig) error { // Reload allows a client to reload its configuration on the fly func (c *Client) Reload(newConfig *config.Config) error { - shouldReloadTLS, err := tlsutil.ShouldReloadRPCConnections(c.config.TLSConfig, newConfig.TLSConfig) + existing := c.GetConfig() + shouldReloadTLS, err := tlsutil.ShouldReloadRPCConnections(existing.TLSConfig, newConfig.TLSConfig) if err != nil { c.logger.Error("error parsing TLS configuration", "error", err) return err @@ -758,31 +769,50 @@ func (c *Client) Leave() error { return nil } -// GetConfig returns the config of the client +// GetConfig returns the config of the client. Do *not* mutate without first +// calling Copy(). func (c *Client) GetConfig() *config.Config { c.configLock.Lock() defer c.configLock.Unlock() - return c.configCopy + return c.config +} + +// UpdateConfig allows mutating the configuration. The updated configuration is +// returned. 
+func (c *Client) UpdateConfig(cb func(*config.Config)) *config.Config { + c.configLock.Lock() + defer c.configLock.Unlock() + + // Create a copy of the active config + newConfig := c.config.Copy() + + // Pass the copy to the supplied callback for mutation + cb(newConfig) + + // Set new config struct + c.config = newConfig + + return newConfig } // Datacenter returns the datacenter for the given client func (c *Client) Datacenter() string { - return c.config.Node.Datacenter + return c.GetConfig().Node.Datacenter } // Region returns the region for the given client func (c *Client) Region() string { - return c.config.Region + return c.GetConfig().Region } // NodeID returns the node ID for the given client func (c *Client) NodeID() string { - return c.config.Node.ID + return c.GetConfig().Node.ID } // secretNodeID returns the secret node ID for the given client func (c *Client) secretNodeID() string { - return c.config.Node.SecretID + return c.GetConfig().Node.SecretID } // Shutdown is used to tear down the client @@ -805,7 +835,7 @@ func (c *Client) Shutdown() error { c.garbageCollector.Stop() arGroup := group{} - if c.config.DevMode { + if c.GetConfig().DevMode { // In DevMode destroy all the running allocations. for _, ar := range c.getAllocRunners() { ar.Destroy() @@ -896,27 +926,36 @@ func (c *Client) CollectAllAllocs() { c.garbageCollector.CollectAll() } -func (c *Client) RestartAllocation(allocID, taskName string) error { +func (c *Client) RestartAllocation(allocID, taskName string, allTasks bool) error { + if allTasks && taskName != "" { + return fmt.Errorf("task name cannot be set when restarting all tasks") + } + ar, err := c.getAllocRunner(allocID) if err != nil { return err } - event := structs.NewTaskEvent(structs.TaskRestartSignal). - SetRestartReason("User requested restart") - if taskName != "" { + event := structs.NewTaskEvent(structs.TaskRestartSignal). + SetRestartReason("User requested task to restart") return ar.RestartTask(taskName, event) } - return ar.RestartAll(event) + if allTasks { + event := structs.NewTaskEvent(structs.TaskRestartSignal). + SetRestartReason("User requested all tasks to restart") + return ar.RestartAll(event) + } + + event := structs.NewTaskEvent(structs.TaskRestartSignal). + SetRestartReason("User requested running tasks to restart") + return ar.RestartRunning(event) } // Node returns the locally registered node func (c *Client) Node() *structs.Node { - c.configLock.RLock() - defer c.configLock.RUnlock() - return c.configCopy.Node + return c.GetConfig().Node } // getAllocRunner returns an AllocRunner or an UnknownAllocation error if the @@ -1012,11 +1051,12 @@ func (c *Client) computeAllocatedDeviceGroupStats(devices []*structs.AllocatedDe // allocation, and has been created by a trusted party that has privileged // knowledge of the client's secret identifier func (c *Client) ValidateMigrateToken(allocID, migrateToken string) bool { - if !c.config.ACLEnabled { + conf := c.GetConfig() + if !conf.ACLEnabled { return true } - return structs.CompareMigrateToken(allocID, c.secretNodeID(), migrateToken) + return structs.CompareMigrateToken(allocID, conf.Node.SecretID, migrateToken) } // GetAllocFS returns the AllocFS interface for the alloc dir of an allocation @@ -1119,7 +1159,8 @@ func (c *Client) setServersImpl(in []string, force bool) (int, error) { // If there are errors restoring a specific allocation it is marked // as failed whenever possible. 
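As an aside, here is a minimal standalone sketch of the copy-on-write pattern the new `GetConfig`/`UpdateConfig` pair implements. `Client` and `Config` below are simplified stand-ins for the real client/config types; only the lock, `Copy()`, mutate-the-copy, and pointer-swap shape is taken from this diff.

```go
// Standalone sketch of the copy-on-write config access pattern.
// Config and Client are stand-ins, not the real Nomad types.
package main

import (
	"fmt"
	"sync"
)

type Config struct {
	StateDir string
	Servers  []string
}

// Copy returns a deep copy so readers never observe in-place mutation.
func (c *Config) Copy() *Config {
	if c == nil {
		return nil
	}
	nc := *c
	nc.Servers = append([]string(nil), c.Servers...)
	return &nc
}

type Client struct {
	configLock sync.Mutex
	config     *Config
}

// GetConfig returns the active config; callers must treat it as read-only.
func (c *Client) GetConfig() *Config {
	c.configLock.Lock()
	defer c.configLock.Unlock()
	return c.config
}

// UpdateConfig copies the active config, applies cb to the copy,
// and swaps the copy in as the new active config.
func (c *Client) UpdateConfig(cb func(*Config)) *Config {
	c.configLock.Lock()
	defer c.configLock.Unlock()
	newConfig := c.config.Copy()
	cb(newConfig)
	c.config = newConfig
	return newConfig
}

func main() {
	c := &Client{config: &Config{StateDir: "/tmp/nomad"}}
	before := c.GetConfig()
	after := c.UpdateConfig(func(cfg *Config) { cfg.StateDir = "/var/nomad" })
	// Earlier readers keep their snapshot; new readers see the updated copy.
	fmt.Println(before.StateDir, after.StateDir)
}
```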
func (c *Client) restoreState() error { - if c.config.DevMode { + conf := c.GetConfig() + if conf.DevMode { return nil } @@ -1163,11 +1204,10 @@ func (c *Client) restoreState() error { prevAllocWatcher := allocwatcher.NoopPrevAlloc{} prevAllocMigrator := allocwatcher.NoopPrevAlloc{} - c.configLock.RLock() arConf := &allocrunner.Config{ Alloc: alloc, Logger: c.logger, - ClientConfig: c.configCopy, + ClientConfig: conf, StateDB: c.stateDB, StateUpdater: c, DeviceStatsReporter: c, @@ -1188,7 +1228,6 @@ func (c *Client) restoreState() error { RPCClient: c, Getter: c.getter, } - c.configLock.RUnlock() ar, err := allocrunner.NewAllocRunner(arConf) if err != nil { @@ -1248,8 +1287,8 @@ func (c *Client) restoreState() error { // wait until it gets allocs from server to launch them. // // See: -// * https://github.com/hashicorp/nomad/pull/6207 -// * https://github.com/hashicorp/nomad/issues/5984 +// - https://github.com/hashicorp/nomad/pull/6207 +// - https://github.com/hashicorp/nomad/issues/5984 // // COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported func (c *Client) hasLocalState(alloc *structs.Allocation) bool { @@ -1332,13 +1371,13 @@ func (c *Client) NumAllocs() int { return n } -// nodeID restores, or generates if necessary, a unique node ID and SecretID. -// The node ID is, if available, a persistent unique ID. The secret ID is a -// high-entropy random UUID. -func (c *Client) nodeID() (id, secret string, err error) { +// ensureNodeID restores, or generates if necessary, a unique node ID and +// SecretID. The node ID is, if available, a persistent unique ID. The secret +// ID is a high-entropy random UUID. +func ensureNodeID(conf *config.Config) (id, secret string, err error) { var hostID string hostInfo, err := host.Info() - if !c.config.NoHostUUID && err == nil { + if !conf.NoHostUUID && err == nil { if hashed, ok := helper.HashUUID(hostInfo.HostID); ok { hostID = hashed } @@ -1351,19 +1390,19 @@ func (c *Client) nodeID() (id, secret string, err error) { } // Do not persist in dev mode - if c.config.DevMode { + if conf.DevMode { return hostID, uuid.Generate(), nil } // Attempt to read existing ID - idPath := filepath.Join(c.config.StateDir, "client-id") + idPath := filepath.Join(conf.StateDir, "client-id") idBuf, err := ioutil.ReadFile(idPath) if err != nil && !os.IsNotExist(err) { return "", "", err } // Attempt to read existing secret ID - secretPath := filepath.Join(c.config.StateDir, "secret-id") + secretPath := filepath.Join(conf.StateDir, "secret-id") secretBuf, err := ioutil.ReadFile(secretPath) if err != nil && !os.IsNotExist(err) { return "", "", err @@ -1398,13 +1437,18 @@ func (c *Client) nodeID() (id, secret string, err error) { // setupNode is used to setup the initial node func (c *Client) setupNode() error { - node := c.config.Node + c.configLock.Lock() + defer c.configLock.Unlock() + + newConfig := c.config.Copy() + node := newConfig.Node if node == nil { node = &structs.Node{} - c.config.Node = node + newConfig.Node = node } + // Generate an ID and secret for the node - id, secretID, err := c.nodeID() + id, secretID, err := ensureNodeID(newConfig) if err != nil { return fmt.Errorf("node ID setup failed: %v", err) } @@ -1431,8 +1475,8 @@ func (c *Client) setupNode() error { } if node.NodeResources == nil { node.NodeResources = &structs.NodeResources{} - node.NodeResources.MinDynamicPort = c.config.MinDynamicPort - node.NodeResources.MaxDynamicPort = c.config.MaxDynamicPort + node.NodeResources.MinDynamicPort = newConfig.MinDynamicPort + 
node.NodeResources.MaxDynamicPort = newConfig.MaxDynamicPort } if node.ReservedResources == nil { node.ReservedResources = &structs.NodeReservedResources{} @@ -1449,11 +1493,11 @@ func (c *Client) setupNode() error { if node.Name == "" { node.Name, _ = os.Hostname() } - node.CgroupParent = c.config.CgroupParent + node.CgroupParent = newConfig.CgroupParent if node.HostVolumes == nil { - if l := len(c.config.HostVolumes); l != 0 { + if l := len(newConfig.HostVolumes); l != 0 { node.HostVolumes = make(map[string]*structs.ClientHostVolumeConfig, l) - for k, v := range c.config.HostVolumes { + for k, v := range newConfig.HostVolumes { if _, err := os.Stat(v.Path); err != nil { return fmt.Errorf("failed to validate volume %s, err: %v", v.Name, err) } @@ -1462,9 +1506,9 @@ func (c *Client) setupNode() error { } } if node.HostNetworks == nil { - if l := len(c.config.HostNetworks); l != 0 { + if l := len(newConfig.HostNetworks); l != 0 { node.HostNetworks = make(map[string]*structs.ClientHostNetworkConfig, l) - for k, v := range c.config.HostNetworks { + for k, v := range newConfig.HostNetworks { node.HostNetworks[k] = v.Copy() } } @@ -1489,6 +1533,7 @@ func (c *Client) setupNode() error { node.Meta["connect.proxy_concurrency"] = defaultConnectProxyConcurrency } + c.config = newConfig return nil } @@ -1499,34 +1544,35 @@ func (c *Client) updateNodeFromFingerprint(response *fingerprint.FingerprintResp defer c.configLock.Unlock() nodeHasChanged := false + newConfig := c.config.Copy() for name, newVal := range response.Attributes { - oldVal := c.config.Node.Attributes[name] + oldVal := newConfig.Node.Attributes[name] if oldVal == newVal { continue } nodeHasChanged = true if newVal == "" { - delete(c.config.Node.Attributes, name) + delete(newConfig.Node.Attributes, name) } else { - c.config.Node.Attributes[name] = newVal + newConfig.Node.Attributes[name] = newVal } } // update node links and resources from the diff created from // fingerprinting for name, newVal := range response.Links { - oldVal := c.config.Node.Links[name] + oldVal := newConfig.Node.Links[name] if oldVal == newVal { continue } nodeHasChanged = true if newVal == "" { - delete(c.config.Node.Links, name) + delete(newConfig.Node.Links, name) } else { - c.config.Node.Links[name] = newVal + newConfig.Node.Links[name] = newVal } } @@ -1536,9 +1582,9 @@ func (c *Client) updateNodeFromFingerprint(response *fingerprint.FingerprintResp if response.Resources != nil { response.Resources.Networks = updateNetworks( response.Resources.Networks, - c.config) - if !c.config.Node.Resources.Equals(response.Resources) { - c.config.Node.Resources.Merge(response.Resources) + newConfig) + if !newConfig.Node.Resources.Equals(response.Resources) { + newConfig.Node.Resources.Merge(response.Resources) nodeHasChanged = true } } @@ -1548,26 +1594,27 @@ func (c *Client) updateNodeFromFingerprint(response *fingerprint.FingerprintResp if response.NodeResources != nil { response.NodeResources.Networks = updateNetworks( response.NodeResources.Networks, - c.config) - if !c.config.Node.NodeResources.Equals(response.NodeResources) { - c.config.Node.NodeResources.Merge(response.NodeResources) + newConfig) + if !newConfig.Node.NodeResources.Equals(response.NodeResources) { + newConfig.Node.NodeResources.Merge(response.NodeResources) nodeHasChanged = true } - response.NodeResources.MinDynamicPort = c.config.MinDynamicPort - response.NodeResources.MaxDynamicPort = c.config.MaxDynamicPort - if c.config.Node.NodeResources.MinDynamicPort != response.NodeResources.MinDynamicPort 
|| - c.config.Node.NodeResources.MaxDynamicPort != response.NodeResources.MaxDynamicPort { + response.NodeResources.MinDynamicPort = newConfig.MinDynamicPort + response.NodeResources.MaxDynamicPort = newConfig.MaxDynamicPort + if newConfig.Node.NodeResources.MinDynamicPort != response.NodeResources.MinDynamicPort || + newConfig.Node.NodeResources.MaxDynamicPort != response.NodeResources.MaxDynamicPort { nodeHasChanged = true } } if nodeHasChanged { - c.updateNodeLocked() + c.config = newConfig + c.updateNode() } - return c.configCopy.Node + return newConfig.Node } // updateNetworks filters and overrides network speed of host networks based @@ -1608,7 +1655,7 @@ func updateNetworks(up structs.Networks, c *config.Config) structs.Networks { // retryIntv calculates a retry interval value given the base func (c *Client) retryIntv(base time.Duration) time.Duration { - if c.config.DevMode { + if c.GetConfig().DevMode { return devModeRetryIntv } return base + helper.RandomStagger(base) @@ -1630,7 +1677,7 @@ func (c *Client) registerAndHeartbeat() { // we want to do this quickly. We want to do it extra quickly // in development mode. var heartbeat <-chan time.Time - if c.config.DevMode { + if c.GetConfig().DevMode { heartbeat = time.After(0) } else { heartbeat = time.After(helper.RandomStagger(initialHeartbeatStagger)) @@ -1674,7 +1721,7 @@ func (c *Client) lastHeartbeat() time.Time { // getHeartbeatRetryIntv is used to retrieve the time to wait before attempting // another heartbeat. func (c *Client) getHeartbeatRetryIntv(err error) time.Duration { - if c.config.DevMode { + if c.GetConfig().DevMode { return devModeRetryIntv } @@ -1860,9 +1907,8 @@ func (c *Client) retryRegisterNode() { // registerNode is used to register the node or update the registration func (c *Client) registerNode() error { - node := c.Node() req := structs.NodeRegisterRequest{ - Node: node, + Node: c.Node(), WriteRequest: structs.WriteRequest{Region: c.Region()}, } var resp structs.NodeUpdateResponse @@ -1871,10 +1917,9 @@ func (c *Client) registerNode() error { } // Update the node status to ready after we register. - c.configLock.Lock() - node.Status = structs.NodeStatusReady - c.config.Node.Status = structs.NodeStatusReady - c.configLock.Unlock() + c.UpdateConfig(func(c *config.Config) { + c.Node.Status = structs.NodeStatusReady + }) c.logger.Info("node registration complete") if len(resp.EvalIDs) != 0 { @@ -2257,14 +2302,9 @@ OUTER: } } -// updateNode updates the Node copy and triggers the client to send the updated -// Node to the server. This should be done while the caller holds the -// configLock lock. -func (c *Client) updateNodeLocked() { - // Update the config copy. - node := c.config.Node.Copy() - c.configCopy.Node = node - +// updateNode signals the client to send the updated +// Node to the server. 
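For context on the trigger channel that `updateNode` writes to, a standalone sketch of the coalescing-signal pattern (a buffered channel of size one plus a non-blocking send) follows; the worker loop and timings are illustrative, not Nomad code.

```go
// Standalone sketch of the coalescing-trigger pattern used by updateNode and
// triggerDiscovery: a buffered channel of size 1 means repeated signals while
// the worker is busy collapse into a single pending update.
package main

import (
	"fmt"
	"time"
)

func main() {
	trigger := make(chan struct{}, 1)

	// Worker performs one update per received signal.
	go func() {
		for range trigger {
			fmt.Println("sending updated node to server")
			time.Sleep(50 * time.Millisecond)
		}
	}()

	signal := func() {
		select {
		case trigger <- struct{}{}:
			// worker released, or one update now pending
		default:
			// an update is already queued; coalesce this signal
		}
	}

	for i := 0; i < 5; i++ {
		signal()
	}
	time.Sleep(200 * time.Millisecond)
}
```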
+func (c *Client) updateNode() { select { case c.triggerNodeUpdate <- struct{}{}: // Node update goroutine was released to execute @@ -2382,7 +2422,7 @@ func makeFailedAlloc(add *structs.Allocation, err error) *structs.Allocation { stripped.DeploymentStatus = add.DeploymentStatus.Copy() } else { stripped.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Timestamp: failTime, } } @@ -2494,20 +2534,16 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error PreviousRunner: c.allocs[alloc.PreviousAllocation], PreemptedRunners: preemptedAllocs, RPC: c, - Config: c.configCopy, + Config: c.GetConfig(), MigrateToken: migrateToken, Logger: c.logger, } prevAllocWatcher, prevAllocMigrator := allocwatcher.NewAllocWatcher(watcherConfig) - // Copy the config since the node can be swapped out as it is being updated. - // The long term fix is to pass in the config and node separately and then - // we don't have to do a copy. - c.configLock.RLock() arConf := &allocrunner.Config{ Alloc: alloc, Logger: c.logger, - ClientConfig: c.configCopy, + ClientConfig: c.GetConfig(), StateDB: c.stateDB, Consul: c.consulService, ConsulProxies: c.consulProxies, @@ -2527,7 +2563,6 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error RPCClient: c, Getter: c.getter, } - c.configLock.RUnlock() ar, err := allocrunner.NewAllocRunner(arConf) if err != nil { @@ -2556,7 +2591,7 @@ func (c *Client) setupConsulTokenClient() error { // with vault. func (c *Client) setupVaultClient() error { var err error - c.vaultClient, err = vaultclient.NewVaultClient(c.config.VaultConfig, c.logger, c.deriveToken) + c.vaultClient, err = vaultclient.NewVaultClient(c.GetConfig().VaultConfig, c.logger, c.deriveToken) if err != nil { return err } @@ -2577,7 +2612,7 @@ func (c *Client) setupVaultClient() error { func (c *Client) setupNomadServiceRegistrationHandler() { cfg := nsd.ServiceRegistrationHandlerCfg{ Datacenter: c.Datacenter(), - Enabled: c.config.NomadServiceDiscovery, + Enabled: c.GetConfig().NomadServiceDiscovery, NodeID: c.NodeID(), NodeSecret: c.secretNodeID(), Region: c.Region(), @@ -2757,7 +2792,8 @@ func taskIsPresent(taskName string, tasks []*structs.Task) bool { // triggerDiscovery causes a Consul discovery to begin (if one hasn't already) func (c *Client) triggerDiscovery() { - if c.configCopy.ConsulConfig.ClientAutoJoin != nil && *c.configCopy.ConsulConfig.ClientAutoJoin { + config := c.GetConfig() + if config.ConsulConfig.ClientAutoJoin != nil && *config.ConsulConfig.ClientAutoJoin { select { case c.triggerDiscoveryCh <- struct{}{}: // Discovery goroutine was released to execute @@ -2810,7 +2846,7 @@ func (c *Client) consulDiscoveryImpl() error { }, } - serviceName := c.configCopy.ConsulConfig.ServerServiceName + serviceName := c.GetConfig().ConsulConfig.ServerServiceName var mErr multierror.Error var nomadServers servers.Servers consulLogger.Debug("bootstrap contacting Consul DCs", "consul_dcs", dcs) @@ -2904,13 +2940,14 @@ func (c *Client) emitStats() { next := time.NewTimer(0) defer next.Stop() for { + config := c.GetConfig() select { case <-next.C: err := c.hostStatsCollector.Collect() - next.Reset(c.config.StatsCollectionInterval) + next.Reset(config.StatsCollectionInterval) if err != nil { c.logger.Warn("error fetching host resource usage stats", "error", err) - } else if c.config.PublishNodeMetrics { + } else if config.PublishNodeMetrics { // Publish Node metrics if operator has opted in 
c.emitHostStats() } @@ -2971,9 +3008,7 @@ func (c *Client) setGaugeForDiskStats(nodeID string, hStats *stats.HostStats, ba // setGaugeForAllocationStats proxies metrics for allocation specific statistics func (c *Client) setGaugeForAllocationStats(nodeID string, baseLabels []metrics.Label) { - c.configLock.RLock() - node := c.configCopy.Node - c.configLock.RUnlock() + node := c.GetConfig().Node total := node.NodeResources res := node.ReservedResources allocated := c.getAllocatedResources(node) @@ -3072,14 +3107,11 @@ func (c *Client) emitClientMetrics() { // labels takes the base labels and appends the node state func (c *Client) labels() []metrics.Label { - c.configLock.RLock() - nodeStatus := c.configCopy.Node.Status - nodeEligibility := c.configCopy.Node.SchedulingEligibility - c.configLock.RUnlock() + node := c.Node() return append(c.baseLabels, - metrics.Label{Name: "node_status", Value: nodeStatus}, - metrics.Label{Name: "node_scheduling_eligibility", Value: nodeEligibility}, + metrics.Label{Name: "node_status", Value: node.Status}, + metrics.Label{Name: "node_scheduling_eligibility", Value: node.SchedulingEligibility}, ) } diff --git a/client/client_test.go b/client/client_test.go index 8d7fa6c435c..f41021a754d 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -191,63 +191,55 @@ func TestClient_Fingerprint_Periodic(t *testing.T) { }) defer cleanup() - node := c1.config.Node - { - // Ensure the mock driver is registered on the client - testutil.WaitForResult(func() (bool, error) { - c1.configLock.Lock() - defer c1.configLock.Unlock() + // Ensure the mock driver is registered on the client + testutil.WaitForResult(func() (bool, error) { + node := c1.Node() - // assert that the driver is set on the node attributes - mockDriverInfoAttr := node.Attributes["driver.mock_driver"] - if mockDriverInfoAttr == "" { - return false, fmt.Errorf("mock driver is empty when it should be set on the node attributes") - } + // assert that the driver is set on the node attributes + mockDriverInfoAttr := node.Attributes["driver.mock_driver"] + if mockDriverInfoAttr == "" { + return false, fmt.Errorf("mock driver is empty when it should be set on the node attributes") + } - mockDriverInfo := node.Drivers["mock_driver"] + mockDriverInfo := node.Drivers["mock_driver"] - // assert that the Driver information for the node is also set correctly - if mockDriverInfo == nil { - return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers") - } - if !mockDriverInfo.Detected { - return false, fmt.Errorf("mock driver should be set as detected") - } - if !mockDriverInfo.Healthy { - return false, fmt.Errorf("mock driver should be set as healthy") - } - if mockDriverInfo.HealthDescription == "" { - return false, fmt.Errorf("mock driver description should not be empty") - } - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) - } + // assert that the Driver information for the node is also set correctly + if mockDriverInfo == nil { + return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers") + } + if !mockDriverInfo.Detected { + return false, fmt.Errorf("mock driver should be set as detected") + } + if !mockDriverInfo.Healthy { + return false, fmt.Errorf("mock driver should be set as healthy") + } + if mockDriverInfo.HealthDescription == "" { + return false, fmt.Errorf("mock driver description should not be empty") + } + return true, nil + }, func(err error) { + t.Fatalf("err: %v", err) + }) - { - testutil.WaitForResult(func() (bool, 
error) { - c1.configLock.Lock() - defer c1.configLock.Unlock() - mockDriverInfo := node.Drivers["mock_driver"] - // assert that the Driver information for the node is also set correctly - if mockDriverInfo == nil { - return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers") - } - if mockDriverInfo.Detected { - return false, fmt.Errorf("mock driver should not be set as detected") - } - if mockDriverInfo.Healthy { - return false, fmt.Errorf("mock driver should not be set as healthy") - } - if mockDriverInfo.HealthDescription == "" { - return false, fmt.Errorf("mock driver description should not be empty") - } - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) - } + testutil.WaitForResult(func() (bool, error) { + mockDriverInfo := c1.Node().Drivers["mock_driver"] + // assert that the Driver information for the node is also set correctly + if mockDriverInfo == nil { + return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers") + } + if mockDriverInfo.Detected { + return false, fmt.Errorf("mock driver should not be set as detected") + } + if mockDriverInfo.Healthy { + return false, fmt.Errorf("mock driver should not be set as healthy") + } + if mockDriverInfo.HealthDescription == "" { + return false, fmt.Errorf("mock driver description should not be empty") + } + return true, nil + }, func(err error) { + t.Fatalf("err: %v", err) + }) } // TestClient_MixedTLS asserts that when a server is running with TLS enabled @@ -1115,17 +1107,18 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { }) // initial check + conf := client.GetConfig() expectedResources := &structs.NodeResources{ // computed through test client initialization - Networks: client.configCopy.Node.NodeResources.Networks, - NodeNetworks: client.configCopy.Node.NodeResources.NodeNetworks, - Disk: client.configCopy.Node.NodeResources.Disk, + Networks: conf.Node.NodeResources.Networks, + NodeNetworks: conf.Node.NodeResources.NodeNetworks, + Disk: conf.Node.NodeResources.Disk, // injected Cpu: structs.NodeCpuResources{ CpuShares: 123, - ReservableCpuCores: client.configCopy.Node.NodeResources.Cpu.ReservableCpuCores, - TotalCpuCores: client.configCopy.Node.NodeResources.Cpu.TotalCpuCores, + ReservableCpuCores: conf.Node.NodeResources.Cpu.ReservableCpuCores, + TotalCpuCores: conf.Node.NodeResources.Cpu.TotalCpuCores, }, Memory: structs.NodeMemoryResources{MemoryMB: 1024}, Devices: []*structs.NodeDeviceResource{ @@ -1136,7 +1129,7 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { }, } - assert.EqualValues(t, expectedResources, client.configCopy.Node.NodeResources) + assert.EqualValues(t, expectedResources, conf.Node.NodeResources) // overrides of values @@ -1157,17 +1150,19 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { }, }) + conf = client.GetConfig() + expectedResources2 := &structs.NodeResources{ // computed through test client initialization - Networks: client.configCopy.Node.NodeResources.Networks, - NodeNetworks: client.configCopy.Node.NodeResources.NodeNetworks, - Disk: client.configCopy.Node.NodeResources.Disk, + Networks: conf.Node.NodeResources.Networks, + NodeNetworks: conf.Node.NodeResources.NodeNetworks, + Disk: conf.Node.NodeResources.Disk, // injected Cpu: structs.NodeCpuResources{ CpuShares: 123, - ReservableCpuCores: client.configCopy.Node.NodeResources.Cpu.ReservableCpuCores, - TotalCpuCores: client.configCopy.Node.NodeResources.Cpu.TotalCpuCores, + ReservableCpuCores: 
conf.Node.NodeResources.Cpu.ReservableCpuCores, + TotalCpuCores: conf.Node.NodeResources.Cpu.TotalCpuCores, }, Memory: structs.NodeMemoryResources{MemoryMB: 2048}, Devices: []*structs.NodeDeviceResource{ @@ -1182,7 +1177,7 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { }, } - assert.EqualValues(t, expectedResources2, client.configCopy.Node.NodeResources) + assert.EqualValues(t, expectedResources2, conf.Node.NodeResources) } diff --git a/client/config/artifact_test.go b/client/config/artifact_test.go index 0b296f8f86f..a79b4b2b72f 100644 --- a/client/config/artifact_test.go +++ b/client/config/artifact_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/stretchr/testify/require" ) @@ -34,72 +34,72 @@ func TestArtifactConfigFromAgent(t *testing.T) { { name: "invalid http read timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("invalid"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("invalid"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing HTTPReadTimeout", }, { name: "invalid http max size", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("invalid"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("invalid"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing HTTPMaxSize", }, { name: "invalid gcs timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("invalid"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("invalid"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing GCSTimeout", }, { name: "invalid git timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("invalid"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("invalid"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing GitTimeout", }, { name: "invalid hg timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("invalid"), - S3Timeout: 
helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("invalid"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing HgTimeout", }, { name: "invalid s3 timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("invalid"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("invalid"), }, expectedError: "error parsing S3Timeout", }, diff --git a/client/config/config.go b/client/config/config.go index 01d843e029b..9fde991006c 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -13,12 +13,14 @@ import ( "github.com/hashicorp/consul-template/config" "github.com/hashicorp/nomad/client/lib/cgutil" "github.com/hashicorp/nomad/command/agent/host" + "golang.org/x/exp/slices" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/bufconndialer" "github.com/hashicorp/nomad/helper/pluginutils/loader" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" structsc "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/plugins/base" @@ -358,6 +360,13 @@ type ClientTemplateConfig struct { // to wait for the cluster to become available, as is customary in distributed // systems. VaultRetry *RetryConfig `hcl:"vault_retry,optional"` + + // This controls the retry behavior when an error is returned from Nomad. + // Consul Template is highly fault tolerant, meaning it does not exit in the + // face of failure. Instead, it uses exponential back-off and retry functions + // to wait for the cluster to become available, as is customary in distributed + // systems. 
+ NomadRetry *RetryConfig `hcl:"nomad_retry,optional"` } // Copy returns a deep copy of a ClientTemplateConfig @@ -396,6 +405,10 @@ func (c *ClientTemplateConfig) Copy() *ClientTemplateConfig { nc.VaultRetry = c.VaultRetry.Copy() } + if c.NomadRetry != nil { + nc.NomadRetry = c.NomadRetry.Copy() + } + return nc } @@ -413,7 +426,8 @@ func (c *ClientTemplateConfig) IsEmpty() bool { c.MaxStaleHCL == "" && c.Wait.IsEmpty() && c.ConsulRetry.IsEmpty() && - c.VaultRetry.IsEmpty() + c.VaultRetry.IsEmpty() && + c.NomadRetry.IsEmpty() } // WaitConfig is mirrored from templateconfig.WaitConfig because we need to handle @@ -524,7 +538,7 @@ func (wc *WaitConfig) ToConsulTemplate() (*config.WaitConfig, error) { return nil, err } - result := &config.WaitConfig{Enabled: helper.BoolToPtr(true)} + result := &config.WaitConfig{Enabled: pointer.Of(true)} if wc.Min != nil { result.Min = wc.Min @@ -667,7 +681,7 @@ func (rc *RetryConfig) ToConsulTemplate() (*config.RetryConfig, error) { return nil, err } - result := &config.RetryConfig{Enabled: helper.BoolToPtr(true)} + result := &config.RetryConfig{Enabled: pointer.Of(true)} if rc.Attempts != nil { result.Attempts = rc.Attempts @@ -685,8 +699,11 @@ func (rc *RetryConfig) ToConsulTemplate() (*config.RetryConfig, error) { } func (c *Config) Copy() *Config { - nc := new(Config) - *nc = *c + if c == nil { + return nil + } + + nc := *c nc.Node = nc.Node.Copy() nc.Servers = helper.CopySliceString(nc.Servers) nc.Options = helper.CopyMapStringString(nc.Options) @@ -694,12 +711,9 @@ func (c *Config) Copy() *Config { nc.ConsulConfig = c.ConsulConfig.Copy() nc.VaultConfig = c.VaultConfig.Copy() nc.TemplateConfig = c.TemplateConfig.Copy() - if c.ReservableCores != nil { - nc.ReservableCores = make([]uint16, len(c.ReservableCores)) - copy(nc.ReservableCores, c.ReservableCores) - } + nc.ReservableCores = slices.Clone(c.ReservableCores) nc.Artifact = c.Artifact.Copy() - return nc + return &nc } // DefaultConfig returns the default configuration @@ -723,17 +737,20 @@ func DefaultConfig() *Config { TemplateConfig: &ClientTemplateConfig{ FunctionDenylist: DefaultTemplateFunctionDenylist, DisableSandbox: false, - BlockQueryWaitTime: helper.TimeToPtr(5 * time.Minute), // match Consul default - MaxStale: helper.TimeToPtr(DefaultTemplateMaxStale), // match Consul default + BlockQueryWaitTime: pointer.Of(5 * time.Minute), // match Consul default + MaxStale: pointer.Of(DefaultTemplateMaxStale), // match Consul default Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(4 * time.Minute), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(4 * time.Minute), }, ConsulRetry: &RetryConfig{ - Attempts: helper.IntToPtr(0), // unlimited + Attempts: pointer.Of(0), // unlimited }, VaultRetry: &RetryConfig{ - Attempts: helper.IntToPtr(0), // unlimited + Attempts: pointer.Of(0), // unlimited + }, + NomadRetry: &RetryConfig{ + Attempts: pointer.Of(0), // unlimited }, }, RPCHoldTimeout: 5 * time.Second, diff --git a/client/config/config_test.go b/client/config/config_test.go index 88f5bd1b741..fada640e25b 100644 --- a/client/config/config_test.go +++ b/client/config/config_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/consul-template/config" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -49,8 +49,8 @@ func TestConfigReadDefault(t *testing.T) { func mockWaitConfig() *WaitConfig { return &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - 
Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } } @@ -66,26 +66,26 @@ func TestWaitConfig_Copy(t *testing.T) { "fully-populated", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { "min-only", &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, }, { "max-only", &WaitConfig{ - Max: helper.TimeToPtr(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, &WaitConfig{ - Max: helper.TimeToPtr(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, } @@ -122,7 +122,7 @@ func TestWaitConfig_IsEmpty(t *testing.T) { { "is-not-empty", &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(10 * time.Second), }, false, }, @@ -148,8 +148,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { "are-equal", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, true, }, @@ -157,8 +157,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { "min-different", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(10 * time.Second), }, false, }, @@ -166,8 +166,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { "max-different", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(9 * time.Second), }, false, }, @@ -191,8 +191,8 @@ func TestWaitConfig_IsValid(t *testing.T) { { "is-valid", &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, "", }, @@ -209,15 +209,15 @@ func TestWaitConfig_IsValid(t *testing.T) { { "min-greater-than-max", &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(10 * time.Second), + Max: pointer.Of(5 * time.Second), }, "greater than", }, { "max-not-set", &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(10 * time.Second), }, "", }, @@ -248,36 +248,36 @@ func TestWaitConfig_Merge(t *testing.T) { "all-fields", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(9 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(9 * time.Second), }, }, { "min-only", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(10 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { "max-only", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(9 * time.Second), }, &WaitConfig{ - Min: 
helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(9 * time.Second), }, }, } @@ -298,14 +298,14 @@ func TestWaitConfig_ToConsulTemplate(t *testing.T) { ci.Parallel(t) expected := config.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } clientWaitConfig := &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } actual, err := clientWaitConfig.ToConsulTemplate() @@ -316,10 +316,10 @@ func TestWaitConfig_ToConsulTemplate(t *testing.T) { func mockRetryConfig() *RetryConfig { return &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", } } @@ -335,29 +335,29 @@ func TestRetryConfig_Copy(t *testing.T) { "fully-populated", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, }, { "attempts-only", &RetryConfig{ - Attempts: helper.IntToPtr(5), + Attempts: pointer.Of(5), }, &RetryConfig{ - Attempts: helper.IntToPtr(5), + Attempts: pointer.Of(5), }, }, { "backoff-only", &RetryConfig{ - Backoff: helper.TimeToPtr(5 * time.Second), + Backoff: pointer.Of(5 * time.Second), }, &RetryConfig{ - Backoff: helper.TimeToPtr(5 * time.Second), + Backoff: pointer.Of(5 * time.Second), }, }, { @@ -372,10 +372,10 @@ func TestRetryConfig_Copy(t *testing.T) { { "max-backoff-only", &RetryConfig{ - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, &RetryConfig{ - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, }, { @@ -421,7 +421,7 @@ func TestRetryConfig_IsEmpty(t *testing.T) { { "is-not-empty", &RetryConfig{ - Attempts: helper.IntToPtr(12), + Attempts: pointer.Of(12), }, false, }, @@ -447,10 +447,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "are-equal", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, true, @@ -459,10 +459,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "attempts-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -471,10 +471,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "backoff-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "5s", - 
MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -483,10 +483,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "backoff-hcl-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -495,10 +495,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "max-backoff-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -507,10 +507,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "max-backoff-hcl-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "9s", }, false, @@ -535,8 +535,8 @@ func TestRetryConfig_IsValid(t *testing.T) { { "is-valid", &RetryConfig{ - Backoff: helper.TimeToPtr(5 * time.Second), - MaxBackoff: helper.TimeToPtr(10 * time.Second), + Backoff: pointer.Of(5 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, "", }, @@ -553,30 +553,30 @@ func TestRetryConfig_IsValid(t *testing.T) { { "backoff-greater-than-max-backoff", &RetryConfig{ - Backoff: helper.TimeToPtr(10 * time.Second), - MaxBackoff: helper.TimeToPtr(5 * time.Second), + Backoff: pointer.Of(10 * time.Second), + MaxBackoff: pointer.Of(5 * time.Second), }, "greater than max_backoff", }, { "backoff-not-set", &RetryConfig{ - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, "", }, { "max-backoff-not-set", &RetryConfig{ - Backoff: helper.TimeToPtr(2 * time.Minute), + Backoff: pointer.Of(2 * time.Minute), }, "greater than default", }, { "max-backoff-unbounded", &RetryConfig{ - Backoff: helper.TimeToPtr(10 * time.Second), - MaxBackoff: helper.TimeToPtr(0 * time.Second), + Backoff: pointer.Of(10 * time.Second), + MaxBackoff: pointer.Of(0 * time.Second), }, "", }, @@ -607,17 +607,17 @@ func TestRetryConfig_Merge(t *testing.T) { "all-fields", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, }, @@ -625,17 +625,17 @@ func TestRetryConfig_Merge(t *testing.T) { "attempts-only", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * 
time.Second), MaxBackoffHCL: "10s", }, &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, }, @@ -643,17 +643,17 @@ func TestRetryConfig_Merge(t *testing.T) { "multi-field", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, }, @@ -675,10 +675,10 @@ func TestRetryConfig_ToConsulTemplate(t *testing.T) { ci.Parallel(t) expected := config.RetryConfig{ - Enabled: helper.BoolToPtr(true), - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), - MaxBackoff: helper.TimeToPtr(10 * time.Second), + Enabled: pointer.Of(true), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), } actual := mockRetryConfig() diff --git a/client/config/testing.go b/client/config/testing.go index 0204073fb57..51150504492 100644 --- a/client/config/testing.go +++ b/client/config/testing.go @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" testing "github.com/mitchellh/go-testing-interface" @@ -59,7 +59,7 @@ func TestClientConfig(t testing.T) (*Config, func()) { // Helps make sure we are respecting configured parent conf.CgroupParent = "testing.slice" - conf.VaultConfig.Enabled = helper.BoolToPtr(false) + conf.VaultConfig.Enabled = pointer.Of(false) conf.DevMode = true // Loosen GC threshold diff --git a/client/devicemanager/manager_test.go b/client/devicemanager/manager_test.go index c3da419e204..337f5c79525 100644 --- a/client/devicemanager/manager_test.go +++ b/client/devicemanager/manager_test.go @@ -11,8 +11,8 @@ import ( plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/state" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pluginutils/loader" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -42,7 +42,7 @@ var ( }, Attributes: map[string]*psstructs.Attribute{ "memory": { - Int: helper.Int64ToPtr(4), + Int: pointer.Of(int64(4)), Unit: "GB", }, }, @@ -61,7 +61,7 @@ var ( }, Attributes: map[string]*psstructs.Attribute{ "memory": { - Int: helper.Int64ToPtr(2), + Int: pointer.Of(int64(2)), Unit: "GB", }, }, @@ -74,14 +74,14 @@ var ( InstanceStats: map[string]*device.DeviceStats{ nvidiaDevice0ID: { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(212), + IntNumeratorVal: pointer.Of(int64(212)), Unit: "F", Desc: "Temperature", }, }, nvidiaDevice1ID: { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(218), + IntNumeratorVal: 
pointer.Of(int64(218)), Unit: "F", Desc: "Temperature", }, @@ -96,7 +96,7 @@ var ( InstanceStats: map[string]*device.DeviceStats{ intelDeviceID: { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(220), + IntNumeratorVal: pointer.Of(int64(220)), Unit: "F", Desc: "Temperature", }, diff --git a/client/driver_manager_test.go b/client/driver_manager_test.go index 8a930b75eba..996b64ff1e6 100644 --- a/client/driver_manager_test.go +++ b/client/driver_manager_test.go @@ -22,10 +22,11 @@ func TestDriverManager_Fingerprint_Run(t *testing.T) { testClient, cleanup := TestClient(t, nil) defer cleanup() + conf := testClient.GetConfig() dm := drivermanager.New(&drivermanager.Config{ Logger: testClient.logger, - Loader: testClient.config.PluginSingletonLoader, - PluginConfig: testClient.configCopy.NomadPluginConfig(), + Loader: conf.PluginSingletonLoader, + PluginConfig: conf.NomadPluginConfig(), Updater: testClient.updateNodeFromDriver, EventHandlerFactory: testClient.GetTaskEventHandler, State: testClient.stateDB, @@ -35,7 +36,7 @@ func TestDriverManager_Fingerprint_Run(t *testing.T) { defer dm.Shutdown() testutil.WaitForResult(func() (bool, error) { - node := testClient.configCopy.Node + node := testClient.Node() d, ok := node.Drivers["mock_driver"] if !ok { @@ -73,10 +74,11 @@ func TestDriverManager_Fingerprint_Periodic(t *testing.T) { }) defer cleanup() + conf := testClient.GetConfig() dm := drivermanager.New(&drivermanager.Config{ Logger: testClient.logger, - Loader: testClient.config.PluginSingletonLoader, - PluginConfig: testClient.configCopy.NomadPluginConfig(), + Loader: conf.PluginSingletonLoader, + PluginConfig: conf.NomadPluginConfig(), Updater: testClient.updateNodeFromDriver, EventHandlerFactory: testClient.GetTaskEventHandler, State: testClient.stateDB, @@ -134,10 +136,11 @@ func TestDriverManager_NodeAttributes_Run(t *testing.T) { }) defer cleanup() + conf := testClient.GetConfig() dm := drivermanager.New(&drivermanager.Config{ Logger: testClient.logger, - Loader: testClient.config.PluginSingletonLoader, - PluginConfig: testClient.configCopy.NomadPluginConfig(), + Loader: conf.PluginSingletonLoader, + PluginConfig: conf.NomadPluginConfig(), Updater: testClient.updateNodeFromDriver, EventHandlerFactory: testClient.GetTaskEventHandler, State: testClient.stateDB, diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go index 65f8c355cbc..3059c1a2763 100644 --- a/client/dynamicplugins/registry.go +++ b/client/dynamicplugins/registry.go @@ -98,7 +98,8 @@ type PluginInfo struct { // PluginConnectionInfo is the data required to connect to the plugin. // note: We currently only support Unix Domain Sockets, but this may be expanded -// to support other connection modes in the future. +// +// to support other connection modes in the future. type PluginConnectionInfo struct { // SocketPath is the path to the plugins api socket. 
SocketPath string diff --git a/client/fingerprint/env_aws_cpu.go b/client/fingerprint/env_aws_cpu.go index d27c80aa797..5678a1105f8 100644 --- a/client/fingerprint/env_aws_cpu.go +++ b/client/fingerprint/env_aws_cpu.go @@ -382,6 +382,7 @@ var instanceTypeCPU = map[string]CPU{ "m6id.metal": newCPU(128, 3.5), "m6id.xlarge": newCPU(4, 3.5), "mac1.metal": newCPU(12, 3.2), + "mac2.metal": newCPU(8, 3.2), "p2.16xlarge": newCPU(64, 2.3), "p2.8xlarge": newCPU(32, 2.7), "p2.xlarge": newCPU(4, 2.7), @@ -462,6 +463,17 @@ var instanceTypeCPU = map[string]CPU{ "r5n.large": newCPU(2, 3.1), "r5n.metal": newCPU(96, 3.1), "r5n.xlarge": newCPU(4, 3.1), + "r6a.12xlarge": newCPU(48, 3.6), + "r6a.16xlarge": newCPU(64, 3.6), + "r6a.24xlarge": newCPU(96, 3.6), + "r6a.2xlarge": newCPU(8, 3.6), + "r6a.32xlarge": newCPU(128, 3.6), + "r6a.48xlarge": newCPU(192, 3.6), + "r6a.4xlarge": newCPU(16, 3.6), + "r6a.8xlarge": newCPU(32, 3.6), + "r6a.large": newCPU(2, 3.6), + "r6a.metal": newCPU(192, 3.6), + "r6a.xlarge": newCPU(4, 3.6), "r6g.12xlarge": newCPU(48, 2.5), "r6g.16xlarge": newCPU(64, 2.5), "r6g.2xlarge": newCPU(8, 2.5), @@ -490,6 +502,16 @@ var instanceTypeCPU = map[string]CPU{ "r6i.large": newCPU(2, 3.5), "r6i.metal": newCPU(128, 3.5), "r6i.xlarge": newCPU(4, 3.5), + "r6id.12xlarge": newCPU(48, 3.5), + "r6id.16xlarge": newCPU(64, 3.5), + "r6id.24xlarge": newCPU(96, 3.5), + "r6id.2xlarge": newCPU(8, 3.5), + "r6id.32xlarge": newCPU(128, 3.5), + "r6id.4xlarge": newCPU(16, 3.5), + "r6id.8xlarge": newCPU(32, 3.5), + "r6id.large": newCPU(2, 3.5), + "r6id.metal": newCPU(128, 3.5), + "r6id.xlarge": newCPU(4, 3.5), "t2.2xlarge": newCPU(8, 2.3), "t2.large": newCPU(2, 2.3), "t2.medium": newCPU(2, 2.3), diff --git a/client/fs_endpoint.go b/client/fs_endpoint.go index 2796c45c2b3..3d988520ac4 100644 --- a/client/fs_endpoint.go +++ b/client/fs_endpoint.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/nomad/client/allocdir" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -166,32 +166,32 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&req); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if req.AllocID == "" { - handleStreamResultError(allocIDNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(allocIDNotPresentErr, pointer.Of(int64(400)), encoder) return } alloc, err := f.c.GetAlloc(req.AllocID) if err != nil { - handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), pointer.Of(int64(404)), encoder) return } // Check read permissions if aclObj, err := f.c.ResolveToken(req.QueryOptions.AuthToken); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(403), encoder) + handleStreamResultError(err, pointer.Of(int64(403)), encoder) return } else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityReadFS) { - handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) return } // Validate the arguments if req.Path == "" { - 
handleStreamResultError(pathNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(pathNotPresentErr, pointer.Of(int64(400)), encoder) return } switch req.Origin { @@ -199,15 +199,15 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { case "": req.Origin = "start" default: - handleStreamResultError(invalidOrigin, helper.Int64ToPtr(400), encoder) + handleStreamResultError(invalidOrigin, pointer.Of(int64(400)), encoder) return } fs, err := f.c.GetAllocFS(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if structs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) @@ -217,13 +217,13 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { // Calculate the offset fileInfo, err := fs.Stat(req.Path) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if fileInfo.IsDir { handleStreamResultError( fmt.Errorf("file %q is a directory", req.Path), - helper.Int64ToPtr(400), encoder) + pointer.Of(int64(400)), encoder) return } @@ -325,7 +325,7 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return } } @@ -341,17 +341,17 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&req); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if req.AllocID == "" { - handleStreamResultError(allocIDNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(allocIDNotPresentErr, pointer.Of(int64(400)), encoder) return } alloc, err := f.c.GetAlloc(req.AllocID) if err != nil { - handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), pointer.Of(int64(404)), encoder) return } @@ -370,13 +370,13 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { // Validate the arguments if req.Task == "" { - handleStreamResultError(taskNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(taskNotPresentErr, pointer.Of(int64(400)), encoder) return } switch req.LogType { case "stdout", "stderr": default: - handleStreamResultError(logTypeNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(logTypeNotPresentErr, pointer.Of(int64(400)), encoder) return } switch req.Origin { @@ -384,15 +384,15 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { case "": req.Origin = "start" default: - handleStreamResultError(invalidOrigin, helper.Int64ToPtr(400), encoder) + handleStreamResultError(invalidOrigin, pointer.Of(int64(400)), encoder) return } fs, err := f.c.GetAllocFS(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if structs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) @@ -401,9 +401,9 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { allocState, err := f.c.GetAllocState(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if structs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = 
pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) @@ -415,7 +415,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { if taskState == nil { handleStreamResultError( fmt.Errorf("unknown task name %q", req.Task), - helper.Int64ToPtr(400), + pointer.Of(int64(400)), encoder) return } @@ -423,7 +423,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { if taskState.StartedAt.IsZero() { handleStreamResultError( fmt.Errorf("task %q not started yet. No logs available", req.Task), - helper.Int64ToPtr(404), + pointer.Of(int64(404)), encoder) return } diff --git a/client/heartbeatstop.go b/client/heartbeatstop.go index a3e57b10e1f..b75dea9c11b 100644 --- a/client/heartbeatstop.go +++ b/client/heartbeatstop.go @@ -70,7 +70,7 @@ func (h *heartbeatStop) shouldStopAfter(now time.Time, interval time.Duration) b func (h *heartbeatStop) watch() { // If we never manage to successfully contact the server, we want to stop our allocs // after duration + start time - h.lastOk = time.Now() + h.setLastOk(time.Now()) stop := make(chan string, 1) var now time.Time var interval time.Duration diff --git a/client/lib/cgutil/cgutil_linux.go b/client/lib/cgutil/cgutil_linux.go index 1333d0cc13b..b2cc9a4268c 100644 --- a/client/lib/cgutil/cgutil_linux.go +++ b/client/lib/cgutil/cgutil_linux.go @@ -78,9 +78,7 @@ func ConfigureBasicCgroups(config *lcc.Config) error { if err = os.MkdirAll(path, 0755); err != nil { return err } - config.Cgroups.Paths = map[string]string{ - subsystem: path, - } + config.Cgroups.Path = path return nil } diff --git a/client/lib/cgutil/cgutil_linux_test.go b/client/lib/cgutil/cgutil_linux_test.go index ed3ae87bd85..2def4dcbae8 100644 --- a/client/lib/cgutil/cgutil_linux_test.go +++ b/client/lib/cgutil/cgutil_linux_test.go @@ -86,7 +86,7 @@ func TestUtil_GetCPUsFromCgroup(t *testing.T) { } func create(t *testing.T, name string) { - mgr, err := fs2.NewManager(nil, filepath.Join(CgroupRoot, name), rootless) + mgr, err := fs2.NewManager(nil, filepath.Join(CgroupRoot, name)) require.NoError(t, err) if err = mgr.Apply(CreationPID); err != nil { _ = cgroups.RemovePath(name) diff --git a/client/lib/cgutil/cpuset_manager_v1.go b/client/lib/cgutil/cpuset_manager_v1.go index f0fa3252746..ce89cd2bfdf 100644 --- a/client/lib/cgutil/cpuset_manager_v1.go +++ b/client/lib/cgutil/cpuset_manager_v1.go @@ -298,21 +298,6 @@ func (c *cpusetManagerV1) signalReconcile() { } } -func (c *cpusetManagerV1) getCpuset(group string) (cpuset.CPUSet, error) { - man := fs.NewManager( - &configs.Cgroup{ - Path: filepath.Join(c.cgroupParent, group), - }, - map[string]string{"cpuset": filepath.Join(c.cgroupParentPath, group)}, - false, - ) - stats, err := man.GetStats() - if err != nil { - return cpuset.CPUSet{}, err - } - return cpuset.New(stats.CPUSetStats.CPUs...), nil -} - func (c *cpusetManagerV1) getCgroupPathsForTask(allocID, task string) (absolute, relative string) { return filepath.Join(c.reservedCpusetPath(), fmt.Sprintf("%s-%s", allocID, task)), filepath.Join(c.cgroupParent, ReservedCpusetCgroupName, fmt.Sprintf("%s-%s", allocID, task)) @@ -332,11 +317,25 @@ func getCPUsFromCgroupV1(group string) ([]uint16, error) { return nil, err } - man := fs.NewManager(&configs.Cgroup{Path: group}, map[string]string{"cpuset": cgroupPath}, false) + cgroup := &configs.Cgroup{ + Path: group, + Resources: new(configs.Resources), + } + + paths := map[string]string{ + "cpuset": cgroupPath, + } + + man, err := fs.NewManager(cgroup, paths) + if err != nil { + return nil, err + } + stats, err := man.GetStats() if 
err != nil { return nil, err } + return stats.CPUSetStats.CPUs, nil } diff --git a/client/lib/cgutil/cpuset_manager_v2.go b/client/lib/cgutil/cpuset_manager_v2.go index 74a8a4f4f74..1dd5a92b0ab 100644 --- a/client/lib/cgutil/cpuset_manager_v2.go +++ b/client/lib/cgutil/cpuset_manager_v2.go @@ -31,10 +31,6 @@ const ( // in case for e.g. Nomad tasks should be further constrained by an externally // configured systemd cgroup. DefaultCgroupParentV2 = "nomad.slice" - - // rootless is (for now) always false; Nomad clients require root, so we - // assume to not need to do the extra plumbing for rootless cgroups. - rootless = false ) // nothing is used for treating a map like a set with no values @@ -145,7 +141,7 @@ func (c *cpusetManagerV2) CgroupPathFor(allocID, task string) CgroupPathGetter { for { path := c.pathOf(makeID(allocID, task)) - mgr, err := fs2.NewManager(nil, path, rootless) + mgr, err := fs2.NewManager(nil, path) if err != nil { return "", err } @@ -229,7 +225,7 @@ func (c *cpusetManagerV2) cleanup() { } } -//pathOf returns the absolute path to a task with identity id. +// pathOf returns the absolute path to a task with identity id. func (c *cpusetManagerV2) pathOf(id identity) string { return filepath.Join(c.parentAbs, makeScope(id)) } @@ -239,7 +235,7 @@ func (c *cpusetManagerV2) pathOf(id identity) string { // We avoid removing a cgroup if it still contains a PID, as the cpuset manager // may be initially empty on a Nomad client restart. func (c *cpusetManagerV2) remove(path string) { - mgr, err := fs2.NewManager(nil, path, rootless) + mgr, err := fs2.NewManager(nil, path) if err != nil { c.logger.Warn("failed to create manager", "path", path, "err", err) return @@ -267,14 +263,16 @@ func (c *cpusetManagerV2) write(id identity, set cpuset.CPUSet) { path := c.pathOf(id) // make a manager for the cgroup - m, err := fs2.NewManager(nil, path, rootless) + m, err := fs2.NewManager(new(configs.Cgroup), path) if err != nil { c.logger.Error("failed to manage cgroup", "path", path, "err", err) + return } // create the cgroup if err = m.Apply(CreationPID); err != nil { c.logger.Error("failed to apply cgroup", "path", path, "err", err) + return } // set the cpuset value for the cgroup @@ -282,13 +280,14 @@ func (c *cpusetManagerV2) write(id identity, set cpuset.CPUSet) { CpusetCpus: set.String(), }); err != nil { c.logger.Error("failed to set cgroup", "path", path, "err", err) + return } } // ensureParentCgroup will create parent cgroup for the manager if it does not // exist yet. No PIDs are added to any cgroup yet. 
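// Hedged sketch (not from the diff): the runc 1.1.x upgrade drops the third
// "rootless" argument from fs2.NewManager, which is why the calls above now
// pass only a config and a path. Import paths follow runc's libcontainer
// layout; the cgroup path and PID below are illustrative assumptions.
package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// create or load a cgroup-v2 manager for an example scope
	mgr, err := fs2.NewManager(new(configs.Cgroup), "/sys/fs/cgroup/nomad.slice/example.scope")
	if err != nil {
		log.Fatal(err)
	}
	// enter an example PID, then pin the cgroup to two example cores
	if err := mgr.Apply(12345); err != nil {
		log.Fatal(err)
	}
	if err := mgr.Set(&configs.Resources{CpusetCpus: "0-1"}); err != nil {
		log.Fatal(err)
	}
}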
func (c *cpusetManagerV2) ensureParent() error { - mgr, err := fs2.NewManager(nil, c.parentAbs, rootless) + mgr, err := fs2.NewManager(nil, c.parentAbs) if err != nil { return err } diff --git a/client/lib/cgutil/group_killer.go b/client/lib/cgutil/group_killer.go index bcfefe632dd..9f966c49902 100644 --- a/client/lib/cgutil/group_killer.go +++ b/client/lib/cgutil/group_killer.go @@ -60,7 +60,7 @@ func (d *killer) v1(cgroup *configs.Cgroup) error { } // the actual path to our tasks freezer cgroup - path := cgroup.Paths[freezer] + path := cgroup.Path d.logger.Trace("killing processes", "cgroup_path", path, "cgroup_version", "v1", "executor_pid", d.pid) @@ -109,7 +109,7 @@ func (d *killer) v2(cgroup *configs.Cgroup) error { d.logger.Trace("killing processes", "cgroup_path", path, "cgroup_version", "v2", "executor_pid", d.pid, "existing_pids", existingPIDs) - mgr, err := fs2.NewManager(cgroup, "", rootless) + mgr, err := fs2.NewManager(cgroup, "") if err != nil { return fmt.Errorf("failed to create v2 cgroup manager: %w", err) } @@ -117,7 +117,7 @@ func (d *killer) v2(cgroup *configs.Cgroup) error { // move executor PID into the root init.scope so we can kill the task pids // without killing the executor (which is the process running this code, doing // the killing) - init, err := fs2.NewManager(nil, filepath.Join(CgroupRoot, "init.scope"), rootless) + init, err := fs2.NewManager(nil, filepath.Join(CgroupRoot, "init.scope")) if err != nil { return fmt.Errorf("failed to create v2 init cgroup manager: %w", err) } diff --git a/client/lib/resources/containment_linux.go b/client/lib/resources/containment_linux.go index 279e03e6c3a..4c878f7d7f3 100644 --- a/client/lib/resources/containment_linux.go +++ b/client/lib/resources/containment_linux.go @@ -36,7 +36,7 @@ func (c *containment) Apply(pid int) error { // for v2 use manager to create and enter the cgroup if cgutil.UseV2 { - mgr, err := fs2.NewManager(c.cgroup, "", false) + mgr, err := fs2.NewManager(c.cgroup, "") if err != nil { return fmt.Errorf("failed to create v2 cgroup manager for containment: %w", err) } @@ -55,7 +55,7 @@ func (c *containment) Apply(pid int) error { } // for v1 a random cgroup was created already; just enter it - if err := cgroups.EnterPid(c.cgroup.Paths, pid); err != nil { + if err := cgroups.EnterPid(map[string]string{"freezer": c.cgroup.Path}, pid); err != nil { return fmt.Errorf("failed to add pid to v1 cgroup: %w", err) } @@ -89,7 +89,7 @@ func (c *containment) GetPIDs() PIDs { if cgutil.UseV2 { path = filepath.Join(cgutil.CgroupRoot, c.cgroup.Path) } else { - path = c.cgroup.Paths["freezer"] + path = c.cgroup.Path } // find the pids in the cgroup under containment diff --git a/client/logmon/plugin.go b/client/logmon/plugin.go index 5d97b751d10..a21777357cb 100644 --- a/client/logmon/plugin.go +++ b/client/logmon/plugin.go @@ -5,22 +5,27 @@ import ( "os" "os/exec" - hclog "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/client/logmon/proto" "github.com/hashicorp/nomad/plugins/base" "google.golang.org/grpc" ) +var bin = getBin() + +func getBin() string { + b, err := os.Executable() + if err != nil { + panic(err) + } + return b +} + // LaunchLogMon launches a new logmon or reattaches to an existing one. 
// TODO: Integrate with base plugin loader func LaunchLogMon(logger hclog.Logger, reattachConfig *plugin.ReattachConfig) (LogMon, *plugin.Client, error) { logger = logger.Named("logmon") - bin, err := os.Executable() - if err != nil { - return nil, nil, err - } - conf := &plugin.ClientConfig{ HandshakeConfig: base.Handshake, Plugins: map[string]plugin.Plugin{ diff --git a/client/node_updater.go b/client/node_updater.go index 1be3e15d418..a2746cf189a 100644 --- a/client/node_updater.go +++ b/client/node_updater.go @@ -41,18 +41,20 @@ SEND_BATCH: c.configLock.Lock() defer c.configLock.Unlock() + newConfig := c.config.Copy() + // csi updates var csiChanged bool c.batchNodeUpdates.batchCSIUpdates(func(name string, info *structs.CSIInfo) { - if c.updateNodeFromCSIControllerLocked(name, info) { - if c.config.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { - c.config.Node.CSIControllerPlugins[name].UpdateTime = time.Now() + if c.updateNodeFromCSIControllerLocked(name, info, newConfig.Node) { + if newConfig.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { + newConfig.Node.CSIControllerPlugins[name].UpdateTime = time.Now() } csiChanged = true } - if c.updateNodeFromCSINodeLocked(name, info) { - if c.config.Node.CSINodePlugins[name].UpdateTime.IsZero() { - c.config.Node.CSINodePlugins[name].UpdateTime = time.Now() + if c.updateNodeFromCSINodeLocked(name, info, newConfig.Node) { + if newConfig.Node.CSINodePlugins[name].UpdateTime.IsZero() { + newConfig.Node.CSINodePlugins[name].UpdateTime = time.Now() } csiChanged = true } @@ -61,10 +63,10 @@ SEND_BATCH: // driver node updates var driverChanged bool c.batchNodeUpdates.batchDriverUpdates(func(driver string, info *structs.DriverInfo) { - if c.updateNodeFromDriverLocked(driver, info) { - c.config.Node.Drivers[driver] = info - if c.config.Node.Drivers[driver].UpdateTime.IsZero() { - c.config.Node.Drivers[driver].UpdateTime = time.Now() + if c.applyNodeUpdatesFromDriver(driver, info, newConfig.Node) { + newConfig.Node.Drivers[driver] = info + if newConfig.Node.Drivers[driver].UpdateTime.IsZero() { + newConfig.Node.Drivers[driver].UpdateTime = time.Now() } driverChanged = true } @@ -80,7 +82,8 @@ SEND_BATCH: // only update the node if changes occurred if driverChanged || devicesChanged || csiChanged { - c.updateNodeLocked() + c.config = newConfig + c.updateNode() } close(c.fpInitialized) @@ -92,24 +95,27 @@ func (c *Client) updateNodeFromCSI(name string, info *structs.CSIInfo) { c.configLock.Lock() defer c.configLock.Unlock() + newConfig := c.config.Copy() + changed := false - if c.updateNodeFromCSIControllerLocked(name, info) { - if c.config.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { - c.config.Node.CSIControllerPlugins[name].UpdateTime = time.Now() + if c.updateNodeFromCSIControllerLocked(name, info, newConfig.Node) { + if newConfig.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { + newConfig.Node.CSIControllerPlugins[name].UpdateTime = time.Now() } changed = true } - if c.updateNodeFromCSINodeLocked(name, info) { - if c.config.Node.CSINodePlugins[name].UpdateTime.IsZero() { - c.config.Node.CSINodePlugins[name].UpdateTime = time.Now() + if c.updateNodeFromCSINodeLocked(name, info, newConfig.Node) { + if newConfig.Node.CSINodePlugins[name].UpdateTime.IsZero() { + newConfig.Node.CSINodePlugins[name].UpdateTime = time.Now() } changed = true } if changed { - c.updateNodeLocked() + c.config = newConfig + c.updateNode() } } @@ -119,7 +125,7 @@ func (c *Client) updateNodeFromCSI(name string, info *structs.CSIInfo) { // // It is safe 
to call for all CSI Updates, but will only perform changes when // a ControllerInfo field is present. -func (c *Client) updateNodeFromCSIControllerLocked(name string, info *structs.CSIInfo) bool { +func (c *Client) updateNodeFromCSIControllerLocked(name string, info *structs.CSIInfo, node *structs.Node) bool { var changed bool if info.ControllerInfo == nil { return false @@ -127,15 +133,15 @@ func (c *Client) updateNodeFromCSIControllerLocked(name string, info *structs.CS i := info.Copy() i.NodeInfo = nil - oldController, hadController := c.config.Node.CSIControllerPlugins[name] + oldController, hadController := node.CSIControllerPlugins[name] if !hadController { // If the controller info has not yet been set, do that here changed = true - c.config.Node.CSIControllerPlugins[name] = i + node.CSIControllerPlugins[name] = i } else { // The controller info has already been set, fix it up if !oldController.Equal(i) { - c.config.Node.CSIControllerPlugins[name] = i + node.CSIControllerPlugins[name] = i changed = true } @@ -162,7 +168,7 @@ func (c *Client) updateNodeFromCSIControllerLocked(name string, info *structs.CS // // It is safe to call for all CSI Updates, but will only perform changes when // a NodeInfo field is present. -func (c *Client) updateNodeFromCSINodeLocked(name string, info *structs.CSIInfo) bool { +func (c *Client) updateNodeFromCSINodeLocked(name string, info *structs.CSIInfo, node *structs.Node) bool { var changed bool if info.NodeInfo == nil { return false @@ -170,15 +176,15 @@ func (c *Client) updateNodeFromCSINodeLocked(name string, info *structs.CSIInfo) i := info.Copy() i.ControllerInfo = nil - oldNode, hadNode := c.config.Node.CSINodePlugins[name] + oldNode, hadNode := node.CSINodePlugins[name] if !hadNode { // If the Node info has not yet been set, do that here changed = true - c.config.Node.CSINodePlugins[name] = i + node.CSINodePlugins[name] = i } else { // The node info has already been set, fix it up if !oldNode.Equal(info) { - c.config.Node.CSINodePlugins[name] = i + node.CSINodePlugins[name] = i changed = true } @@ -205,30 +211,33 @@ func (c *Client) updateNodeFromDriver(name string, info *structs.DriverInfo) { c.configLock.Lock() defer c.configLock.Unlock() - if c.updateNodeFromDriverLocked(name, info) { - c.config.Node.Drivers[name] = info - if c.config.Node.Drivers[name].UpdateTime.IsZero() { - c.config.Node.Drivers[name].UpdateTime = time.Now() + newConfig := c.config.Copy() + + if c.applyNodeUpdatesFromDriver(name, info, newConfig.Node) { + newConfig.Node.Drivers[name] = info + if newConfig.Node.Drivers[name].UpdateTime.IsZero() { + newConfig.Node.Drivers[name].UpdateTime = time.Now() } - c.updateNodeLocked() + + c.config = newConfig + c.updateNode() } } -// updateNodeFromDriverLocked makes the changes to the node from a driver update -// but does not send the update to the server. c.configLock must be held before -// calling this func -func (c *Client) updateNodeFromDriverLocked(name string, info *structs.DriverInfo) bool { +// applyNodeUpdatesFromDriver applies changes to the passed in node. true is +// returned if the node has changed. 
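// Hedged sketch (not from the diff): the node updater above now follows a
// copy-on-write pattern -- copy the config, mutate the copy, swap it in under
// the lock, then push the update. The types below are toy stand-ins, not the
// real client config.
package main

import (
	"fmt"
	"sync"
)

type nodeConfig struct{ attributes map[string]string }

// copyConfig mimics config.Copy(): readers of the old snapshot never observe
// a partially applied mutation.
func copyConfig(c *nodeConfig) *nodeConfig {
	out := &nodeConfig{attributes: make(map[string]string, len(c.attributes))}
	for k, v := range c.attributes {
		out.attributes[k] = v
	}
	return out
}

type client struct {
	mu     sync.Mutex
	config *nodeConfig
}

func (c *client) setAttribute(key, value string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	newConfig := copyConfig(c.config) // mutate a private copy
	newConfig.attributes[key] = value
	c.config = newConfig // swap the pointer; the old snapshot stays immutable
}

func main() {
	c := &client{config: &nodeConfig{attributes: map[string]string{}}}
	c.setAttribute("driver.docker", "1")
	fmt.Println(c.config.attributes)
}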
+func (c *Client) applyNodeUpdatesFromDriver(name string, info *structs.DriverInfo, node *structs.Node) bool { var hasChanged bool - hadDriver := c.config.Node.Drivers[name] != nil + hadDriver := node.Drivers[name] != nil if !hadDriver { // If the driver info has not yet been set, do that here hasChanged = true for attrName, newVal := range info.Attributes { - c.config.Node.Attributes[attrName] = newVal + node.Attributes[attrName] = newVal } } else { - oldVal := c.config.Node.Drivers[name] + oldVal := node.Drivers[name] // The driver info has already been set, fix it up if oldVal.Detected != info.Detected { hasChanged = true @@ -247,16 +256,16 @@ func (c *Client) updateNodeFromDriverLocked(name string, info *structs.DriverInf } for attrName, newVal := range info.Attributes { - oldVal := c.config.Node.Drivers[name].Attributes[attrName] + oldVal := node.Drivers[name].Attributes[attrName] if oldVal == newVal { continue } hasChanged = true if newVal == "" { - delete(c.config.Node.Attributes, attrName) + delete(node.Attributes, attrName) } else { - c.config.Node.Attributes[attrName] = newVal + node.Attributes[attrName] = newVal } } } @@ -266,16 +275,14 @@ func (c *Client) updateNodeFromDriverLocked(name string, info *structs.DriverInf // their attributes as DriverInfo driverName := fmt.Sprintf("driver.%s", name) if info.Detected { - c.config.Node.Attributes[driverName] = "1" + node.Attributes[driverName] = "1" } else { - delete(c.config.Node.Attributes, driverName) + delete(node.Attributes, driverName) } return hasChanged } -// updateNodeFromFingerprint updates the node with the result of -// fingerprinting the node from the diff that was created func (c *Client) updateNodeFromDevices(devices []*structs.NodeDeviceResource) { c.configLock.Lock() defer c.configLock.Unlock() @@ -284,7 +291,7 @@ func (c *Client) updateNodeFromDevices(devices []*structs.NodeDeviceResource) { // dispatched task resources and not appropriate for expressing // node available device resources if c.updateNodeFromDevicesLocked(devices) { - c.updateNodeLocked() + c.updateNode() } } @@ -294,7 +301,9 @@ func (c *Client) updateNodeFromDevices(devices []*structs.NodeDeviceResource) { func (c *Client) updateNodeFromDevicesLocked(devices []*structs.NodeDeviceResource) bool { if !structs.DevicesEquals(c.config.Node.NodeResources.Devices, devices) { c.logger.Debug("new devices detected", "devices", len(devices)) - c.config.Node.NodeResources.Devices = devices + newConfig := c.config.Copy() + newConfig.Node.NodeResources.Devices = devices + c.config = newConfig return true } diff --git a/client/pluginmanager/csimanager/doc.go b/client/pluginmanager/csimanager/doc.go index 42400a09241..298bf18bf7a 100644 --- a/client/pluginmanager/csimanager/doc.go +++ b/client/pluginmanager/csimanager/doc.go @@ -1,15 +1,16 @@ -/** +/* +* csimanager manages locally running CSI Plugins on a Nomad host, and provides a few different interfaces. 
It provides: -- a pluginmanager.PluginManager implementation that is used to fingerprint and - heartbeat local node plugins -- (TODO) a csimanager.AttachmentWaiter implementation that can be used to wait for an - external CSIVolume to be attached to the node before returning -- (TODO) a csimanager.NodeController implementation that is used to manage the node-local - portions of the CSI specification, and encompassess volume staging/publishing -- (TODO) a csimanager.VolumeChecker implementation that can be used by hooks to ensure - their volumes are healthy(ish) + - a pluginmanager.PluginManager implementation that is used to fingerprint and + heartbeat local node plugins + - (TODO) a csimanager.AttachmentWaiter implementation that can be used to wait for an + external CSIVolume to be attached to the node before returning + - (TODO) a csimanager.NodeController implementation that is used to manage the node-local + portions of the CSI specification, and encompassess volume staging/publishing + - (TODO) a csimanager.VolumeChecker implementation that can be used by hooks to ensure + their volumes are healthy(ish) */ package csimanager diff --git a/client/rpc.go b/client/rpc.go index 7d1dfb6e40f..9bc439ad03d 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -47,9 +47,11 @@ func (c *Client) StreamingRpcHandler(method string) (structs.StreamingRpcHandler // RPC is used to forward an RPC call to a nomad server, or fail if no servers. func (c *Client) RPC(method string, args interface{}, reply interface{}) error { + conf := c.GetConfig() + // Invoke the RPCHandler if it exists - if c.config.RPCHandler != nil { - return c.config.RPCHandler.RPC(method, args, reply) + if conf.RPCHandler != nil { + return conf.RPCHandler.RPC(method, args, reply) } // We will try to automatically retry requests that fail due to things like server unavailability @@ -60,7 +62,7 @@ func (c *Client) RPC(method string, args interface{}, reply interface{}) error { // to the leader they may also allow for an RPCHoldTimeout while waiting for leader election. 
// That's OK, we won't double up because we are using it here not as a sleep but // as a hint to give up - deadline = deadline.Add(c.config.RPCHoldTimeout) + deadline = deadline.Add(conf.RPCHoldTimeout) // If its a blocking query, allow the time specified by the request if info, ok := args.(structs.RPCInfo); ok { @@ -109,7 +111,7 @@ TRY: } // Wait to avoid thundering herd - timer, cancel := helper.NewSafeTimer(helper.RandomStagger(c.config.RPCHoldTimeout / structs.JitterFraction)) + timer, cancel := helper.NewSafeTimer(helper.RandomStagger(conf.RPCHoldTimeout / structs.JitterFraction)) defer cancel() select { diff --git a/client/serviceregistration/checks/client.go b/client/serviceregistration/checks/client.go index c26431436f4..caf823848f8 100644 --- a/client/serviceregistration/checks/client.go +++ b/client/serviceregistration/checks/client.go @@ -9,6 +9,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "time" "github.com/hashicorp/go-cleanhttp" @@ -162,6 +163,8 @@ func (c *checker) checkHTTP(ctx context.Context, qc *QueryContext, q *Query) *st qr.Status = structs.CheckFailure return qr } + request.Header = q.Headers + request.Body = io.NopCloser(strings.NewReader(q.Body)) request = request.WithContext(ctx) result, err := c.httpClient.Do(request) diff --git a/client/serviceregistration/checks/client_test.go b/client/serviceregistration/checks/client_test.go index 2f6cb7fb47f..2553bb9ce50 100644 --- a/client/serviceregistration/checks/client_test.go +++ b/client/serviceregistration/checks/client_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -20,6 +21,13 @@ import ( "oss.indeed.com/go/libtime/libtimetest" ) +func splitURL(u string) (string, string) { + // get the address and port for http server + tokens := strings.Split(u, ":") + addr, port := strings.TrimPrefix(tokens[1], "//"), tokens[2] + return addr, port +} + func TestChecker_Do_HTTP(t *testing.T) { ci.Parallel(t) @@ -49,8 +57,7 @@ func TestChecker_Do_HTTP(t *testing.T) { defer ts.Close() // get the address and port for http server - tokens := strings.Split(ts.URL, ":") - addr, port := strings.TrimPrefix(tokens[1], "//"), tokens[2] + addr, port := splitURL(ts.URL) // create a mock clock so we can assert time is set now := time.Date(2022, 1, 2, 3, 4, 5, 6, time.UTC) @@ -200,6 +207,119 @@ func bigResponse() (string, string) { return s, s[:outputSizeLimit] } +func TestChecker_Do_HTTP_extras(t *testing.T) { + ci.Parallel(t) + + // record the method, body, and headers of the request + var ( + method string + body []byte + headers map[string][]string + ) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + method = r.Method + body, _ = io.ReadAll(r.Body) + headers = helper.CopyMap(r.Header) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + // get the address and port for http server + addr, port := splitURL(ts.URL) + + // make headers from key-value pairs + makeHeaders := func(more ...[2]string) http.Header { + h := make(http.Header) + for _, extra := range more { + h.Set(extra[0], extra[1]) + } + return h + } + + encoding := [2]string{"Accept-Encoding", "gzip"} + agent := [2]string{"User-Agent", "Go-http-client/1.1"} + + cases := []struct { + name string + method string + body string + headers map[string][]string + }{ + { + name: "method GET", + method: "GET", + headers: 
makeHeaders(encoding, agent), + }, + { + name: "method Get", + method: "Get", + headers: makeHeaders(encoding, agent), + }, + { + name: "method HEAD", + method: "HEAD", + headers: makeHeaders(agent), + }, + { + name: "extra headers", + method: "GET", + headers: makeHeaders(encoding, agent, + [2]string{"X-My-Header", "hello"}, + [2]string{"Authorization", "Basic ZWxhc3RpYzpjaGFuZ2VtZQ=="}, + ), + }, + { + name: "with body", + method: "POST", + headers: makeHeaders(encoding, agent), + body: "some payload", + }, + } + + for _, tc := range cases { + qc := &QueryContext{ + ID: "abc123", + CustomAddress: addr, + ServicePortLabel: port, + Networks: nil, + NetworkStatus: mock.NewNetworkStatus(addr), + Ports: nil, + Group: "group", + Task: "task", + Service: "service", + Check: "check", + } + + q := &Query{ + Mode: structs.Healthiness, + Type: "http", + Timeout: 1 * time.Second, + AddressMode: "auto", + PortLabel: port, + Protocol: "http", + Path: "/", + Method: tc.method, + Headers: tc.headers, + Body: tc.body, + } + + t.Run(tc.name, func(t *testing.T) { + logger := testlog.HCLogger(t) + c := New(logger) + ctx := context.Background() + result := c.Do(ctx, qc, q) + must.Eq(t, http.StatusOK, result.StatusCode, + must.Sprintf("test.URL: %s", ts.URL), + must.Sprintf("headers: %v", tc.headers), + ) + must.Eq(t, tc.method, method) + must.Eq(t, tc.body, string(body)) + must.Eq(t, tc.headers, headers) + }) + } +} + func TestChecker_Do_TCP(t *testing.T) { ci.Parallel(t) @@ -311,7 +431,7 @@ func TestChecker_Do_TCP(t *testing.T) { switch tc.tcpMode { case "ok": // simulate tcp server by listening - go tcpServer(ctx, tc.tcpPort) + tcpServer(t, ctx, tc.tcpPort) case "hang": // simulate tcp hang by setting an already expired context timeout, stop := context.WithDeadline(ctx, now.Add(-1*time.Second)) @@ -327,16 +447,25 @@ func TestChecker_Do_TCP(t *testing.T) { } } -func tcpServer(ctx context.Context, port int) { +// tcpServer will start a tcp listener that accepts connections and closes them. +// The caller can close the listener by cancelling ctx. 
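// Hedged sketch (not from the diff): what the checkHTTP change above amounts
// to -- propagating the check's configured headers and body onto the outgoing
// request before it is sent. The URL and values are illustrative assumptions.
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	headers := http.Header{}
	headers.Set("Authorization", "Basic ZXhhbXBsZQ==")
	body := "some payload"

	request, err := http.NewRequest("POST", "http://127.0.0.1:8080/", nil)
	if err != nil {
		panic(err)
	}
	request.Header = headers                             // check-defined headers
	request.Body = io.NopCloser(strings.NewReader(body)) // check-defined body

	fmt.Println(request.Method, request.Header, body)
}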
+func tcpServer(t *testing.T, ctx context.Context, port int) { var lc net.ListenConfig - l, _ := lc.Listen(ctx, "tcp", net.JoinHostPort( + l, err := lc.Listen(ctx, "tcp", net.JoinHostPort( "localhost", fmt.Sprintf("%d", port), )) - defer func() { + must.NoError(t, err, must.Sprint("port", port)) + t.Cleanup(func() { _ = l.Close() - }() - con, _ := l.Accept() - defer func() { - _ = con.Close() + }) + + go func() { + // caller can stop us by cancelling ctx + for { + _, acceptErr := l.Accept() + if acceptErr != nil { + return + } + } }() } diff --git a/client/serviceregistration/checks/result.go b/client/serviceregistration/checks/result.go index 8ef5859ce70..5b4c359bf95 100644 --- a/client/serviceregistration/checks/result.go +++ b/client/serviceregistration/checks/result.go @@ -1,8 +1,10 @@ package checks import ( + "net/http" "time" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -18,9 +20,11 @@ func GetCheckQuery(c *structs.ServiceCheck) *Query { Timeout: c.Timeout, AddressMode: c.AddressMode, PortLabel: c.PortLabel, + Protocol: protocol, Path: c.Path, Method: c.Method, - Protocol: protocol, + Headers: helper.CopyMap(c.Header), + Body: c.Body, } } @@ -35,9 +39,11 @@ type Query struct { AddressMode string // host, driver, or alloc PortLabel string // label or value - Protocol string // http checks only (http or https) - Path string // http checks only - Method string // http checks only + Protocol string // http checks only (http or https) + Path string // http checks only + Method string // http checks only + Headers http.Header // http checks only + Body string // http checks only } // A QueryContext contains allocation and service parameters necessary for diff --git a/client/state/08types.go b/client/state/08types.go index a10d537f0af..b4cc54bf702 100644 --- a/client/state/08types.go +++ b/client/state/08types.go @@ -16,7 +16,6 @@ import ( // changed over the life-cycle of the alloc_runner in Nomad 0.8. // // https://github.com/hashicorp/nomad/blob/v0.8.6/client/alloc_runner.go#L146-L153 -// type allocRunnerMutableState08 struct { // AllocClientStatus does not need to be upgraded as it is computed // from task states. diff --git a/client/state/upgrade.go b/client/state/upgrade.go index 31f8c3bef6d..b2a1394e114 100644 --- a/client/state/upgrade.go +++ b/client/state/upgrade.go @@ -82,15 +82,14 @@ func backupDB(bdb *bbolt.DB, dst string) error { // UpgradeAllocs upgrades the boltdb schema. Example 0.8 schema: // -// * allocations -// * 15d83e8a-74a2-b4da-3f17-ed5c12895ea8 -// * echo -// - simple-all (342 bytes) -// - alloc (2827 bytes) -// - alloc-dir (166 bytes) -// - immutable (15 bytes) -// - mutable (1294 bytes) -// +// allocations +// 15d83e8a-74a2-b4da-3f17-ed5c12895ea8 +// echo +// simple-all (342 bytes) +// alloc (2827 bytes) +// alloc-dir (166 bytes) +// immutable (15 bytes) +// mutable (1294 bytes) func UpgradeAllocs(logger hclog.Logger, tx *boltdd.Tx) error { btx := tx.BoltTx() allocationsBucket := btx.Bucket(allocationsBucketName) diff --git a/client/taskenv/env.go b/client/taskenv/env.go index d40b52ab95c..7fb6a27cc2e 100644 --- a/client/taskenv/env.go +++ b/client/taskenv/env.go @@ -903,7 +903,6 @@ func (b *Builder) SetDriverNetwork(n *drivers.DriverNetwork) *Builder { // Handled by setAlloc -> otherPorts: // // Task: NOMAD_TASK_{IP,PORT,ADDR}__
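// Hedged sketch (not from the diff): the Query built from a service check now
// carries its own Headers and Body. The copy loop stands in for
// helper.CopyMap, so mutating the original check afterwards cannot leak into
// an in-flight query; the type and field values are illustrative assumptions.
package main

import (
	"fmt"
	"net/http"
	"time"
)

type query struct {
	Method  string
	Path    string
	Timeout time.Duration
	Headers http.Header
	Body    string
}

func main() {
	checkHeader := map[string][]string{"X-My-Header": {"hello"}}

	headers := make(http.Header, len(checkHeader))
	for k, v := range checkHeader {
		headers[k] = append([]string(nil), v...) // defensive copy
	}

	q := query{
		Method:  "POST",
		Path:    "/health",
		Timeout: time.Second,
		Headers: headers,
		Body:    "some payload",
	}
	fmt.Printf("%+v\n", q)
}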
`) +} diff --git a/e2e/servicediscovery/service_discovery_test.go b/e2e/servicediscovery/service_discovery_test.go index 7665027300f..00596895ae1 100644 --- a/e2e/servicediscovery/service_discovery_test.go +++ b/e2e/servicediscovery/service_discovery_test.go @@ -20,6 +20,8 @@ const ( jobMultiProvider = "./input/multi_provider.nomad" jobSimpleLBReplicas = "./input/simple_lb_replicas.nomad" jobSimpleLBClients = "./input/simple_lb_clients.nomad" + jobChecksHappy = "./input/checks_happy.nomad" + jobChecksSad = "./input/checks_sad.nomad" ) const ( @@ -41,6 +43,8 @@ func TestServiceDiscovery(t *testing.T) { t.Run("TestServiceDiscovery_MultiProvider", testMultiProvider) t.Run("TestServiceDiscovery_UpdateProvider", testUpdateProvider) t.Run("TestServiceDiscovery_SimpleLoadBalancing", testSimpleLoadBalancing) + t.Run("TestServiceDiscovery_ChecksHappy", testChecksHappy) + t.Run("TestServiceDiscovery_ChecksSad", testChecksSad) } // testMultiProvider tests service discovery where multi providers are used diff --git a/e2e/servicediscovery/simple_lb_test.go b/e2e/servicediscovery/simple_lb_test.go index 2e42956e1b5..d438b06a03d 100644 --- a/e2e/servicediscovery/simple_lb_test.go +++ b/e2e/servicediscovery/simple_lb_test.go @@ -15,8 +15,8 @@ import ( func testSimpleLoadBalancing(t *testing.T) { nomadClient := e2eutil.NomadClient(t) - // Generate our job ID which will be used for the entire test. - jobID := "nsd-simple-lb-replicas" + uuid.Short() + // Generate our unique job ID which will be used for this test. + jobID := "nsd-simple-lb-replicas-" + uuid.Short() jobIDs := []string{jobID} // Defer a cleanup function to remove the job. This will trigger if the diff --git a/e2e/vaultcompat/consts_test.go b/e2e/vaultcompat/consts_test.go index cd09b8d2e47..12e2558c606 100644 --- a/e2e/vaultcompat/consts_test.go +++ b/e2e/vaultcompat/consts_test.go @@ -2,7 +2,7 @@ package vaultcompat import ( "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) const ( @@ -45,12 +45,12 @@ var ( // job is a test job that is used to request a Vault token and cat the token // out before exiting. 
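// Hedged sketch (not from the diff): the generic helper/pointer.Of used above
// replaces the per-type helpers (Int64ToPtr, StringToPtr, BoolToPtr, ...).
// Of is reproduced locally here under the assumption that it is the usual
// one-line generic constructor.
package main

import "fmt"

// Of returns a pointer to a copy of the given value, whatever its type.
func Of[A any](a A) *A { return &a }

func main() {
	code := Of(int64(404)) // replaces helper.Int64ToPtr(404)
	name := Of("test")     // replaces helper.StringToPtr("test")
	ok := Of(true)         // replaces helper.BoolToPtr(true)
	fmt.Println(*code, *name, *ok)
}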
job = &api.Job{ - ID: helper.StringToPtr("test"), - Type: helper.StringToPtr("batch"), + ID: pointer.Of("test"), + Type: pointer.Of("batch"), Datacenters: []string{"dc1"}, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("test"), + Name: pointer.Of("test"), Tasks: []*api.Task{ { Name: "test", @@ -65,8 +65,8 @@ var ( }, }, RestartPolicy: &api.RestartPolicy{ - Attempts: helper.IntToPtr(0), - Mode: helper.StringToPtr("fail"), + Attempts: pointer.Of(0), + Mode: pointer.Of("fail"), }, }, }, diff --git a/e2e/vaultcompat/vault_test.go b/e2e/vaultcompat/vault_test.go index 27446ebbd7b..0fcb25ffdee 100644 --- a/e2e/vaultcompat/vault_test.go +++ b/e2e/vaultcompat/vault_test.go @@ -18,7 +18,7 @@ import ( "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/command/agent" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/testutil" vapi "github.com/hashicorp/vault/api" @@ -331,10 +331,10 @@ func testVaultCompatibility(t *testing.T, vault string, version string) { if c.Vault == nil { c.Vault = &config.VaultConfig{} } - c.Vault.Enabled = helper.BoolToPtr(true) + c.Vault.Enabled = pointer.Of(true) c.Vault.Token = token c.Vault.Role = "nomad-cluster" - c.Vault.AllowUnauthenticated = helper.BoolToPtr(true) + c.Vault.AllowUnauthenticated = pointer.Of(true) c.Vault.Addr = v.HTTPAddr }) defer nomad.Shutdown() diff --git a/go.mod b/go.mod index 49e8c02185e..e564def9b04 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,11 @@ module github.com/hashicorp/nomad -go 1.18 +go 1.19 // Pinned dependencies are noted in github.com/hashicorp/nomad/issues/11826 replace ( github.com/Microsoft/go-winio => github.com/endocrimes/go-winio v0.4.13-0.20190628114223-fb47a8b41948 - github.com/hashicorp/go-discover => github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 + github.com/hashicorp/go-discover => github.com/hashicorp/go-discover v0.0.0-20220621183603-a413e131e836 github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee ) @@ -14,37 +14,36 @@ replace github.com/hashicorp/nomad/api => ./api require ( github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 - github.com/Microsoft/go-winio v0.4.17 + github.com/Microsoft/go-winio v0.5.2 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e - github.com/armon/go-metrics v0.3.10 + github.com/armon/go-metrics v0.4.0 github.com/aws/aws-sdk-go v1.42.27 github.com/container-storage-interface/spec v1.4.0 - github.com/containerd/go-cni v1.1.1 - github.com/containernetworking/cni v1.0.1 - github.com/containernetworking/plugins v1.0.1 + github.com/containerd/go-cni v1.1.6 + github.com/containernetworking/cni v1.1.2 + github.com/containernetworking/plugins v1.1.1 github.com/coreos/go-iptables v0.6.0 - github.com/coreos/go-semver v0.3.0 github.com/creack/pty v1.1.18 github.com/docker/cli v20.10.3-0.20220113150236-6e2838e18645+incompatible github.com/docker/distribution v2.8.1+incompatible - github.com/docker/docker v20.10.12+incompatible + github.com/docker/docker v20.10.17+incompatible github.com/docker/go-units v0.4.0 github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 github.com/dustin/go-humanize v1.0.0 github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f github.com/fatih/color v1.13.0 // indirect - github.com/fsouza/go-dockerclient v1.6.5 + github.com/fsouza/go-dockerclient v1.8.2 github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 
github.com/google/go-cmp v0.5.8 github.com/gorilla/handlers v1.5.1 github.com/gorilla/websocket v1.5.0 github.com/gosuri/uilive v0.0.4 - github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39 + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/hashicorp/consul v1.7.8 - github.com/hashicorp/consul-template v0.29.1 - github.com/hashicorp/consul/api v1.13.0 - github.com/hashicorp/consul/sdk v0.9.0 + github.com/hashicorp/consul-template v0.29.2-0.20220803104536-583050a85eea + github.com/hashicorp/consul/api v1.13.1 + github.com/hashicorp/consul/sdk v0.10.0 github.com/hashicorp/cronexpr v1.1.1 github.com/hashicorp/go-bexpr v0.1.11 github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de @@ -76,7 +75,7 @@ require ( github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/memberlist v0.3.1 github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 - github.com/hashicorp/nomad/api v0.0.0-20220407202126-2eba643965c4 + github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 github.com/hashicorp/raft v1.3.9 github.com/hashicorp/raft-boltdb/v2 v2.2.0 github.com/hashicorp/serf v0.9.7 @@ -88,7 +87,7 @@ require ( github.com/kr/text v0.2.0 github.com/mattn/go-colorable v0.1.12 github.com/miekg/dns v1.1.41 - github.com/mitchellh/cli v1.1.2 + github.com/mitchellh/cli v1.1.4 github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286 github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/go-glint v0.0.0-20210722152315-6515ceb4a127 @@ -97,10 +96,10 @@ require ( github.com/mitchellh/hashstructure v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/reflectwalk v1.0.2 - github.com/moby/sys/mount v0.3.0 - github.com/moby/sys/mountinfo v0.6.0 + github.com/moby/sys/mount v0.3.3 + github.com/moby/sys/mountinfo v0.6.2 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 - github.com/opencontainers/runc v1.0.3 + github.com/opencontainers/runc v1.1.3 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 github.com/posener/complete v1.2.3 github.com/prometheus/client_golang v1.12.0 @@ -116,18 +115,18 @@ require ( github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 github.com/zclconf/go-cty v1.8.0 github.com/zclconf/go-cty-yaml v1.0.2 - go.etcd.io/bbolt v1.3.5 + go.etcd.io/bbolt v1.3.6 go.uber.org/goleak v1.1.12 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d golang.org/x/exp v0.0.0-20220609121020-a51bd0440498 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e + golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 - google.golang.org/grpc v1.45.0 - google.golang.org/protobuf v1.27.1 + google.golang.org/grpc v1.48.0 + google.golang.org/protobuf v1.28.1 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/tomb.v2 v2.0.0-20140626144623-14b3d72120e8 - oss.indeed.com/go/libtime v1.5.0 + oss.indeed.com/go/libtime v1.6.0 ) require ( @@ -149,8 +148,10 @@ require ( github.com/DataDog/datadog-go v3.2.0+incompatible // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/Microsoft/hcsshim v0.8.23 // indirect + github.com/Masterminds/sprig/v3 v3.2.0 // indirect + github.com/Microsoft/hcsshim v0.9.3 // indirect github.com/VividCortex/ewma v1.1.1 // indirect 
github.com/agext/levenshtein v1.2.1 // indirect github.com/apparentlymart/go-cidr v1.0.1 // indirect @@ -159,7 +160,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/bits-and-blooms/bitset v1.2.0 // indirect github.com/bmatcuk/doublestar v1.1.5 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/brianvoe/gofakeit/v6 v6.16.0 @@ -168,14 +168,14 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect github.com/cheggaaa/pb/v3 v3.0.5 // indirect - github.com/cilium/ebpf v0.8.1 // indirect + github.com/cilium/ebpf v0.9.1 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect - github.com/containerd/cgroups v1.0.2 // indirect + github.com/containerd/cgroups v1.0.3 // indirect github.com/containerd/console v1.0.3 // indirect - github.com/containerd/containerd v1.5.9 // indirect + github.com/containerd/containerd v1.6.6 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -186,7 +186,7 @@ require ( github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/envoyproxy/go-control-plane v0.10.0 // indirect + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -214,7 +214,7 @@ require ( github.com/hashicorp/vault/api/auth/kubernetes v0.1.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -223,19 +223,19 @@ require ( github.com/linode/linodego v0.7.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mattn/go-runewidth v0.0.7 // indirect + github.com/mattn/go-runewidth v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/pointerstructure v1.2.1 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mrunalp/fileutils v0.5.0 // indirect + github.com/muesli/reflow v0.3.0 github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/oklog/run v1.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/selinux v1.10.0 // indirect + 
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/selinux v1.10.1 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect @@ -245,9 +245,11 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect + github.com/seccomp/libseccomp-golang v0.10.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect + github.com/spf13/cast v1.3.1 // indirect github.com/stretchr/objx v0.4.0 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect github.com/tj/go-spin v1.1.0 // indirect @@ -255,26 +257,27 @@ require ( github.com/tklauser/numcpus v0.3.0 // indirect github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect github.com/ulikunitz/xz v0.5.10 // indirect - github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect + github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmware/govmomi v0.18.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/api v0.60.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 // indirect - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/resty.v1 v1.12.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +require github.com/rivo/uniseg v0.2.0 // indirect diff --git a/go.sum b/go.sum index 99906ce56a4..63988acbeec 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,7 @@ github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M= github.com/Azure/go-autorest/autorest v0.11.20/go.mod 
h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= @@ -108,8 +109,12 @@ github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJ github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig/v3 v3.2.0 h1:P1ekkbuU73Ui/wS0nK1HOM37hh4xdfZo485UPf8rc+Y= +github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -117,9 +122,9 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo= +github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -143,7 +148,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= @@ -160,8 +164,8 @@ 
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= +github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -181,7 +185,6 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -194,7 +197,6 @@ github.com/brianvoe/gofakeit/v6 v6.16.0 h1:EelCqtfArd8ppJ0z+TpOxXH8sVWNPBadPNdCD github.com/brianvoe/gofakeit/v6 v6.16.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= @@ -224,8 +226,9 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao= -github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= 
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= @@ -260,8 +263,8 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.2 h1:mZBclaSgNDfPWtfhj2xJY28LZ9nYIgzB0pwSURPl6JM= -github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY= +github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -277,23 +280,22 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= -github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0= +github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod 
h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= @@ -302,8 +304,8 @@ github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1S github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.1 h1:UV64yhzDgs27mBIVUrlzG8Z2bc1K0/zokOW5vDNkI4c= -github.com/containerd/go-cni v1.1.1/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo= +github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -316,6 +318,7 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -334,12 +337,12 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= 
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.0.1 h1:wwCfYbTCj5FC0EJgyzyjTXmqysOiJE9r712Z+2KVZAk= -github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= +github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= @@ -352,7 +355,6 @@ github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -394,6 +396,7 @@ github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TR github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.3-0.20220113150236-6e2838e18645+incompatible h1:cPkz7NutD1DZvwN6B+8ZF9d4Pp/rXxAhiyT1NhQFyyY= github.com/docker/cli v20.10.3-0.20220113150236-6e2838e18645+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= @@ -401,9 +404,10 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= -github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker v20.10.17+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -443,8 +447,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.0 h1:WVt4HEPbdRbRD/PKKPbPnIVavO6gk/h673jWyIJ016k= -github.com/envoyproxy/go-control-plane v0.10.0/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= @@ -464,12 +468,11 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw= -github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA= +github.com/fsouza/go-dockerclient v1.8.2 h1:8A2/xvHxpe5ivfQ2XDJ5EpLBTaxmsFZUJpfX1zSORRY= +github.com/fsouza/go-dockerclient v1.8.2/go.mod h1:oenNB8JjNKY4o8I/sug4Qah9si/7OxH4MjL+u7oBxP8= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -514,6 +517,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= @@ -590,6 +594,7 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -611,6 +616,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -639,7 +645,7 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -652,23 +658,22 @@ github.com/gosuri/uilive v0.0.4/go.mod h1:V/epo5LjjlDE5RJUcqx8dbw+zc93y5Ya3yg8tf github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39 h1:MqvH60+R2JhSdvVgGxmExOndrkRQtGW7w4+gcrymN64= -github.com/grpc-ecosystem/go-grpc-middleware 
v1.2.1-0.20200228141219-3ce3d519df39/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul v1.7.8 h1:hp308KxAf3zWoGuwp2e+0UUhrm6qHjeBQk3jCZ+bjcY= github.com/hashicorp/consul v1.7.8/go.mod h1:urbfGaVZDmnXC6geg0LYPh/SRUk1E8nfmDHpz+Q0nLw= -github.com/hashicorp/consul-template v0.29.1 h1:icm/H7klHYlxpUoWqSmTIWaSLEfGqUJJBsZA/2JhTLU= -github.com/hashicorp/consul-template v0.29.1/go.mod h1:QIohwBuXlKXtsmGGQdWrISlUy4E6LFg5tLZyrw4MyoU= +github.com/hashicorp/consul-template v0.29.2-0.20220803104536-583050a85eea h1:d9frD3+sqQOG/4hOXLEfcXnNz+au0owaRUmM2WuzCBk= +github.com/hashicorp/consul-template v0.29.2-0.20220803104536-583050a85eea/go.mod h1:i2oqMe0jIyHAKuimz7Q3sJU3vnwVx3QzDdDmrRrz5RI= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= -github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc= -github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/api v1.13.1 h1:r5cPdVFUy+pFF7nt+0ArLD9hm+E39OewJkvNdjKXcL4= +github.com/hashicorp/consul/api v1.13.1/go.mod h1:+1VcOos0TVdQFqXxphG4zmGcwQB4KVGkp1maPqnkDpE= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.9.0 h1:NGSHAU7X3yDCjo8WBUbNOtD3BSqv8u0vu3+zNxgmxQI= -github.com/hashicorp/consul/sdk v0.9.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/consul/sdk v0.10.0 h1:rGLEh2AWK4K0KCMvqWAz2EYxQqgciIfMagWZ0nVe5MI= +github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -689,8 +694,8 @@ github.com/hashicorp/go-connlimit v0.3.0 h1:oAojHGjFxUTTTA8c5XXnDqWJ2HLuWbDiBPTp github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 h1:kgvybwEeu0SXktbB2y3uLHX9lklLo+nzUwh59A3jzQc= github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA= -github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 h1:eje2KOX8Sf7aYPiAsLnpWdAIrGRMcpFjN/Go/Exb7Zo= -github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= +github.com/hashicorp/go-discover v0.0.0-20220621183603-a413e131e836 h1:4wEh+GhB7WtM3ZBlqx7DJ32m4fVt4rK1XeEEez3aook= +github.com/hashicorp/go-discover v0.0.0-20220621183603-a413e131e836/go.mod 
h1:1xfdKvc3pe5WKxfUUHHOGaKMk7NLGhHY1jkyhKo6098= github.com/hashicorp/go-envparse v0.0.0-20180119215841-310ca1881b22 h1:HTmDIaSN95gbdMyrsbNiXSdW4fbGctGQwEqv0H7OhDQ= github.com/hashicorp/go-envparse v0.0.0-20180119215841-310ca1881b22/go.mod h1:/NlxCzN2D4C4L2uDE6ux/h6jM+n98VFQM14nnCIfHJU= github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= @@ -828,6 +833,7 @@ github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFa github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 h1:8as8OQ+RF1QrsHvWWsKBtBKINhD9QaD1iozA1wrO4aA= github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -838,13 +844,13 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE= github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da h1:FjHUJJ7oBW4G/9j1KzlHaXL09LyMVM9rupS39lncbXk= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4= @@ -859,6 +865,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 
h1:BvV6PYcRz0yGnWXNZrd5wginNT1GfFfPvvWpPbjfFL8= @@ -904,6 +911,7 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3v github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= @@ -930,13 +938,15 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= @@ -945,8 +955,9 @@ github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= +github.com/mitchellh/cli v1.1.4 h1:qj8czE26AU4PbiaPXK5uVmMSM+V5BYsFBiM9HhGRLUA= +github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ= github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286 h1:KHyL+3mQOF9sPfs26lsefckcFNDcIZtiACQiECzIUkw= github.com/mitchellh/colorstring 
v0.0.0-20150917214807-8631ce90f286/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -988,13 +999,13 @@ github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0= -github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk= +github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= +github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo= -github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -1008,6 +1019,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -1029,21 +1042,26 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod 
h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1052,16 +1070,16 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= 
-github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1073,8 +1091,9 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= +github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= @@ -1149,6 +1168,9 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1164,14 +1186,14 @@ github.com/ryanuber/columnize v2.1.1-0.20170703205827-abc90934186a+incompatible/ github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 
h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= +github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -1180,6 +1202,8 @@ github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8 github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shoenig/test v0.3.1 h1:dhGZztS6nQuvJ0o0RtUiQHaEO4hhArh/WmWwik3Ols0= github.com/shoenig/test v0.3.1/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1188,8 +1212,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI= github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1203,6 +1228,8 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1259,13 +1286,13 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= @@ -1300,8 +1327,9 @@ github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd5 github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1336,6 +1364,7 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20191106202628-ed6320f186d4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1443,10 +1472,11 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b h1:3ogNYyK4oIQdIKzTu68hQrr4iuVxF3AxKl9Aj/eDrw0= +golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1503,6 +1533,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1537,7 +1568,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1550,6 +1580,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1578,7 +1609,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1588,12 +1618,18 @@ golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e h1:w36l2Uw3dRan1K3TyXriXvY+6T56GNmlKGcqiQUJDfM= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 h1:Y7NOhdqIOU8kYI7BxsgL38d0ot0raxvcW+EMQU2QrT4= +golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1633,6 +1669,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1656,14 +1693,17 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1749,11 +1789,13 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1823,8 +1865,9 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1838,8 +1881,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1879,6 +1923,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1911,6 +1956,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -1920,18 +1966,22 @@ k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -oss.indeed.com/go/libtime v1.5.0 h1:wulKS+oHhb3P2wFi1fcA+g8CXiC8+ygFECUQea5ZqLU= -oss.indeed.com/go/libtime v1.5.0/go.mod h1:B2sdEcuzB0zhTKkAuHy4JInKRc7Al3tME4qWam6R7mA= +oss.indeed.com/go/libtime v1.6.0 
h1:XQyczJihse/wQGo59OfPF3f4f+Sywv4R8vdGB3S9BfU= +oss.indeed.com/go/libtime v1.6.0/go.mod h1:B2sdEcuzB0zhTKkAuHy4JInKRc7Al3tME4qWam6R7mA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1939,6 +1989,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/helper/escapingio/reader.go b/helper/escapingio/reader.go index edb10514196..e1cdb75539b 100644 --- a/helper/escapingio/reader.go +++ b/helper/escapingio/reader.go @@ -16,10 +16,10 @@ type Handler func(c byte) bool // For illustrative purposes, we use `~` in documentation as a shorthand for escaping character. // // If following a new line, reader sees: -// * `~~`, only one is emitted -// * `~.` (or any character), the handler is invoked with the character. +// - `~~`, only one is emitted +// - `~.` (or any character), the handler is invoked with the character. // If handler returns true, `~.` will be skipped; otherwise, it's propagated. -// * `~` and it's the last character in stream, it's propagated +// - `~` and it's the last character in stream, it's propagated // // Appearances of `~` when not preceded by a new line are propagated unmodified. func NewReader(r io.Reader, c byte, h Handler) io.Reader { diff --git a/helper/funcs.go b/helper/funcs.go index 24aa0159e33..dc5563d2ee8 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -3,6 +3,7 @@ package helper import ( "crypto/sha512" "fmt" + "net/http" "path/filepath" "reflect" "regexp" @@ -32,6 +33,10 @@ var invalidFilenameNonASCII = regexp.MustCompile(`[[:^ascii:]/\\<>:"|?*]`) // invalidFilenameStrict = invalidFilename plus additional punctuation var invalidFilenameStrict = regexp.MustCompile(`[/\\<>:"|?*$()+=[\];#@~,&']`) +type Copyable[T any] interface { + Copy() T +} + // IsUUID returns true if the given string is a valid UUID. func IsUUID(str string) bool { const uuidLen = 36 @@ -69,69 +74,6 @@ func HashUUID(input string) (output string, hashed bool) { return output, true } -// BoolToPtr returns the pointer to a boolean. -// -// Deprecated; use pointer.Of instead. -func BoolToPtr(b bool) *bool { - return &b -} - -// IntToPtr returns the pointer to an int. -// -// Deprecated; use pointer.Of instead. -func IntToPtr(i int) *int { - return &i -} - -// Int8ToPtr returns the pointer to an int8. -// -// Deprecated; use pointer.Of instead. -func Int8ToPtr(i int8) *int8 { - return &i -} - -// Int32ToPtr returns the pointer to an int32. -// -// Deprecated; use pointer.Of instead. -func Int32ToPtr(i int32) *int32 { - return &i -} - -// Int64ToPtr returns the pointer to an int64. -// -// Deprecated; use pointer.Of instead. 
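// A minimal sketch of the escaping behavior documented in the reader.go comment
// above, assuming only the exported escapingio.NewReader/Handler API shown in
// this diff; the "disconnect on ~." handler below is hypothetical and purely
// illustrative, not Nomad's actual exec session handling.
package main

import (
	"io"
	"os"

	"github.com/hashicorp/nomad/helper/escapingio"
)

func main() {
	// After a newline: "~~" emits a single '~', "~X" invokes the handler with
	// 'X', and a trailing "~" at end of stream is passed through unchanged.
	r := escapingio.NewReader(os.Stdin, '~', func(c byte) bool {
		if c == '.' {
			os.Exit(0) // hypothetical: treat "~." as a disconnect request
		}
		return false // propagate any other escape sequence unmodified
	})
	io.Copy(os.Stdout, r)
}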
-func Int64ToPtr(i int64) *int64 { - return &i -} - -// Uint64ToPtr returns the pointer to an uint64. -// -// Deprecated; use pointer.Of instead. -func Uint64ToPtr(u uint64) *uint64 { - return &u -} - -// UintToPtr returns the pointer to an uint. -// -// Deprecated; use pointer.Of instead. -func UintToPtr(u uint) *uint { - return &u -} - -// StringToPtr returns the pointer to a string. -// -// Deprecated; use pointer.Of instead. -func StringToPtr(str string) *string { - return &str -} - -// TimeToPtr returns the pointer to a time.Duration. -// -// Deprecated; use pointer.Of instead. -func TimeToPtr(t time.Duration) *time.Duration { - return &t -} - // CompareTimePtrs return true if a is the same as b. func CompareTimePtrs(a, b *time.Duration) bool { if a == nil || b == nil { @@ -140,13 +82,6 @@ func CompareTimePtrs(a, b *time.Duration) bool { return *a == *b } -// Float64ToPtr returns the pointer to an float64. -// -// Deprecated; use pointer.Of instead. -func Float64ToPtr(f float64) *float64 { - return &f -} - // Min returns the minimum of a and b. func Min[T constraints.Ordered](a, b T) T { if a < b { @@ -163,36 +98,6 @@ func Max[T constraints.Ordered](a, b T) T { return b } -// IntMin returns the minimum of a and b. -// -// Deprecated; use Min instead. -func IntMin(a, b int) int { - if a < b { - return a - } - return b -} - -// IntMax returns the maximum of a and b. -// -// Deprecated; use Max instead. -func IntMax(a, b int) int { - if a > b { - return a - } - return b -} - -// Uint64Max returns the maximum of a and b. -// -// Deprecated; use Max instead. -func Uint64Max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - // MapStringStringSliceValueSet returns the set of values in a map[string][]string func MapStringStringSliceValueSet(m map[string][]string) []string { set := make(map[string]struct{}) @@ -361,9 +266,9 @@ func CompareMapStringString(a, b map[string]string) bool { // CopyMap creates a copy of m. Struct values are not deep copies. // -// If m is nil or contains no elements, the return value is nil. +// If m is nil the return value is nil. func CopyMap[M ~map[K]V, K comparable, V any](m M) M { - if len(m) == 0 { + if m == nil { return nil } @@ -374,16 +279,44 @@ func CopyMap[M ~map[K]V, K comparable, V any](m M) M { return result } +// DeepCopyMap creates a copy of m by calling Copy() on each value. +// +// If m is nil the return value is nil. +func DeepCopyMap[M ~map[K]V, K comparable, V Copyable[V]](m M) M { + if m == nil { + return nil + } + + result := make(M, len(m)) + for k, v := range m { + result[k] = v.Copy() + } + return result +} + +// CopySlice creates a deep copy of s. For slices with elements that do not +// implement Copy(), use slices.Clone. +func CopySlice[S ~[]E, E Copyable[E]](s S) S { + if s == nil { + return nil + } + + result := make(S, len(s)) + for i, v := range s { + result[i] = v.Copy() + } + return result +} + // CopyMapStringString creates a copy of m. // // Deprecated; use CopyMap instead. func CopyMapStringString(m map[string]string) map[string]string { - l := len(m) - if l == 0 { + if m == nil { return nil } - c := make(map[string]string, l) + c := make(map[string]string, len(m)) for k, v := range m { c[k] = v } @@ -394,12 +327,11 @@ func CopyMapStringString(m map[string]string) map[string]string { // // Deprecated; use CopyMap instead. 
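// A small sketch of the generic copy helpers introduced above, assuming the
// Copyable/CopyMap/DeepCopyMap/CopySlice signatures shown in this diff; the
// "config" type is hypothetical and exists only for illustration.
type config struct {
	labels map[string]string
}

// Copy satisfies Copyable[*config].
func (c *config) Copy() *config {
	if c == nil {
		return nil
	}
	return &config{labels: CopyMap(c.labels)}
}

func exampleCopies(src []*config, byName map[string]*config) ([]*config, map[string]*config) {
	// CopySlice and DeepCopyMap call Copy() on every element, so the clones
	// share no mutable state with the originals. A nil input yields nil, while
	// an empty (non-nil) map now round-trips as an empty map rather than nil.
	return CopySlice(src), DeepCopyMap(byName)
}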
func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { - l := len(m) - if l == 0 { + if m == nil { return nil } - c := make(map[string]struct{}, l) + c := make(map[string]struct{}, len(m)) for k := range m { c[k] = struct{}{} } @@ -410,12 +342,11 @@ func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { // // Deprecated; use CopyMap instead. func CopyMapStringInterface(m map[string]interface{}) map[string]interface{} { - l := len(m) - if l == 0 { + if m == nil { return nil } - c := make(map[string]interface{}, l) + c := make(map[string]interface{}, len(m)) for k, v := range m { c[k] = v } @@ -729,3 +660,45 @@ func ConvertSlice[A, B any](original []A, conversion func(a A) B) []B { } return result } + +// IsMethodHTTP returns whether s is a known HTTP method, ignoring case. +func IsMethodHTTP(s string) bool { + switch strings.ToUpper(s) { + case http.MethodGet: + case http.MethodHead: + case http.MethodPost: + case http.MethodPut: + case http.MethodPatch: + case http.MethodDelete: + case http.MethodConnect: + case http.MethodOptions: + case http.MethodTrace: + default: + return false + } + return true +} + +// EqualsFunc represents a type implementing the Equals method. +type EqualsFunc[A any] interface { + Equals(A) bool +} + +// ElementsEquals returns true if slices a and b contain the same elements (in +// no particular order) using the Equals function defined on their type for +// comparison. +func ElementsEquals[T EqualsFunc[T]](a, b []T) bool { + if len(a) != len(b) { + return false + } +OUTER: + for _, item := range a { + for _, other := range b { + if item.Equals(other) { + continue OUTER + } + } + return false + } + return true +} diff --git a/helper/funcs_test.go b/helper/funcs_test.go index be77678d661..cd71cb5c3a1 100644 --- a/helper/funcs_test.go +++ b/helper/funcs_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/helper/pointer" "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -68,7 +69,7 @@ func Test_CopyMap(t *testing.T) { t.Run("empty", func(t *testing.T) { m := make(map[string]int, 10) result := CopyMap(m) - must.Nil(t, result) + must.MapEq(t, map[string]int{}, result) }) t.Run("elements", func(t *testing.T) { @@ -136,15 +137,15 @@ func TestCompareTimePtrs(t *testing.T) { a := (*time.Duration)(nil) b := (*time.Duration)(nil) require.True(t, CompareTimePtrs(a, b)) - c := TimeToPtr(3 * time.Second) + c := pointer.Of(3 * time.Second) require.False(t, CompareTimePtrs(a, c)) require.False(t, CompareTimePtrs(c, a)) }) t.Run("not nil", func(t *testing.T) { - a := TimeToPtr(1 * time.Second) - b := TimeToPtr(1 * time.Second) - c := TimeToPtr(2 * time.Second) + a := pointer.Of(1 * time.Second) + b := pointer.Of(1 * time.Second) + c := pointer.Of(2 * time.Second) require.True(t, CompareTimePtrs(a, b)) require.False(t, CompareTimePtrs(a, c)) }) @@ -568,5 +569,73 @@ func Test_ConvertSlice(t *testing.T) { expectedOutput := []*wrapper{{id: 10}, {id: 13}, {id: 1987}, {id: 2020}} actualOutput := ConvertSlice(input, cFn) require.ElementsMatch(t, expectedOutput, actualOutput) + + }) +} + +func Test_IsMethodHTTP(t *testing.T) { + t.Run("is method", func(t *testing.T) { + cases := []string{ + "GET", "Get", "get", + "HEAD", "Head", "head", + "POST", "Post", "post", + "PUT", "Put", "put", + "PATCH", "Patch", "patch", + "DELETE", "Delete", "delete", + "CONNECT", "Connect", "connect", + "OPTIONS", "Options", "options", + "TRACE", "Trace", "trace", + } + for _, tc := range cases { + result := IsMethodHTTP(tc) + 
must.True(t, result) + } + }) + + t.Run("is not method", func(t *testing.T) { + not := []string{"GETTER", "!GET", ""} + for _, tc := range not { + result := IsMethodHTTP(tc) + must.False(t, result) + } + }) +} + +type employee struct { + id int + name string +} + +func (e *employee) Equals(o *employee) bool { + return e.id == o.id // name can be different +} + +func Test_ElementsEquals(t *testing.T) { + t.Run("empty", func(t *testing.T) { + a := []*employee(nil) + var b []*employee + must.True(t, ElementsEquals(a, b)) + must.True(t, ElementsEquals(b, a)) + }) + + t.Run("different sizes", func(t *testing.T) { + a := []*employee{{1, "mitchell"}, {2, "armon"}, {3, "jack"}} + b := []*employee{{1, "mitchell"}, {2, "armon"}} + must.False(t, ElementsEquals(a, b)) + must.False(t, ElementsEquals(b, a)) + }) + + t.Run("equal", func(t *testing.T) { + a := []*employee{{1, "mitchell"}, {2, "armon"}, {3, "jack"}} + b := []*employee{{1, "M.H."}, {2, "A.D."}, {3, "J.P."}} + must.True(t, ElementsEquals(a, b)) + must.True(t, ElementsEquals(b, a)) + }) + + t.Run("different", func(t *testing.T) { + a := []*employee{{1, "mitchell"}, {2, "armon"}, {3, "jack"}} + b := []*employee{{0, "mitchell."}, {2, "armon"}, {3, "jack"}} + must.False(t, ElementsEquals(a, b)) + must.False(t, ElementsEquals(b, a)) }) } diff --git a/helper/pluginutils/hclutils/testing.go b/helper/pluginutils/hclutils/testing.go index 469cec7d5b8..49211f010f1 100644 --- a/helper/pluginutils/hclutils/testing.go +++ b/helper/pluginutils/hclutils/testing.go @@ -40,12 +40,8 @@ func (b *HCLParser) WithVars(vars map[string]cty.Value) *HCLParser { // out parameter should be a golang reference to a driver specific TaskConfig reference. // The function terminates and reports errors if any is found during conversion. // -// Sample invocation would be -// -// ``` -// var tc *TaskConfig -// hclutils.NewConfigParser(spec).ParseJson(t, configString, &tc) -// ``` +// var tc *TaskConfig +// hclutils.NewConfigParser(spec).ParseJson(t, configString, &tc) func (b *HCLParser) ParseJson(t *testing.T, configStr string, out interface{}) { config := JsonConfigToInterface(t, configStr) b.parse(t, config, out) @@ -55,7 +51,7 @@ func (b *HCLParser) ParseJson(t *testing.T, configStr string, out interface{}) { // out parameter should be a golang reference to a driver specific TaskConfig reference. // The function terminates and reports errors if any is found during conversion. 
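// A brief sketch of the two helpers added above, assuming only the
// IsMethodHTTP and ElementsEquals signatures from this diff; the "port" type
// is hypothetical and exists only to satisfy EqualsFunc.
type port struct {
	label string
	value int
}

// Equals satisfies EqualsFunc[*port]; only the label is compared.
func (p *port) Equals(o *port) bool { return p.label == o.label }

func exampleHelpers() {
	_ = IsMethodHTTP("get")    // true: method names match case-insensitively
	_ = IsMethodHTTP("GETTER") // false: not a known HTTP method

	a := []*port{{"http", 80}, {"https", 443}}
	b := []*port{{"https", 8443}, {"http", 8080}}
	_ = ElementsEquals(a, b) // true: same labels, order and other fields ignored
}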
// -// Sample invocation would be +// # Sample invocation would be // // ``` // var tc *TaskConfig diff --git a/helper/pluginutils/loader/testing.go b/helper/pluginutils/loader/testing.go index f811804ecc7..75d2432fe6d 100644 --- a/helper/pluginutils/loader/testing.go +++ b/helper/pluginutils/loader/testing.go @@ -6,7 +6,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/base" ) @@ -51,7 +51,7 @@ func (m *MockInstance) ApiVersion() string { return // passed inst as the plugin func MockBasicExternalPlugin(inst interface{}, apiVersion string) *MockInstance { var killedLock sync.Mutex - killed := helper.BoolToPtr(false) + killed := pointer.Of(false) return &MockInstance{ InternalPlugin: false, KillF: func() { diff --git a/helper/pointer/pointer.go b/helper/pointer/pointer.go index 766b4ba60a9..8fa960caf71 100644 --- a/helper/pointer/pointer.go +++ b/helper/pointer/pointer.go @@ -5,3 +5,12 @@ package pointer func Of[A any](a A) *A { return &a } + +// Copy returns a new pointer to a. +func Copy[A any](a *A) *A { + if a == nil { + return nil + } + na := *a + return &na +} diff --git a/helper/pool/pool.go b/helper/pool/pool.go index e554c93d490..8f111b92d71 100644 --- a/helper/pool/pool.go +++ b/helper/pool/pool.go @@ -47,7 +47,7 @@ type Conn struct { addr net.Addr session *yamux.Session - lastUsed time.Time + lastUsed atomic.Pointer[time.Time] pool *ConnPool @@ -58,7 +58,8 @@ type Conn struct { // markForUse does all the bookkeeping required to ready a connection for use, // and ensure that active connections don't get reaped. func (c *Conn) markForUse() { - c.lastUsed = time.Now() + now := time.Now() + c.lastUsed.Store(&now) atomic.AddInt32(&c.refCount, 1) } @@ -402,9 +403,12 @@ func (p *ConnPool) getNewConn(region string, addr net.Addr) (*Conn, error) { addr: addr, session: session, clients: list.New(), - lastUsed: time.Now(), + lastUsed: atomic.Pointer[time.Time]{}, pool: p, } + + now := time.Now() + c.lastUsed.Store(&now) return c, nil } @@ -526,7 +530,7 @@ func (p *ConnPool) reap() { now := time.Now() for host, conn := range p.pool { // Skip recently used connections - if now.Sub(conn.lastUsed) < p.maxTime { + if now.Sub(*conn.lastUsed.Load()) < p.maxTime { continue } diff --git a/helper/raftutil/msgtypes.go b/helper/raftutil/msgtypes.go index d0249625f19..a1156db6e46 100644 --- a/helper/raftutil/msgtypes.go +++ b/helper/raftutil/msgtypes.go @@ -54,8 +54,7 @@ var msgTypeNames = map[structs.MessageType]string{ structs.ServiceRegistrationUpsertRequestType: "ServiceRegistrationUpsertRequestType", structs.ServiceRegistrationDeleteByIDRequestType: "ServiceRegistrationDeleteByIDRequestType", structs.ServiceRegistrationDeleteByNodeIDRequestType: "ServiceRegistrationDeleteByNodeIDRequestType", - structs.SecureVariableUpsertRequestType: "SecureVariableUpsertRequestType", - structs.SecureVariableDeleteRequestType: "SecureVariableDeleteRequestType", + structs.SVApplyStateRequestType: "SVApplyStateRequestType", structs.RootKeyMetaUpsertRequestType: "RootKeyMetaUpsertRequestType", structs.RootKeyMetaDeleteRequestType: "RootKeyMetaDeleteRequestType", structs.ACLRolesUpsertRequestType: "ACLRolesUpsertRequestType", diff --git a/internal/testing/apitests/tasks_test.go b/internal/testing/apitests/tasks_test.go index b4cbe8cf96e..847fb4cd544 100644 --- a/internal/testing/apitests/tasks_test.go +++ b/internal/testing/apitests/tasks_test.go @@ -6,6 +6,7 
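// A condensed sketch of the lock-free timestamp pattern adopted for
// Conn.lastUsed in pool.go above (sync/atomic.Pointer requires Go 1.19+); the
// "conn" type here is hypothetical and only mirrors the Store/Load calls in
// this diff.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type conn struct {
	lastUsed atomic.Pointer[time.Time]
}

// markForUse publishes the current time without holding a lock.
func (c *conn) markForUse() {
	now := time.Now()
	c.lastUsed.Store(&now)
}

// idleFor reads the timestamp atomically, so a reaper goroutine and
// markForUse can run concurrently without a data race on the time value.
func (c *conn) idleFor() time.Duration {
	return time.Since(*c.lastUsed.Load())
}

func main() {
	c := &conn{}
	c.markForUse()
	fmt.Println(c.idleFor())
}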
@@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) @@ -27,130 +28,130 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) { jobReschedulePolicy: nil, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts), - Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval), - Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay), - DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction), - MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay), - Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited), + Attempts: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Attempts), + Interval: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Interval), + Delay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Delay), + DelayFunction: pointer.Of(structs.DefaultBatchJobReschedulePolicy.DelayFunction), + MaxDelay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.MaxDelay), + Unlimited: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Unlimited), }, }, { desc: "Empty job reschedule policy", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - MaxDelay: timeToPtr(0), - DelayFunction: stringToPtr(""), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(0), + Interval: pointer.Of(time.Duration(0)), + Delay: pointer.Of(time.Duration(0)), + MaxDelay: pointer.Of(time.Duration(0)), + DelayFunction: pointer.Of(""), + Unlimited: pointer.Of(false), }, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - MaxDelay: timeToPtr(0), - DelayFunction: stringToPtr(""), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(0), + Interval: pointer.Of(time.Duration(0)), + Delay: pointer.Of(time.Duration(0)), + MaxDelay: pointer.Of(time.Duration(0)), + DelayFunction: pointer.Of(""), + Unlimited: pointer.Of(false), }, }, { desc: "Inherit from job", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(20 * time.Second), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(1), + Interval: pointer.Of(20 * time.Second), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(20 * time.Second), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(1), + Interval: pointer.Of(20 * time.Second), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Set in task", jobReschedulePolicy: nil, taskReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Interval: timeToPtr(2 * time.Minute), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + 
Interval: pointer.Of(2 * time.Minute), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Interval: timeToPtr(2 * time.Minute), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Interval: pointer.Of(2 * time.Minute), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Merge from job", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), + Attempts: pointer.Of(1), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), }, taskReschedulePolicy: &api.ReschedulePolicy{ - Interval: timeToPtr(5 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Interval: pointer.Of(5 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(5 * time.Minute), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(1), + Interval: pointer.Of(5 * time.Minute), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Override from group", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - MaxDelay: timeToPtr(10 * time.Second), + Attempts: pointer.Of(1), + MaxDelay: pointer.Of(10 * time.Second), }, taskReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(20 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(20 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(20 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Interval: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Interval), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(20 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Attempts from job, default interval", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), + Attempts: pointer.Of(1), }, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval), - Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay), - DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction), - MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay), - Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited), + Attempts: pointer.Of(1), + Interval: 
pointer.Of(structs.DefaultBatchJobReschedulePolicy.Interval), + Delay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Delay), + DelayFunction: pointer.Of(structs.DefaultBatchJobReschedulePolicy.DelayFunction), + MaxDelay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.MaxDelay), + Unlimited: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Unlimited), }, }, } @@ -158,13 +159,13 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { job := &api.Job{ - ID: stringToPtr("test"), + ID: pointer.Of("test"), Reschedule: tc.jobReschedulePolicy, - Type: stringToPtr(api.JobTypeBatch), + Type: pointer.Of(api.JobTypeBatch), } job.Canonicalize() tg := &api.TaskGroup{ - Name: stringToPtr("foo"), + Name: pointer.Of("foo"), ReschedulePolicy: tc.taskReschedulePolicy, } tg.Canonicalize(job) diff --git a/internal/testing/apitests/util_test.go b/internal/testing/apitests/util_test.go index d751c04eb2c..d6046a2e6dd 100644 --- a/internal/testing/apitests/util_test.go +++ b/internal/testing/apitests/util_test.go @@ -2,31 +2,11 @@ package apitests import ( "testing" - "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper/pointer" ) -// boolToPtr returns the pointer to a boolean -func boolToPtr(b bool) *bool { - return &b -} - -// intToPtr returns the pointer to an int -func intToPtr(i int) *int { - return &i -} - -// timeToPtr returns the pointer to a time stamp -func timeToPtr(t time.Duration) *time.Duration { - return &t -} - -// stringToPtr returns the pointer to a string -func stringToPtr(str string) *string { - return &str -} - func assertQueryMeta(t *testing.T, qm *api.QueryMeta) { t.Helper() if qm.LastIndex == 0 { @@ -48,18 +28,18 @@ func testJob() *api.Job { task := api.NewTask("task1", "exec"). SetConfig("command", "/bin/sleep"). Require(&api.Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointer.Of(100), + MemoryMB: pointer.Of(256), }). SetLogConfig(&api.LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointer.Of(1), + MaxFileSizeMB: pointer.Of(2), }) group := api.NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&api.EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointer.Of(25), }) job := api.NewBatchJob("job1", "redis", "global", 1). 
diff --git a/jobspec/helper_test.go b/jobspec/helper_test.go index f7854c80195..4f52f4de565 100644 --- a/jobspec/helper_test.go +++ b/jobspec/helper_test.go @@ -7,18 +7,3 @@ package jobspec func intToPtr(i int) *int { return &i } - -// int8ToPtr returns the pointer to an int8 -func int8ToPtr(i int8) *int8 { - return &i -} - -// int64ToPtr returns the pointer to an int -func int64ToPtr(i int64) *int64 { - return &i -} - -// Uint64ToPtr returns the pointer to an uint64 -func uint64ToPtr(u uint64) *uint64 { - return &u -} diff --git a/jobspec/parse_task.go b/jobspec/parse_task.go index 4bc77c310f2..a43ffded643 100644 --- a/jobspec/parse_task.go +++ b/jobspec/parse_task.go @@ -433,14 +433,25 @@ func parseArtifactOption(result map[string]string, list *ast.ObjectList) error { func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error { for _, o := range list.Elem().Items { + // we'll need a list of all ast objects for later + var listVal *ast.ObjectList + if ot, ok := o.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return fmt.Errorf("should be an object") + } + // Check for invalid keys valid := []string{ "change_mode", "change_signal", + "change_script", "data", "destination", "left_delimiter", "perms", + "uid", + "gid", "right_delimiter", "source", "splay", @@ -455,6 +466,7 @@ func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error { if err := hcl.DecodeObject(&m, o.Val); err != nil { return err } + delete(m, "change_script") // change_script is its own object templ := &api.Template{ ChangeMode: stringToPtr("restart"), @@ -474,6 +486,40 @@ func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error { return err } + // If we have change_script, parse it + if o := listVal.Filter("change_script"); len(o.Items) > 0 { + if len(o.Items) != 1 { + return fmt.Errorf( + "change_script -> expected single stanza, got %d", len(o.Items), + ) + } + var m map[string]interface{} + changeScriptBlock := o.Items[0] + + // check for invalid fields + valid := []string{"command", "args", "timeout", "fail_on_error"} + if err := checkHCLKeys(changeScriptBlock.Val, valid); err != nil { + return multierror.Prefix(err, "change_script ->") + } + + if err := hcl.DecodeObject(&m, changeScriptBlock.Val); err != nil { + return err + } + + templ.ChangeScript = &api.ChangeScript{} + dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + WeaklyTypedInput: true, + Result: templ.ChangeScript, + }) + if err != nil { + return err + } + if err := dec.Decode(m); err != nil { + return err + } + } + *result = append(*result, templ) } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 45d624aa22f..82819e9a080 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -22,8 +22,23 @@ const ( // templateChangeModeRestart marks that the task should be restarted if the templateChangeModeRestart = "restart" + + // templateChangeModeScript marks that ac script should be executed on + // template re-render + templateChangeModeScript = "script" ) +// Helper functions below are only used by this test suite +func int8ToPtr(i int8) *int8 { + return &i +} +func uint64ToPtr(u uint64) *uint64 { + return &u +} +func int64ToPtr(i int64) *int64 { + return &i +} + func TestParse(t *testing.T) { ci.Parallel(t) @@ -369,9 +384,17 @@ func TestParse(t *testing.T) { { SourcePath: stringToPtr("bar"), DestPath: stringToPtr("bar"), - ChangeMode: stringToPtr(templateChangeModeRestart), + ChangeMode: 
stringToPtr(templateChangeModeScript), + ChangeScript: &api.ChangeScript{ + Args: []string{"-debug", "-verbose"}, + Command: stringToPtr("/bin/foo"), + Timeout: timeToPtr(5 * time.Second), + FailOnError: boolToPtr(false), + }, Splay: timeToPtr(5 * time.Second), Perms: stringToPtr("777"), + Uid: intToPtr(1001), + Gid: intToPtr(20), LeftDelim: stringToPtr("--"), RightDelim: stringToPtr("__"), }, diff --git a/jobspec/test-fixtures/basic.hcl b/jobspec/test-fixtures/basic.hcl index 20a8171e4d0..a749bf91dda 100644 --- a/jobspec/test-fixtures/basic.hcl +++ b/jobspec/test-fixtures/basic.hcl @@ -315,9 +315,18 @@ job "binstore-storagelocker" { } template { - source = "bar" - destination = "bar" + source = "bar" + destination = "bar" + change_mode = "script" + change_script { + command = "/bin/foo" + args = ["-debug", "-verbose"] + timeout = "5s" + fail_on_error = false + } perms = "777" + uid = 1001 + gid = 20 left_delimiter = "--" right_delimiter = "__" } diff --git a/jobspec2/functions.go b/jobspec2/functions.go index 6cb0949a39a..9e489902d22 100644 --- a/jobspec2/functions.go +++ b/jobspec2/functions.go @@ -22,7 +22,6 @@ import ( // basedir is used with file functions and allows a user to reference a file // using local path. Usually basedir is the directory in which the config file // is located -// func Functions(basedir string, allowFS bool) map[string]function.Function { funcs := map[string]function.Function{ "abs": stdlib.AbsoluteFunc, diff --git a/jobspec2/hcl_conversions.go b/jobspec2/hcl_conversions.go index bb96e354747..2afd71ed2b7 100644 --- a/jobspec2/hcl_conversions.go +++ b/jobspec2/hcl_conversions.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hcldec" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper/pointer" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/gocty" ) @@ -116,7 +117,7 @@ func decodeAffinity(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Di weight := v.GetAttr("weight") if !weight.IsNull() { w, _ := weight.AsBigFloat().Int64() - a.Weight = int8ToPtr(int8(w)) + a.Weight = pointer.Of(int8(w)) } // If "version" is provided, set the operand @@ -341,9 +342,10 @@ func decodeTask(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagno // // ```hcl // # block assignment -// env { -// ENV = "production" -// } +// +// env { +// ENV = "production" +// } // // # as attribute // env = { ENV: "production" } @@ -357,7 +359,6 @@ func decodeTask(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagno // found map, the remaining body and diagnostics. If the named field is found // with block syntax, it returns a nil map, and caller falls back to reading // with block syntax. -// func decodeAsAttribute(body hcl.Body, ctx *hcl.EvalContext, name string) (map[string]string, hcl.Body, hcl.Diagnostics) { b, remain, diags := body.PartialContent(&hcl.BodySchema{ Attributes: []hcl.AttributeSchema{ diff --git a/jobspec2/hclutil/blockattrs.go b/jobspec2/hclutil/blockattrs.go index dee3bb0c77e..6a70ae1958a 100644 --- a/jobspec2/hclutil/blockattrs.go +++ b/jobspec2/hclutil/blockattrs.go @@ -14,17 +14,21 @@ import ( // More concretely, it changes the following: // // ``` -// config { -// meta { ... } -// } +// +// config { +// meta { ... } +// } +// // ``` // // to // // ``` -// config { -// meta = { ... } # <- attribute now -// } +// +// config { +// meta = { ... 
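// A short sketch of the api.Template equivalent of the change_script block in
// the basic.hcl fixture above, assuming the field names visible in this diff
// (ChangeMode "script", ChangeScript, Uid, Gid); the values simply mirror the
// fixture and are not a recommended configuration.
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper/pointer"
)

func main() {
	tmpl := &api.Template{
		SourcePath: pointer.Of("bar"),
		DestPath:   pointer.Of("bar"),
		// "script" runs change_script on re-render instead of restarting or
		// signalling the task.
		ChangeMode: pointer.Of("script"),
		ChangeScript: &api.ChangeScript{
			Command:     pointer.Of("/bin/foo"),
			Args:        []string{"-debug", "-verbose"},
			Timeout:     pointer.Of(5 * time.Second),
			FailOnError: pointer.Of(false),
		},
		Perms: pointer.Of("777"),
		Uid:   pointer.Of(1001),
		Gid:   pointer.Of(20),
	}
	fmt.Printf("%+v\n", tmpl.ChangeScript)
}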
} # <- attribute now +// } +// // ``` func BlocksAsAttrs(body hcl.Body) hcl.Body { if hclb, ok := body.(*hcls.Body); ok { diff --git a/jobspec2/parse_job.go b/jobspec2/parse_job.go index 9b533874f50..a4ee5034366 100644 --- a/jobspec2/parse_job.go +++ b/jobspec2/parse_job.go @@ -4,6 +4,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper/pointer" ) func normalizeJob(jc *jobConfig) { @@ -59,10 +60,10 @@ func normalizeVault(v *api.Vault) { } if v.Env == nil { - v.Env = boolToPtr(true) + v.Env = pointer.Of(true) } if v.ChangeMode == nil { - v.ChangeMode = stringToPtr("restart") + v.ChangeMode = pointer.Of("restart") } } @@ -102,29 +103,32 @@ func normalizeTemplates(templates []*api.Template) { for _, t := range templates { if t.ChangeMode == nil { - t.ChangeMode = stringToPtr("restart") + t.ChangeMode = pointer.Of("restart") } if t.Perms == nil { - t.Perms = stringToPtr("0644") + t.Perms = pointer.Of("0644") } if t.Splay == nil { - t.Splay = durationToPtr(5 * time.Second) + t.Splay = pointer.Of(5 * time.Second) } + normalizeChangeScript(t.ChangeScript) } } -func int8ToPtr(v int8) *int8 { - return &v -} +func normalizeChangeScript(ch *api.ChangeScript) { + if ch == nil { + return + } -func boolToPtr(v bool) *bool { - return &v -} + if ch.Args == nil { + ch.Args = []string{} + } -func stringToPtr(v string) *string { - return &v -} + if ch.Timeout == nil { + ch.Timeout = pointer.Of(5 * time.Second) + } -func durationToPtr(v time.Duration) *time.Duration { - return &v + if ch.FailOnError == nil { + ch.FailOnError = pointer.Of(false) + } } diff --git a/jobspec2/parse_test.go b/jobspec2/parse_test.go index b3e7e9963ab..806412cad48 100644 --- a/jobspec2/parse_test.go +++ b/jobspec2/parse_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/jobspec" "github.com/stretchr/testify/require" ) @@ -644,13 +645,13 @@ job "job-webserver" { { "prod", &api.Job{ - ID: stringToPtr("job-webserver"), - Name: stringToPtr("job-webserver"), + ID: pointer.Of("job-webserver"), + Name: pointer.Of("job-webserver"), Datacenters: []string{"prod-dc1", "prod-dc2"}, TaskGroups: []*api.TaskGroup{ { - Name: stringToPtr("group-webserver"), - Count: intToPtr(20), + Name: pointer.Of("group-webserver"), + Count: pointer.Of(20), Tasks: []*api.Task{ { @@ -670,13 +671,13 @@ job "job-webserver" { { "staging", &api.Job{ - ID: stringToPtr("job-webserver"), - Name: stringToPtr("job-webserver"), + ID: pointer.Of("job-webserver"), + Name: pointer.Of("job-webserver"), Datacenters: []string{"dc1"}, TaskGroups: []*api.TaskGroup{ { - Name: stringToPtr("group-webserver"), - Count: intToPtr(3), + Name: pointer.Of("group-webserver"), + Count: pointer.Of(3), Tasks: []*api.Task{ { @@ -696,13 +697,13 @@ job "job-webserver" { { "unknown", &api.Job{ - ID: stringToPtr("job-webserver"), - Name: stringToPtr("job-webserver"), + ID: pointer.Of("job-webserver"), + Name: pointer.Of("job-webserver"), Datacenters: []string{}, TaskGroups: []*api.TaskGroup{ { - Name: stringToPtr("group-webserver"), - Count: intToPtr(0), + Name: pointer.Of("group-webserver"), + Count: pointer.Of(0), Tasks: []*api.Task{ { @@ -1005,11 +1006,11 @@ func TestParseServiceCheck(t *testing.T) { require.NoError(t, err) expectedJob := &api.Job{ - ID: stringToPtr("group_service_check_script"), - Name: stringToPtr("group_service_check_script"), + ID: pointer.Of("group_service_check_script"), + Name: 
pointer.Of("group_service_check_script"), TaskGroups: []*api.TaskGroup{ { - Name: stringToPtr("group"), + Name: pointer.Of("group"), Services: []*api.Service{ { Name: "foo-service", @@ -1033,7 +1034,7 @@ func TestParseServiceCheck(t *testing.T) { func TestWaitConfig(t *testing.T) { ci.Parallel(t) - + hclBytes, err := os.ReadFile("test-fixtures/template-wait-config.hcl") require.NoError(t, err) diff --git a/jobspec2/types.variables.go b/jobspec2/types.variables.go index 688018a19da..8e32ded2083 100644 --- a/jobspec2/types.variables.go +++ b/jobspec2/types.variables.go @@ -78,7 +78,6 @@ func (v *Variable) GoString() string { // validateValue ensures that all of the configured custom validations for a // variable value are passing. -// func (v *Variable) validateValue(val VariableAssignment) (diags hcl.Diagnostics) { if len(v.Validations) == 0 { return nil diff --git a/lib/circbufwriter/writer_test.go b/lib/circbufwriter/writer_test.go index d48a03ddf15..cad88c7cece 100644 --- a/lib/circbufwriter/writer_test.go +++ b/lib/circbufwriter/writer_test.go @@ -70,7 +70,7 @@ func TestWriter_BlockingWrite(t *testing.T) { func TestWriter_CloseClose(t *testing.T) { ci.Parallel(t) - + require := require.New(t) w := New(ioutil.Discard, 64) require.NoError(w.Close()) diff --git a/lib/delayheap/delay_heap_test.go b/lib/delayheap/delay_heap_test.go index 28d524591e8..08ce83350fb 100644 --- a/lib/delayheap/delay_heap_test.go +++ b/lib/delayheap/delay_heap_test.go @@ -76,7 +76,7 @@ func TestDelayHeap_PushPop(t *testing.T) { func TestDelayHeap_Update(t *testing.T) { ci.Parallel(t) - + delayHeap := NewDelayHeap() now := time.Now() require := require.New(t) diff --git a/nomad/acl.go b/nomad/acl.go index dcdc0799164..7c14bfa1cfb 100644 --- a/nomad/acl.go +++ b/nomad/acl.go @@ -225,27 +225,33 @@ func (s *Server) resolvePoliciesForClaims(claims *structs.IdentityClaims) ([]*st return nil, fmt.Errorf("allocation does not exist") } - // Find any implicit policies associated with this task - policies := []*structs.ACLPolicy{} - implicitPolicyNames := []string{ - fmt.Sprintf("_:%s/%s/%s/%s", alloc.Namespace, alloc.Job.ID, alloc.TaskGroup, claims.TaskName), - fmt.Sprintf("_:%s/%s/%s", alloc.Namespace, alloc.Job.ID, alloc.TaskGroup), - fmt.Sprintf("_:%s/%s", alloc.Namespace, alloc.Job.ID), - fmt.Sprintf("_:%s", alloc.Namespace), + // Find any policies attached to the job + iter, err := snap.ACLPolicyByJob(nil, alloc.Namespace, alloc.Job.ID) + if err != nil { + return nil, err } - - for _, policyName := range implicitPolicyNames { - policy, err := snap.ACLPolicyByName(nil, policyName) - if err != nil { - return nil, err + policies := []*structs.ACLPolicy{} + for { + raw := iter.Next() + if raw == nil { + break } - if policy == nil { - // Ignore policies that don't exist, since they don't - // grant any more privilege + policy := raw.(*structs.ACLPolicy) + if policy.JobACL == nil { continue } - policies = append(policies, policy) + switch { + case policy.JobACL.Group == "": + policies = append(policies, policy) + case policy.JobACL.Group != alloc.TaskGroup: + continue // don't bother checking task + case policy.JobACL.Task == "": + policies = append(policies, policy) + case policy.JobACL.Task == claims.TaskName: + policies = append(policies, policy) + } } + return policies, nil } diff --git a/nomad/acl_test.go b/nomad/acl_test.go index 4be4e1f4b7d..3286ed2653a 100644 --- a/nomad/acl_test.go +++ b/nomad/acl_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" 
"github.com/hashicorp/nomad/testutil" + "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -379,3 +380,116 @@ func TestResolveSecretToken(t *testing.T) { }) } } + +func TestResolveClaims(t *testing.T) { + ci.Parallel(t) + + srv, _, cleanup := TestACLServer(t, nil) + defer cleanup() + + store := srv.fsm.State() + index := uint64(100) + + alloc := mock.Alloc() + + claims := &structs.IdentityClaims{ + Namespace: alloc.Namespace, + JobID: alloc.Job.ID, + AllocationID: alloc.ID, + TaskName: alloc.Job.TaskGroups[0].Tasks[0].Name, + } + + // unrelated policy + policy0 := mock.ACLPolicy() + + // policy for job + policy1 := mock.ACLPolicy() + policy1.JobACL = &structs.JobACL{ + Namespace: claims.Namespace, + JobID: claims.JobID, + } + + // policy for job and group + policy2 := mock.ACLPolicy() + policy2.JobACL = &structs.JobACL{ + Namespace: claims.Namespace, + JobID: claims.JobID, + Group: alloc.Job.TaskGroups[0].Name, + } + + // policy for job and group and task + policy3 := mock.ACLPolicy() + policy3.JobACL = &structs.JobACL{ + Namespace: claims.Namespace, + JobID: claims.JobID, + Group: alloc.Job.TaskGroups[0].Name, + Task: claims.TaskName, + } + + // policy for job and group but different task + policy4 := mock.ACLPolicy() + policy4.JobACL = &structs.JobACL{ + Namespace: claims.Namespace, + JobID: claims.JobID, + Group: alloc.Job.TaskGroups[0].Name, + Task: "another", + } + + // policy for job but different group + policy5 := mock.ACLPolicy() + policy5.JobACL = &structs.JobACL{ + Namespace: claims.Namespace, + JobID: claims.JobID, + Group: "another", + } + + // policy for same namespace but different job + policy6 := mock.ACLPolicy() + policy6.JobACL = &structs.JobACL{ + Namespace: claims.Namespace, + JobID: "another", + } + + // policy for same job in different namespace + policy7 := mock.ACLPolicy() + policy7.JobACL = &structs.JobACL{ + Namespace: "another", + JobID: claims.JobID, + } + + index++ + err := store.UpsertACLPolicies(structs.MsgTypeTestSetup, index, []*structs.ACLPolicy{ + policy0, policy1, policy2, policy3, policy4, policy5, policy6, policy7}) + must.NoError(t, err) + + aclObj, err := srv.ResolveClaims(claims) + must.Nil(t, aclObj) + must.EqError(t, err, "allocation does not exist") + + // upsert the allocation + index++ + err = store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) + must.NoError(t, err) + + aclObj, err = srv.ResolveClaims(claims) + must.NoError(t, err) + must.NotNil(t, aclObj) + + // Check that the ACL object looks reasonable + must.False(t, aclObj.IsManagement()) + must.True(t, aclObj.AllowNamespaceOperation("default", acl.NamespaceCapabilityListJobs)) + must.False(t, aclObj.AllowNamespaceOperation("other", acl.NamespaceCapabilityListJobs)) + + // Resolve the same claim again, should get cache value + aclObj2, err := srv.ResolveClaims(claims) + must.NoError(t, err) + must.NotNil(t, aclObj) + must.Eq(t, aclObj, aclObj2, must.Sprintf("expected cached value")) + + policies, err := srv.resolvePoliciesForClaims(claims) + must.NoError(t, err) + must.Len(t, 3, policies) + must.Contains(t, policies, policy1) + must.Contains(t, policies, policy2) + must.Contains(t, policies, policy3) +} diff --git a/nomad/alloc_endpoint.go b/nomad/alloc_endpoint.go index 30957db0946..79745bb29b1 100644 --- a/nomad/alloc_endpoint.go +++ b/nomad/alloc_endpoint.go @@ -11,7 +11,7 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" + 
"github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/state/paginator" @@ -311,8 +311,8 @@ func (a *Alloc) Stop(args *structs.AllocStopRequest, reply *structs.AllocStopRes Evals: []*structs.Evaluation{eval}, Allocs: map[string]*structs.DesiredTransition{ args.AllocID: { - Migrate: helper.BoolToPtr(true), - NoShutdownDelay: helper.BoolToPtr(args.NoShutdownDelay), + Migrate: pointer.Of(true), + NoShutdownDelay: pointer.Of(args.NoShutdownDelay), }, }, } diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 63a43774d12..3ed12965b36 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -8,7 +8,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -1047,7 +1047,7 @@ func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2})) t1 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } // Update the allocs desired status diff --git a/nomad/blocked_evals.go b/nomad/blocked_evals.go index 2026e6826e3..0943ac0b910 100644 --- a/nomad/blocked_evals.go +++ b/nomad/blocked_evals.go @@ -290,7 +290,7 @@ func latestEvalIndex(eval *structs.Evaluation) uint64 { return 0 } - return helper.Uint64Max(eval.CreateIndex, eval.SnapshotIndex) + return helper.Max(eval.CreateIndex, eval.SnapshotIndex) } // missedUnblock returns whether an evaluation missed an unblock while it was in @@ -413,11 +413,18 @@ func (b *BlockedEvals) Unblock(computedClass string, index uint64) { // block calls in case the evaluation was in the scheduler when a trigger // occurred. b.unblockIndexes[computedClass] = index + + // Capture chan in lock as Flush overwrites it + ch := b.capacityChangeCh + done := b.stopCh b.l.Unlock() - b.capacityChangeCh <- &capacityUpdate{ + select { + case <-done: + case ch <- &capacityUpdate{ computedClass: computedClass, index: index, + }: } } @@ -441,11 +448,16 @@ func (b *BlockedEvals) UnblockQuota(quota string, index uint64) { // block calls in case the evaluation was in the scheduler when a trigger // occurred. b.unblockIndexes[quota] = index + ch := b.capacityChangeCh + done := b.stopCh b.l.Unlock() - b.capacityChangeCh <- &capacityUpdate{ + select { + case <-done: + case ch <- &capacityUpdate{ quotaChange: quota, index: index, + }: } } @@ -472,12 +484,16 @@ func (b *BlockedEvals) UnblockClassAndQuota(class, quota string, index uint64) { // Capture chan inside the lock to prevent a race with it getting reset // in Flush. 
ch := b.capacityChangeCh + done := b.stopCh b.l.Unlock() - ch <- &capacityUpdate{ + select { + case <-done: + case ch <- &capacityUpdate{ computedClass: class, quotaChange: quota, index: index, + }: } } diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 4d61b625b54..8a2f29e013c 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/nomad/command/agent/host" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/go-msgpack/codec" @@ -121,7 +121,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -130,7 +130,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { handleStreamResultError(err, nil, encoder) return } else if aclObj != nil && !aclObj.AllowAgentRead() { - handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) return } @@ -140,7 +140,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { } if logLevel == log.NoLevel { - handleStreamResultError(errors.New("Unknown log level"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("Unknown log level"), pointer.Of(int64(400)), encoder) return } @@ -153,7 +153,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { region := args.RequestRegion() if region == "" { - handleStreamResultError(fmt.Errorf("missing target RPC"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(fmt.Errorf("missing target RPC"), pointer.Of(int64(400)), encoder) return } if region != a.srv.config.Region { @@ -165,7 +165,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { if args.ServerID != "" { serverToFwd, err := a.forwardFor(args.ServerID, region) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if serverToFwd != nil { @@ -268,7 +268,7 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return } } @@ -317,7 +317,7 @@ func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.Moni state, srv, err := a.findClientConn(args.NodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -357,7 +357,7 @@ func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverPart serverConn, err := a.srv.streamingRpc(server, "Agent.Monitor") if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } defer serverConn.Close() @@ -365,7 +365,7 @@ func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverPart // Send the Request outEncoder := codec.NewEncoder(serverConn, structs.MsgpackHandle) if err := outEncoder.Encode(args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + 
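// A generic sketch of the shutdown-safe send pattern adopted in the Unblock
// methods above: snapshot the channel and stop channel while holding the lock
// (Flush may replace them), then select so a send never wedges after the
// consumer has stopped. The names here are illustrative, not the actual
// BlockedEvals fields.
package main

import "sync"

type notifier struct {
	mu     sync.Mutex
	ch     chan int
	stopCh chan struct{}
}

func (n *notifier) notify(v int) {
	n.mu.Lock()
	ch, done := n.ch, n.stopCh // capture under the lock
	n.mu.Unlock()

	select {
	case <-done: // consumer has shut down; drop the update
	case ch <- v: // otherwise hand the update to the consumer
	}
}

func main() {
	n := &notifier{ch: make(chan int, 1), stopCh: make(chan struct{})}
	n.notify(42)
}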
handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } diff --git a/nomad/client_alloc_endpoint.go b/nomad/client_alloc_endpoint.go index ee57e137342..6443049097f 100644 --- a/nomad/client_alloc_endpoint.go +++ b/nomad/client_alloc_endpoint.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -282,7 +282,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -295,7 +295,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { // Verify the arguments. if args.AllocID == "" { - handleStreamResultError(errors.New("missing AllocID"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("missing AllocID"), pointer.Of(int64(400)), encoder) return } @@ -308,7 +308,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { alloc, err := getAlloc(snap, args.AllocID) if structs.IsErrUnknownAllocation(err) { - handleStreamResultError(err, helper.Int64ToPtr(404), encoder) + handleStreamResultError(err, pointer.Of(int64(404)), encoder) return } if err != nil { @@ -331,18 +331,18 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { // Make sure Node is valid and new enough to support RPC node, err := snap.NodeByID(nil, nodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if node == nil { err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } @@ -356,7 +356,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go index 9291b2fcfc9..34dc7d6a535 100644 --- a/nomad/client_csi_endpoint_test.go +++ b/nomad/client_csi_endpoint_test.go @@ -541,8 +541,9 @@ func setupLocal(t *testing.T) (rpc.ClientCodec, func()) { require.NoError(t, err, "could not setup test client") } - node1 := c1.Node() - node1.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + node1 := c1.UpdateConfig(func(c *config.Config) { + c.Node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + }).Node req := &structs.NodeRegisterRequest{ Node: node1, @@ -568,7 +569,9 @@ func setupLocal(t *testing.T) (rpc.ClientCodec, func()) { } // update w/ plugin - node1.CSIControllerPlugins = plugins + node1 = c1.UpdateConfig(func(c *config.Config) { + c.Node.CSIControllerPlugins = plugins + }).Node s1.fsm.state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1) cleanup := func() { diff --git a/nomad/client_fs_endpoint.go b/nomad/client_fs_endpoint.go index 
6612bfdcedd..9e73fd60e9a 100644 --- a/nomad/client_fs_endpoint.go +++ b/nomad/client_fs_endpoint.go @@ -11,10 +11,10 @@ import ( metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -62,7 +62,7 @@ func forwardRegionStreamingRpc(fsrv *Server, conn io.ReadWriteCloser, } if allocResp.Alloc == nil { - handleStreamResultError(structs.NewErrUnknownAllocation(allocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(allocID), pointer.Of(int64(404)), encoder) return } @@ -71,7 +71,7 @@ func forwardRegionStreamingRpc(fsrv *Server, conn io.ReadWriteCloser, if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return @@ -213,7 +213,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -226,7 +226,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { // Verify the arguments. if args.AllocID == "" { - handleStreamResultError(errors.New("missing AllocID"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("missing AllocID"), pointer.Of(int64(400)), encoder) return } @@ -239,7 +239,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { alloc, err := getAlloc(snap, args.AllocID) if structs.IsErrUnknownAllocation(err) { - handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), pointer.Of(int64(404)), encoder) return } if err != nil { @@ -261,18 +261,18 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { // Make sure Node is valid and new enough to support RPC node, err := snap.NodeByID(nil, nodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if node == nil { err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } @@ -286,7 +286,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return @@ -331,7 +331,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -344,7 +344,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { // Verify the arguments. 
if args.AllocID == "" { - handleStreamResultError(structs.ErrMissingAllocID, helper.Int64ToPtr(400), encoder) + handleStreamResultError(structs.ErrMissingAllocID, pointer.Of(int64(400)), encoder) return } @@ -357,7 +357,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { alloc, err := getAlloc(snap, args.AllocID) if structs.IsErrUnknownAllocation(err) { - handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), pointer.Of(int64(404)), encoder) return } if err != nil { @@ -382,18 +382,18 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { // Make sure Node is valid and new enough to support RPC node, err := snap.NodeByID(nil, nodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if node == nil { err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } @@ -407,7 +407,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return diff --git a/nomad/config.go b/nomad/config.go index 9e5872b31e0..8533b2ddc10 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -8,9 +8,11 @@ import ( "time" log "github.com/hashicorp/go-hclog" + "golang.org/x/exp/slices" "github.com/hashicorp/memberlist" "github.com/hashicorp/nomad/helper/pluginutils/loader" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/deploymentwatcher" "github.com/hashicorp/nomad/nomad/structs" @@ -32,11 +34,6 @@ func DefaultRPCAddr() *net.TCPAddr { // Config is used to parameterize the server type Config struct { - // Bootstrapped indicates if Server has bootstrapped or not. - // Its value must be 0 (not bootstrapped) or 1 (bootstrapped). - // All operations on Bootstrapped must be handled via `atomic.*Int32()` calls - Bootstrapped int32 - // BootstrapExpect mode is used to automatically bring up a // collection of Nomad servers. This can be used to automatically // bring up a collection of nodes. 
@@ -394,6 +391,36 @@ type Config struct { DeploymentQueryRateLimit float64 } +func (c *Config) Copy() *Config { + if c == nil { + return nil + } + + nc := *c + + // Can't deep copy interfaces + // LogOutput io.Writer + // Logger log.InterceptLogger + // PluginLoader loader.PluginCatalog + // PluginSingletonLoader loader.PluginCatalog + + nc.RPCAddr = pointer.Copy(c.RPCAddr) + nc.ClientRPCAdvertise = pointer.Copy(c.ClientRPCAdvertise) + nc.ServerRPCAdvertise = pointer.Copy(c.ServerRPCAdvertise) + nc.RaftConfig = pointer.Copy(c.RaftConfig) + nc.SerfConfig = pointer.Copy(c.SerfConfig) + nc.EnabledSchedulers = slices.Clone(c.EnabledSchedulers) + nc.ConsulConfig = c.ConsulConfig.Copy() + nc.VaultConfig = c.VaultConfig.Copy() + nc.TLSConfig = c.TLSConfig.Copy() + nc.SentinelConfig = c.SentinelConfig.Copy() + nc.AutopilotConfig = c.AutopilotConfig.Copy() + nc.LicenseConfig = c.LicenseConfig.Copy() + nc.SearchConfig = c.SearchConfig.Copy() + + return &nc +} + // DefaultConfig returns the default configuration. Only used as the basis for // merging agent or test parameters. func DefaultConfig() *Config { diff --git a/nomad/consul_policy_test.go b/nomad/consul_policy_test.go index c0e6483995d..fd9884ae7c1 100644 --- a/nomad/consul_policy_test.go +++ b/nomad/consul_policy_test.go @@ -461,7 +461,7 @@ func TestConsulPolicy_namespaceCheck(t *testing.T) { func TestConsulPolicy_allowKeystoreRead(t *testing.T) { ci.Parallel(t) - + t.Run("empty", func(t *testing.T) { require.False(t, new(ConsulPolicy).allowsKeystoreRead(true, "default")) }) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 84e0f1a2d07..2deb0121863 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -1,6 +1,7 @@ package nomad import ( + "context" "encoding/json" "fmt" "math" @@ -10,9 +11,11 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" version "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/scheduler" + "golang.org/x/time/rate" ) // CoreScheduler is a special "scheduler" that is registered @@ -1018,7 +1021,7 @@ func (c *CoreScheduler) secureVariablesRekey(eval *structs.Evaluation) error { if err != nil { return err } - err = c.batchRotateVariables(varIter, eval) + err = c.rotateVariables(varIter, eval) if err != nil { return err } @@ -1051,32 +1054,63 @@ func (c *CoreScheduler) secureVariablesRekey(eval *structs.Evaluation) error { return nil } -// rootKeyFullRotatePerKey runs over an iterator of secure variables -// and decrypts them, and then sends them back as batches to be -// re-encrypted with the currently active key. 
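Config.Copy above performs a shallow copy of the struct and then re-points the pointer-valued fields individually. A sketch of the pointer.Copy helper it relies on, under the assumption that it simply clones the pointed-to value (illustrative only):

```go
package pointer

// Copy returns nil for a nil pointer, otherwise a pointer to a shallow
// copy of the pointed-to value, so the caller's copy no longer aliases
// the original.
func Copy[T any](p *T) *T {
	if p == nil {
		return nil
	}
	v := *p
	return &v
}
```

Interface-valued fields such as Logger and the plugin loaders are intentionally left shared, as the comment inside Copy notes, since an interface value cannot be cloned generically.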
-func (c *CoreScheduler) batchRotateVariables(iter memdb.ResultIterator, eval *structs.Evaluation) error { +// rotateVariables runs over an iterator of secure variables and decrypts them, +// and then sends them back to be re-encrypted with the currently active key, +// checking for conflicts +func (c *CoreScheduler) rotateVariables(iter memdb.ResultIterator, eval *structs.Evaluation) error { - upsertFn := func(variables []*structs.SecureVariableDecrypted) error { - if len(variables) == 0 { - return nil - } - args := &structs.SecureVariablesUpsertRequest{ - Data: variables, - WriteRequest: structs.WriteRequest{ - Region: c.srv.config.Region, - AuthToken: eval.LeaderACL, - }, - } - reply := &structs.SecureVariablesUpsertResponse{} - return c.srv.RPC("SecureVariables.Upsert", args, reply) + args := &structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + WriteRequest: structs.WriteRequest{ + Region: c.srv.config.Region, + AuthToken: eval.LeaderACL, + }, } - variables := []*structs.SecureVariableDecrypted{} + // We may have to work on a very large number of variables. There's no + // BatchApply RPC because it makes for an awkward API around conflict + // detection, and even if we did, we'd be blocking this scheduler goroutine + // for a very long time using the same snapshot. This would increase the + // risk that any given batch hits a conflict because of a concurrent change + // and make it more likely that we fail the eval. For large sets, this would + // likely mean the eval would run out of retries. + // + // Instead, we'll rate limit RPC requests and have a timeout. If we still + // haven't finished the set by the timeout, emit a new eval. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + limiter := rate.NewLimiter(rate.Limit(100), 100) + for { raw := iter.Next() if raw == nil { break } + + select { + case <-ctx.Done(): + newEval := &structs.Evaluation{ + ID: uuid.Generate(), + Namespace: "-", + Priority: structs.CoreJobPriority, + Type: structs.JobTypeCore, + TriggeredBy: structs.EvalTriggerScheduled, + JobID: eval.JobID, + Status: structs.EvalStatusPending, + LeaderACL: eval.LeaderACL, + } + return c.srv.RPC("Eval.Create", &structs.EvalUpdateRequest{ + Evals: []*structs.Evaluation{newEval}, + EvalToken: uuid.Generate(), + WriteRequest: structs.WriteRequest{ + Region: c.srv.config.Region, + AuthToken: eval.LeaderACL, + }, + }, &structs.GenericResponse{}) + + default: + } + ev := raw.(*structs.SecureVariableEncrypted) cleartext, err := c.srv.encrypter.Decrypt(ev.Data, ev.KeyID) if err != nil { @@ -1090,18 +1124,26 @@ func (c *CoreScheduler) batchRotateVariables(iter memdb.ResultIterator, eval *st if err != nil { return err } - variables = append(variables, dv) - if len(variables) == 20 { - err := upsertFn(variables) - if err != nil { - return err - } - variables = []*structs.SecureVariableDecrypted{} + args.Var = dv + reply := &structs.SecureVariablesApplyResponse{} + + if err := limiter.Wait(ctx); err != nil { + return err + } + + err = c.srv.RPC("SecureVariables.Apply", args, reply) + if err != nil { + return err + } + if reply.IsConflict() { + // we've already rotated the key by the time we took this + // evaluation's snapshot, so any conflict is going to be on a write + // made with the new key, so there's nothing for us to do here + continue } } - // ensure we submit any partial batch - return upsertFn(variables) + return nil } // getThreshold returns the index threshold for determining whether an diff --git 
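The rotateVariables rewrite above trades the old 20-item batching for a rate-limited, deadline-bounded loop that re-enqueues itself when it runs out of time. Reduced to a standalone sketch (package name, limits, and the work callback are placeholders), the pattern looks like this:

```go
package rotation

import (
	"context"
	"errors"
	"time"

	"golang.org/x/time/rate"
)

// errDeadline signals that the caller should enqueue a follow-up pass for
// the remaining items rather than keep blocking this goroutine.
var errDeadline = errors.New("rotation deadline reached")

// processAll visits each item at no more than 100 calls per second and
// gives up once the one-minute budget is spent, mirroring the shape of
// the rotateVariables loop above.
func processAll(ctx context.Context, items []string, work func(string) error) error {
	ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
	defer cancel()

	limiter := rate.NewLimiter(rate.Limit(100), 100)

	for _, item := range items {
		// Stop early if the deadline has passed.
		select {
		case <-ctx.Done():
			return errDeadline
		default:
		}
		// Block until the token bucket allows another call.
		if err := limiter.Wait(ctx); err != nil {
			return err
		}
		if err := work(item); err != nil {
			return err
		}
	}
	return nil
}
```

When the deadline fires, the real code creates a fresh core-job evaluation instead of returning an error, so the remaining variables are picked up on a later pass rather than lost.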
a/nomad/core_sched_test.go b/nomad/core_sched_test.go index ae44240a688..74c9495d211 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -2483,8 +2483,12 @@ func TestCoreScheduler_RootKeyGC(t *testing.T) { variable := mock.SecureVariableEncrypted() variable.KeyID = key2.KeyID - require.NoError(t, store.UpsertSecureVariables( - structs.MsgTypeTestSetup, 601, []*structs.SecureVariableEncrypted{variable})) + + setResp := store.SVESet(601, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: variable, + }) + require.NoError(t, setResp.Error) // insert an allocation alloc := mock.Alloc() @@ -2549,18 +2553,15 @@ func TestCoreScheduler_SecureVariablesRekey(t *testing.T) { require.NotNil(t, key0, "expected keyring to be bootstapped") require.NoError(t, err) - req := &structs.SecureVariablesUpsertRequest{ - Data: []*structs.SecureVariableDecrypted{ - mock.SecureVariable(), - mock.SecureVariable(), - mock.SecureVariable(), - }, - WriteRequest: structs.WriteRequest{ - Region: srv.config.Region, - }, + for i := 0; i < 3; i++ { + req := &structs.SecureVariablesApplyRequest{ + Op: structs.SVOpSet, + Var: mock.SecureVariable(), + WriteRequest: structs.WriteRequest{Region: srv.config.Region}, + } + resp := &structs.SecureVariablesApplyResponse{} + require.NoError(t, srv.RPC("SecureVariables.Apply", req, resp)) } - resp := &structs.SecureVariablesUpsertResponse{} - require.NoError(t, srv.RPC("SecureVariables.Upsert", req, resp)) rotateReq := &structs.KeyringRotateRootKeyRequest{ WriteRequest: structs.WriteRequest{ @@ -2570,17 +2571,15 @@ func TestCoreScheduler_SecureVariablesRekey(t *testing.T) { var rotateResp structs.KeyringRotateRootKeyResponse require.NoError(t, srv.RPC("Keyring.Rotate", rotateReq, &rotateResp)) - req2 := &structs.SecureVariablesUpsertRequest{ - Data: []*structs.SecureVariableDecrypted{ - mock.SecureVariable(), - mock.SecureVariable(), - mock.SecureVariable(), - }, - WriteRequest: structs.WriteRequest{ - Region: srv.config.Region, - }, + for i := 0; i < 3; i++ { + req := &structs.SecureVariablesApplyRequest{ + Op: structs.SVOpSet, + Var: mock.SecureVariable(), + WriteRequest: structs.WriteRequest{Region: srv.config.Region}, + } + resp := &structs.SecureVariablesApplyResponse{} + require.NoError(t, srv.RPC("SecureVariables.Apply", req, resp)) } - require.NoError(t, srv.RPC("SecureVariables.Upsert", req2, resp)) rotateReq.Full = true require.NoError(t, srv.RPC("Keyring.Rotate", rotateReq, &rotateResp)) diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 9646590729f..65e661d35e2 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -993,8 +993,10 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) { ) defer cleanup() - node := client.Node() - node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + node := client.UpdateConfig(func(c *cconfig.Config) { + // client RPCs not supported on early versions + c.Node.Attributes["nomad.version"] = "0.11.0" + }).Node req0 := &structs.NodeRegisterRequest{ Node: node, @@ -1017,24 +1019,26 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) { codec := rpcClient(t, srv) index := uint64(1000) - node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsAttachDetach: true, - SupportsCreateDelete: true, + node = client.UpdateConfig(func(c *cconfig.Config) { + c.Node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: 
"minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsAttachDetach: true, + SupportsCreateDelete: true, + }, + RequiresControllerPlugin: true, }, - RequiresControllerPlugin: true, - }, - } - node.CSINodePlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - NodeInfo: &structs.CSINodeInfo{}, - }, - } + } + c.Node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + }).Node index++ require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -1129,8 +1133,10 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) { ) defer cleanup() - node := client.Node() - node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + node := client.UpdateConfig(func(c *cconfig.Config) { + // client RPCs not supported on early versions + c.Node.Attributes["nomad.version"] = "0.11.0" + }).Node req0 := &structs.NodeRegisterRequest{ Node: node, @@ -1153,23 +1159,25 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) { codec := rpcClient(t, srv) index := uint64(1000) - node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsAttachDetach: true, + node = client.UpdateConfig(func(c *cconfig.Config) { + c.Node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsAttachDetach: true, + }, + RequiresControllerPlugin: true, }, - RequiresControllerPlugin: true, - }, - } - node.CSINodePlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - NodeInfo: &structs.CSINodeInfo{}, - }, - } + } + c.Node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + }).Node index++ require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -1266,8 +1274,10 @@ func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { ) defer cleanup() - node := client.Node() - node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + node := client.UpdateConfig(func(c *cconfig.Config) { + // client RPCs not supported on early versions + c.Node.Attributes["nomad.version"] = "0.11.0" + }).Node req0 := &structs.NodeRegisterRequest{ Node: node, @@ -1288,24 +1298,26 @@ func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { codec := rpcClient(t, srv) index := uint64(1000) - node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsAttachDetach: true, - SupportsListVolumes: true, + node = client.UpdateConfig(func(c *cconfig.Config) { + c.Node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsAttachDetach: true, + SupportsListVolumes: true, + }, + RequiresControllerPlugin: true, }, - RequiresControllerPlugin: true, - }, - } - node.CSINodePlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - NodeInfo: &structs.CSINodeInfo{}, - }, - } + } + c.Node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + }).Node index++ 
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -1359,10 +1371,8 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { ) defer cleanup() - node := client.Node() - req0 := &structs.NodeRegisterRequest{ - Node: node, + Node: client.Node(), WriteRequest: structs.WriteRequest{Region: "global"}, } var resp0 structs.NodeUpdateResponse @@ -1382,16 +1392,18 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { codec := rpcClient(t, srv) index := uint64(1000) - node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsCreateDeleteSnapshot: true, + node := client.UpdateConfig(func(c *cconfig.Config) { + c.Node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsCreateDeleteSnapshot: true, + }, + RequiresControllerPlugin: true, }, - RequiresControllerPlugin: true, - }, - } + } + }).Node index++ require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -1452,10 +1464,8 @@ func TestCSIVolumeEndpoint_DeleteSnapshot(t *testing.T) { ) defer cleanup() - node := client.Node() - req0 := &structs.NodeRegisterRequest{ - Node: node, + Node: client.Node(), WriteRequest: structs.WriteRequest{Region: "global"}, } var resp0 structs.NodeUpdateResponse @@ -1475,16 +1485,18 @@ func TestCSIVolumeEndpoint_DeleteSnapshot(t *testing.T) { codec := rpcClient(t, srv) index := uint64(1000) - node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsCreateDeleteSnapshot: true, + node := client.UpdateConfig(func(c *cconfig.Config) { + c.Node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsCreateDeleteSnapshot: true, + }, + RequiresControllerPlugin: true, }, - RequiresControllerPlugin: true, - }, - } + } + }).Node index++ require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -1551,9 +1563,8 @@ func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { ) defer cleanup() - node := client.Node() req0 := &structs.NodeRegisterRequest{ - Node: node, + Node: client.Node(), WriteRequest: structs.WriteRequest{Region: "global"}, } var resp0 structs.NodeUpdateResponse @@ -1571,16 +1582,18 @@ func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { codec := rpcClient(t, srv) index := uint64(1000) - node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": { - PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsListSnapshots: true, + node := client.UpdateConfig(func(c *cconfig.Config) { + c.Node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsListSnapshots: true, + }, + RequiresControllerPlugin: true, }, - RequiresControllerPlugin: true, - }, - } + } + }).Node index++ require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index e58c62da3f0..17d4b99806e 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -8,7 +8,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" 
"github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -492,7 +492,7 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} a.DeploymentID = d.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } state := s1.fsm.State() @@ -557,7 +557,7 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} a.DeploymentID = d.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } state := s1.fsm.State() diff --git a/nomad/deploymentwatcher/deployment_watcher.go b/nomad/deploymentwatcher/deployment_watcher.go index bb7bc1f5258..36a8f4d7c0f 100644 --- a/nomad/deploymentwatcher/deployment_watcher.go +++ b/nomad/deploymentwatcher/deployment_watcher.go @@ -8,7 +8,7 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -26,7 +26,7 @@ var ( // allocations part of a deployment to be rescheduled. We create a one off // variable to avoid creating a new object for every request. allowRescheduleTransition = &structs.DesiredTransition{ - Reschedule: helper.BoolToPtr(true), + Reschedule: pointer.Of(true), } ) @@ -233,7 +233,7 @@ func (w *deploymentWatcher) SetAllocHealth( resp.DeploymentModifyIndex = index resp.Index = index if j != nil { - resp.RevertedJobVersion = helper.Uint64ToPtr(j.Version) + resp.RevertedJobVersion = pointer.Of(j.Version) } return nil } @@ -293,18 +293,22 @@ func (w *deploymentWatcher) autoPromoteDeployment(allocs []*structs.AllocListStu continue } - if !dstate.AutoPromote || dstate.DesiredCanaries != len(dstate.PlacedCanaries) { + if !dstate.AutoPromote || len(dstate.PlacedCanaries) < dstate.DesiredCanaries { return nil } + healthyCanaries := 0 // Find the health status of each canary for _, c := range dstate.PlacedCanaries { for _, a := range allocs { - if c == a.ID && !a.DeploymentStatus.IsHealthy() { - return nil + if c == a.ID && a.DeploymentStatus.IsHealthy() { + healthyCanaries += 1 } } } + if healthyCanaries != dstate.DesiredCanaries { + return nil + } } // Send the request @@ -390,7 +394,7 @@ func (w *deploymentWatcher) FailDeployment( resp.DeploymentModifyIndex = i resp.Index = i if rollbackJob != nil { - resp.RevertedJobVersion = helper.Uint64ToPtr(rollbackJob.Version) + resp.RevertedJobVersion = pointer.Of(rollbackJob.Version) } return nil } @@ -836,10 +840,12 @@ func (w *deploymentWatcher) getEval() *structs.Evaluation { // on the previous version that are then "watched" on a leader that's on // the new version. This would result in an eval with its priority set to // zero which would be bad. This therefore protects against that. 
+ w.l.Lock() priority := w.d.EvalPriority if priority == 0 { priority = w.j.Priority } + w.l.Unlock() return &structs.Evaluation{ ID: uuid.Generate(), diff --git a/nomad/deploymentwatcher/deployments_watcher.go b/nomad/deploymentwatcher/deployments_watcher.go index 56617430fde..46db7a73c34 100644 --- a/nomad/deploymentwatcher/deployments_watcher.go +++ b/nomad/deploymentwatcher/deployments_watcher.go @@ -193,7 +193,12 @@ func (w *Watcher) watchDeployments(ctx context.Context) { // getDeploys retrieves all deployments blocking at the given index. func (w *Watcher) getDeploys(ctx context.Context, minIndex uint64) ([]*structs.Deployment, uint64, error) { - resp, index, err := w.state.BlockingQuery(w.getDeploysImpl, minIndex, ctx) + // state can be updated concurrently + w.l.Lock() + stateStore := w.state + w.l.Unlock() + + resp, index, err := stateStore.BlockingQuery(w.getDeploysImpl, minIndex, ctx) if err != nil { return nil, 0, err } diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index 467ffcca20b..73277ac27c8 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -7,7 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -326,7 +326,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { Status: structs.DeploymentStatusFailed, StatusDescription: structs.DeploymentStatusDescriptionFailedAllocations, }, - JobVersion: helper.Uint64ToPtr(0), + JobVersion: pointer.Of(uint64(0)), } matcher := matchDeploymentAllocHealthRequest(matchConfig) m.On("UpdateDeploymentAllocHealth", mocker.MatchedBy(matcher)).Return(nil) @@ -436,7 +436,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { d.TaskGroups[a.TaskGroup].DesiredCanaries = 1 d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } a.DeploymentID = d.ID require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") @@ -696,6 +696,142 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { require.False(t, b1.DeploymentStatus.Canary) } +func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { + ci.Parallel(t) + w, m := defaultTestDeploymentWatcher(t) + now := time.Now() + + // Create 1 UpdateStrategy, 1 job (2 TaskGroups), 2 canaries, and 1 deployment + canaryUpd := structs.DefaultUpdateStrategy.Copy() + canaryUpd.AutoPromote = true + canaryUpd.MaxParallel = 2 + canaryUpd.Canary = 2 + canaryUpd.ProgressDeadline = 5 * time.Second + + j := mock.MultiTaskGroupJob() + j.TaskGroups[0].Update = canaryUpd + + d := mock.Deployment() + d.JobID = j.ID + // This is created in scheduler.computeGroup at runtime, where properties from the + // UpdateStrategy are copied in + d.TaskGroups = map[string]*structs.DeploymentState{ + "web": { + AutoPromote: canaryUpd.AutoPromote, + AutoRevert: canaryUpd.AutoRevert, + ProgressDeadline: canaryUpd.ProgressDeadline, + DesiredTotal: 2, + }, + } + + canaryAlloc := func() *structs.Allocation { + a := mock.Alloc() + a.DeploymentID = d.ID + a.CreateTime = now.UnixNano() + a.ModifyTime = now.UnixNano() + a.DeploymentStatus = 
&structs.AllocDeploymentStatus{ + Canary: true, + } + return a + } + + // Web taskgroup + ca1 := canaryAlloc() + ca2 := canaryAlloc() + ca3 := canaryAlloc() + + d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID, ca3.ID} + d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 + require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ca3}), "UpsertAllocs") + + // ============================================================= + // Support method calls + + // clear UpdateDeploymentStatus default expectation + m.Mock.ExpectedCalls = nil + + matchConfig0 := &matchDeploymentStatusUpdateConfig{ + DeploymentID: d.ID, + Status: structs.DeploymentStatusFailed, + StatusDescription: structs.DeploymentStatusDescriptionProgressDeadline, + Eval: true, + } + matcher0 := matchDeploymentStatusUpdateRequest(matchConfig0) + m.On("UpdateDeploymentStatus", mocker.MatchedBy(matcher0)).Return(nil) + + matchConfig1 := &matchDeploymentAllocHealthRequestConfig{ + DeploymentID: d.ID, + Healthy: []string{ca1.ID, ca2.ID}, + Eval: true, + } + matcher1 := matchDeploymentAllocHealthRequest(matchConfig1) + m.On("UpdateDeploymentAllocHealth", mocker.MatchedBy(matcher1)).Return(nil) + + matchConfig2 := &matchDeploymentPromoteRequestConfig{ + Promotion: &structs.DeploymentPromoteRequest{ + DeploymentID: d.ID, + All: true, + }, + Eval: true, + } + matcher2 := matchDeploymentPromoteRequest(matchConfig2) + m.On("UpdateDeploymentPromotion", mocker.MatchedBy(matcher2)).Return(nil) + // ============================================================= + + // Start the deployment + w.SetEnabled(true, m.state) + testutil.WaitForResult(func() (bool, error) { + w.l.RLock() + defer w.l.RUnlock() + return 1 == len(w.watchers), nil + }, + func(err error) { + w.l.RLock() + defer w.l.RUnlock() + require.Equal(t, 1, len(w.watchers), "Should have 1 deployment") + }, + ) + + // Mark only 2 canaries as healthy + req := &structs.DeploymentAllocHealthRequest{ + DeploymentID: d.ID, + HealthyAllocationIDs: []string{ca1.ID, ca2.ID}, + } + var resp structs.DeploymentUpdateResponse + // Calls w.raft.UpdateDeploymentAllocHealth, which is implemented by StateStore in + // state.UpdateDeploymentAllocHealth via a raft shim? + err := w.SetAllocHealth(req, &resp) + require.NoError(t, err) + + ws := memdb.NewWatchSet() + + testutil.WaitForResult( + func() (bool, error) { + ds, _ := m.state.DeploymentsByJobID(ws, j.Namespace, j.ID, true) + d = ds[0] + return 2 == d.TaskGroups["web"].HealthyAllocs, nil + }, + func(err error) { require.NoError(t, err) }, + ) + + // Verify that a promotion request was submitted. 
+ require.Equal(t, 1, len(w.watchers), "Deployment should still be active") + m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher2)) + + require.Equal(t, "running", d.Status) + require.True(t, d.TaskGroups["web"].Promoted) + + a1, _ := m.state.AllocByID(ws, ca1.ID) + require.False(t, a1.DeploymentStatus.Canary) + require.Equal(t, "pending", a1.ClientStatus) + require.Equal(t, "run", a1.DesiredStatus) + + b1, _ := m.state.AllocByID(ws, ca2.ID) + require.False(t, b1.DeploymentStatus.Canary) +} + // Test pausing a deployment that is running func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { ci.Parallel(t) @@ -941,7 +1077,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { DeploymentID: d.ID, Status: structs.DeploymentStatusFailed, StatusDescription: structs.DeploymentStatusDescriptionRollback(structs.DeploymentStatusDescriptionFailedAllocations, 0), - JobVersion: helper.Uint64ToPtr(0), + JobVersion: pointer.Of(uint64(0)), Eval: true, } m2 := matchDeploymentStatusUpdateRequest(c) @@ -1014,7 +1150,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { DeploymentID: d.ID, Status: structs.DeploymentStatusFailed, StatusDescription: structs.DeploymentStatusDescriptionRollback(structs.DeploymentStatusDescriptionFailedAllocations, 0), - JobVersion: helper.Uint64ToPtr(2), + JobVersion: pointer.Of(uint64(2)), Eval: true, } m3 := matchDeploymentStatusUpdateRequest(c2) @@ -1063,7 +1199,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { // Update the alloc to be unhealthy and require that nothing happens. a2 := a.Copy() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Timestamp: now, } require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 100, []*structs.Allocation{a2})) @@ -1167,7 +1303,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { // Update the first allocation to be healthy a3 := a.Copy() - a3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + a3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a3}), "UpsertAllocs") // Get the updated deployment @@ -1186,7 +1322,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { // Update the second allocation to be healthy a4 := a2.Copy() - a4.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + a4.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a4}), "UpsertAllocs") // Get the updated deployment @@ -1247,7 +1383,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { // Update the alloc to be unhealthy and require that nothing happens. 
a2 := a.Copy() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) @@ -1320,7 +1456,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") @@ -1341,7 +1477,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { a2.CreateTime = now.UnixNano() a2.ModifyTime = now.UnixNano() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } d.TaskGroups["web"].RequireProgressBy = time.Now().Add(2 * time.Second) @@ -1469,7 +1605,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { canary2.ModifyTime = now.UnixNano() canary2.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1488,7 +1624,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { canary1.ModifyTime = now.UnixNano() canary1.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1544,7 +1680,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { alloc1a.ModifyTime = now.UnixNano() alloc1a.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: false, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1553,7 +1689,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { alloc1b.ModifyTime = now.UnixNano() alloc1b.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: false, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1626,7 +1762,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { // Update the alloc to be unhealthy a2 := a.Copy() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Timestamp: time.Now(), } require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) diff --git a/nomad/drainer/drainer.go b/nomad/drainer/drainer.go index 2e4e8528ffc..f610a0bce36 100644 --- a/nomad/drainer/drainer.go +++ b/nomad/drainer/drainer.go @@ -7,7 +7,7 @@ import ( log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -396,7 +396,7 @@ func (n *NodeDrainer) drainAllocs(future *structs.BatchFuture, allocs []*structs transitions := make(map[string]*structs.DesiredTransition, len(allocs)) for _, alloc := range allocs { transitions[alloc.ID] = &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } jobs[alloc.JobNamespacedID()] = alloc } diff --git a/nomad/drainer/draining_node_test.go b/nomad/drainer/draining_node_test.go index b93efcb7792..59f258f239a 100644 --- a/nomad/drainer/draining_node_test.go +++ b/nomad/drainer/draining_node_test.go @@ 
-46,7 +46,7 @@ func assertDrainingNode(t *testing.T, dn *drainingNode, isDone bool, remaining, func TestDrainingNode_Table(t *testing.T) { ci.Parallel(t) - + cases := []struct { name string isDone bool diff --git a/nomad/drainer/watch_jobs.go b/nomad/drainer/watch_jobs.go index b3dbc842e93..36d301cc3b5 100644 --- a/nomad/drainer/watch_jobs.go +++ b/nomad/drainer/watch_jobs.go @@ -411,7 +411,7 @@ func handleTaskGroup(snap *state.StateSnapshot, batch bool, tg *structs.TaskGrou // Determine how many we can drain thresholdCount := tg.Count - tg.Migrate.MaxParallel numToDrain := healthy - thresholdCount - numToDrain = helper.IntMin(len(drainable), numToDrain) + numToDrain = helper.Min(len(drainable), numToDrain) if numToDrain <= 0 { return nil } diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index 192fb112424..9f6f637dd8a 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -138,7 +138,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for i := 0; i < count; i++ { a := newAlloc(drainingNode, job) a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, a) } @@ -160,7 +160,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // the old ones drainedAllocs := make([]*structs.Allocation, len(drains.Allocs)) for i, a := range drains.Allocs { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() @@ -203,7 +203,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for _, a := range replacements { a.ClientStatus = structs.AllocClientStatusRunning a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) @@ -217,7 +217,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // Fake migrations once more to finish the drain drainedAllocs = make([]*structs.Allocation, len(drains.Allocs)) for i, a := range drains.Allocs { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() @@ -246,7 +246,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for _, a := range replacements { a.ClientStatus = structs.AllocClientStatusRunning a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) @@ -260,7 +260,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // Fake migrations once more to finish the drain drainedAllocs = make([]*structs.Allocation, len(drains.Allocs)) for i, a := range drains.Allocs { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() @@ -289,7 +289,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for _, a := range replacements { a.ClientStatus = structs.AllocClientStatusRunning a.DeploymentStatus 
= &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) @@ -382,7 +382,7 @@ func TestHandeTaskGroup_Table(t *testing.T) { ExpectedDone: false, AddAlloc: func(i int, a *structs.Allocation, drainingID, runningID string) { if i == 0 { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) return } a.NodeID = runningID @@ -549,7 +549,7 @@ func TestHandeTaskGroup_Table(t *testing.T) { func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { ci.Parallel(t) - + require := require.New(t) assert := assert.New(t) @@ -583,7 +583,7 @@ func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { // Default to being healthy on the draining node a.NodeID = drainingNode.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } if tc.AddAlloc != nil { tc.AddAlloc(i, a, drainingNode.ID, runningNode.ID) @@ -630,7 +630,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { a.TaskGroup = job.TaskGroups[0].Name a.NodeID = n.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } if i%2 == 0 { @@ -699,7 +699,7 @@ func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { a.TaskGroup = job.TaskGroups[0].Name a.NodeID = n.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } if i%2 == 0 { diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index 5c23ac89105..33abf4812e8 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -12,7 +12,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/drainer" "github.com/hashicorp/nomad/nomad/mock" @@ -52,7 +52,7 @@ func allocPromoter(errCh chan<- error, ctx context.Context, } newAlloc := alloc.Copy() newAlloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } updates = append(updates, newAlloc) diff --git a/nomad/eval_broker.go b/nomad/eval_broker.go index b0b0b87d547..e13394b1725 100644 --- a/nomad/eval_broker.go +++ b/nomad/eval_broker.go @@ -200,9 +200,9 @@ func (b *EvalBroker) Enqueue(eval *structs.Evaluation) { // enqueued. The evaluation is handled in one of the following ways: // * Evaluation not outstanding: Process as a normal Enqueue // * Evaluation outstanding: Do not allow the evaluation to be dequeued til: -// * Ack received: Unblock the evaluation allowing it to be dequeued -// * Nack received: Drop the evaluation as it was created as a result of a -// scheduler run that was Nack'd +// - Ack received: Unblock the evaluation allowing it to be dequeued +// - Nack received: Drop the evaluation as it was created as a result of a +// scheduler run that was Nack'd func (b *EvalBroker) EnqueueAll(evals map[*structs.Evaluation]string) { // The lock needs to be held until all evaluations are enqueued. 
This is so // that when Dequeue operations are unblocked they will pick the highest diff --git a/nomad/event_endpoint.go b/nomad/event_endpoint.go index 522d130e186..2628b11a109 100644 --- a/nomad/event_endpoint.go +++ b/nomad/event_endpoint.go @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" ) @@ -28,7 +28,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleJsonResultError(err, helper.Int64ToPtr(500), encoder) + handleJsonResultError(err, pointer.Of(int64(500)), encoder) return } @@ -36,7 +36,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { if args.Region != e.srv.config.Region { err := e.forwardStreamingRPC(args.Region, "Event.Stream", args, conn) if err != nil { - handleJsonResultError(err, helper.Int64ToPtr(500), encoder) + handleJsonResultError(err, pointer.Of(int64(500)), encoder) } return } @@ -52,7 +52,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { // Get the servers broker and subscribe publisher, err := e.srv.State().EventBroker() if err != nil { - handleJsonResultError(err, helper.Int64ToPtr(500), encoder) + handleJsonResultError(err, pointer.Of(int64(500)), encoder) return } @@ -66,7 +66,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { subscription, subErr = publisher.Subscribe(subReq) } if subErr != nil { - handleJsonResultError(subErr, helper.Int64ToPtr(500), encoder) + handleJsonResultError(subErr, pointer.Of(int64(500)), encoder) return } defer subscription.Unsubscribe() @@ -141,7 +141,7 @@ OUTER: } if streamErr != nil { - handleJsonResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleJsonResultError(streamErr, pointer.Of(int64(500)), encoder) return } diff --git a/nomad/fsm.go b/nomad/fsm.go index 0b923223151..02a2ba39f6a 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-msgpack/codec" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -318,10 +318,8 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} { return n.applyDeleteServiceRegistrationByID(msgType, buf[1:], log.Index) case structs.ServiceRegistrationDeleteByNodeIDRequestType: return n.applyDeleteServiceRegistrationByNodeID(msgType, buf[1:], log.Index) - case structs.SecureVariableUpsertRequestType: - return n.applySecureVariableUpsert(msgType, buf[1:], log.Index) - case structs.SecureVariableDeleteRequestType: - return n.applySecureVariableDelete(msgType, buf[1:], log.Index) + case structs.SVApplyStateRequestType: + return n.applySecureVariableOperation(msgType, buf[1:], log.Index) case structs.RootKeyMetaUpsertRequestType: return n.applyRootKeyMetaUpsert(msgType, buf[1:], log.Index) case structs.RootKeyMetaDeleteRequestType: @@ -707,7 +705,7 @@ func (n *nomadFSM) handleJobDeregister(index uint64, jobID, namespace string, pu if err != nil { return err } - transition := &structs.DesiredTransition{NoShutdownDelay: helper.BoolToPtr(true)} + transition := &structs.DesiredTransition{NoShutdownDelay: pointer.Of(true)} for _, alloc := range allocs { err := n.state.UpdateAllocDesiredTransitionTxn(tx, index, alloc.ID, 
transition) if err != nil { @@ -2085,34 +2083,27 @@ func (f *FSMFilter) Include(item interface{}) bool { return true } -func (n *nomadFSM) applySecureVariableUpsert(msgType structs.MessageType, buf []byte, index uint64) interface{} { - defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_secure_variable_upsert"}, time.Now()) - var req structs.SecureVariablesEncryptedUpsertRequest +func (n *nomadFSM) applySecureVariableOperation(msgType structs.MessageType, buf []byte, index uint64) interface{} { + var req structs.SVApplyStateRequest if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } - - if err := n.state.UpsertSecureVariables(msgType, index, req.Data); err != nil { - n.logger.Error("UpsertSecureVariables failed", "error", err) + defer metrics.MeasureSinceWithLabels([]string{"nomad", "fsm", "apply_sv_operation"}, time.Now(), + []metrics.Label{{Name: "op", Value: string(req.Op)}}) + switch req.Op { + case structs.SVOpSet: + return n.state.SVESet(index, &req) + case structs.SVOpDelete: + return n.state.SVEDelete(index, &req) + case structs.SVOpDeleteCAS: + return n.state.SVEDeleteCAS(index, &req) + case structs.SVOpCAS: + return n.state.SVESetCAS(index, &req) + default: + err := fmt.Errorf("Invalid SVE operation '%s'", req.Op) + n.logger.Warn("Invalid SVE operation", "operation", req.Op) return err } - - return nil -} - -func (n *nomadFSM) applySecureVariableDelete(msgType structs.MessageType, buf []byte, index uint64) interface{} { - defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_secure_variable_delete"}, time.Now()) - var req structs.SecureVariablesDeleteRequest - if err := structs.Decode(buf, &req); err != nil { - panic(fmt.Errorf("failed to decode request: %v", err)) - } - - if err := n.state.DeleteSecureVariables(msgType, index, req.Namespace, []string{req.Path}); err != nil { - n.logger.Error("DeleteSecureVariables failed", "error", err) - return err - } - - return nil } func (n *nomadFSM) applyRootKeyMetaUpsert(msgType structs.MessageType, buf []byte, index uint64) interface{} { diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index ff858c2d570..c552d1ed56a 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -12,7 +12,7 @@ import ( "github.com/google/go-cmp/cmp" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -1581,7 +1581,7 @@ func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc, alloc2}) t1 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } eval := &structs.Evaluation{ @@ -2082,7 +2082,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -2090,7 +2090,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } if err := 
state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { @@ -3415,7 +3415,14 @@ func TestFSM_SnapshotRestore_SecureVariables(t *testing.T) { // Generate and upsert some secure variables. msvs := mock.SecureVariablesEncrypted(3, 3) svs := msvs.List() - require.NoError(t, testState.UpsertSecureVariables(structs.MsgTypeTestSetup, 10, svs)) + + for _, sv := range svs { + setResp := testState.SVESet(10, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + }) + require.NoError(t, setResp.Error) + } // Update the mock secure variables data with the actual create information iter, err := testState.SecureVariables(memdb.NewWatchSet()) diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index 17a5ce08e1f..58879cf9a14 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -8,7 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -301,18 +301,18 @@ func TestHeartbeat_InvalidateHeartbeat_DisconnectedClient(t *testing.T) { { name: "has-pending-reconnects", now: time.Now().UTC(), - maxClientDisconnect: helper.TimeToPtr(5 * time.Second), + maxClientDisconnect: pointer.Of(5 * time.Second), expectedNodeStatus: structs.NodeStatusDisconnected, }, { name: "has-expired-reconnects", - maxClientDisconnect: helper.TimeToPtr(5 * time.Second), + maxClientDisconnect: pointer.Of(5 * time.Second), now: time.Now().UTC().Add(-10 * time.Second), expectedNodeStatus: structs.NodeStatusDown, }, { name: "has-expired-reconnects-equal-timestamp", - maxClientDisconnect: helper.TimeToPtr(5 * time.Second), + maxClientDisconnect: pointer.Of(5 * time.Second), now: time.Now().UTC().Add(-5 * time.Second), expectedNodeStatus: structs.NodeStatusDown, }, diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index 5c1e62a624f..b94d7205420 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/state/paginator" @@ -41,7 +42,7 @@ var ( // allocations to be force rescheduled. We create a one off // variable to avoid creating a new object for every request. allowForceRescheduleTransition = &structs.DesiredTransition{ - ForceReschedule: helper.BoolToPtr(true), + ForceReschedule: pointer.Of(true), } ) @@ -103,12 +104,12 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis // Attach the Nomad token's accessor ID so that deploymentwatcher // can reference the token later - tokenID, err := j.srv.ResolveSecretToken(args.AuthToken) + nomadACLToken, err := j.srv.ResolveSecretToken(args.AuthToken) if err != nil { return err } - if tokenID != nil { - args.Job.NomadTokenID = tokenID.AccessorID + if nomadACLToken != nil { + args.Job.NomadTokenID = nomadACLToken.AccessorID } // Set the warning message @@ -272,7 +273,11 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis // Enforce Sentinel policies. Pass a copy of the job to prevent // sentinel from altering it. 
- policyWarnings, err := j.enforceSubmitJob(args.PolicyOverride, args.Job.Copy()) + ns, err := snap.NamespaceByName(nil, args.RequestNamespace()) + if err != nil { + return err + } + policyWarnings, err := j.enforceSubmitJob(args.PolicyOverride, args.Job.Copy(), nomadACLToken, ns) if err != nil { return err } @@ -464,10 +469,8 @@ func getSignalConstraint(signals []string) *structs.Constraint { } } -// Summary retrieves the summary of a job -func (j *Job) Summary(args *structs.JobSummaryRequest, - reply *structs.JobSummaryResponse) error { - +// Summary retrieves the summary of a job. +func (j *Job) Summary(args *structs.JobSummaryRequest, reply *structs.JobSummaryResponse) error { if done, err := j.srv.forward("Job.Summary", args, args, reply); done { return err } @@ -511,8 +514,14 @@ func (j *Job) Summary(args *structs.JobSummaryRequest, return j.srv.blockingRPC(&opts) } -// Validate validates a job +// Validate validates a job. +// +// Must forward to the leader, because only the leader will have a live Vault +// client with which to validate vault tokens. func (j *Job) Validate(args *structs.JobValidateRequest, reply *structs.JobValidateResponse) error { + if done, err := j.srv.forward("Job.Validate", args, args, reply); done { + return err + } defer metrics.MeasureSince([]string{"nomad", "job", "validate"}, time.Now()) // defensive check; http layer and RPC requester should ensure namespaces are set consistently @@ -1014,6 +1023,10 @@ func (j *Job) Scale(args *structs.JobScaleRequest, reply *structs.JobRegisterRes return structs.NewErrRPCCoded(404, fmt.Sprintf("job %q not found", args.JobID)) } + // Since job is going to be mutated we must copy it since state store methods + // return a shared pointer. + job = job.Copy() + // Find target group in job TaskGroups groupName := args.Target[structs.ScalingTargetGroup] var group *structs.TaskGroup @@ -1385,7 +1398,7 @@ func (j *Job) List(args *structs.JobListRequest, reply *structs.JobListResponse) if err != nil { return err } - reply.Index = helper.Uint64Max(jindex, sindex) + reply.Index = helper.Max(jindex, sindex) // Set the query response j.srv.setQueryMeta(&reply.QueryMeta) @@ -1618,8 +1631,22 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse) } } + // Acquire a snapshot of the state + snap, err := j.srv.fsm.State().Snapshot() + if err != nil { + return err + } + // Enforce Sentinel policies - policyWarnings, err := j.enforceSubmitJob(args.PolicyOverride, args.Job) + nomadACLToken, err := snap.ACLTokenBySecretID(nil, args.AuthToken) + if err != nil && !strings.Contains(err.Error(), "missing secret id") { + return err + } + ns, err := snap.NamespaceByName(nil, args.RequestNamespace()) + if err != nil { + return err + } + policyWarnings, err := j.enforceSubmitJob(args.PolicyOverride, args.Job, nomadACLToken, ns) if err != nil { return err } @@ -1628,12 +1655,6 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse) reply.Warnings = structs.MergeMultierrorWarnings(warnings...) 
} - // Acquire a snapshot of the state - snap, err := j.srv.fsm.State().Snapshot() - if err != nil { - return err - } - // Interpolate the job for this region err = j.interpolateMultiregionFields(args) if err != nil { diff --git a/nomad/job_endpoint_hook_connect.go b/nomad/job_endpoint_hook_connect.go index 0cba5c89573..2af2d7199f1 100644 --- a/nomad/job_endpoint_hook_connect.go +++ b/nomad/job_endpoint_hook_connect.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/envoy" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" ) @@ -391,7 +392,7 @@ func gatewayProxy(gateway *structs.ConsulGateway, mode string) *structs.ConsulGa // set default connect timeout if not set if proxy.ConnectTimeout == nil { - proxy.ConnectTimeout = helper.TimeToPtr(defaultConnectTimeout) + proxy.ConnectTimeout = pointer.Of(defaultConnectTimeout) } if mode == "bridge" { diff --git a/nomad/job_endpoint_hook_connect_test.go b/nomad/job_endpoint_hook_connect_test.go index b665205ca2e..4e5bd7fc9db 100644 --- a/nomad/job_endpoint_hook_connect_test.go +++ b/nomad/job_endpoint_hook_connect_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -705,7 +705,7 @@ func TestJobEndpointConnect_gatewayProxyIsDefault(t *testing.T) { t.Run("unrelated fields set", func(t *testing.T) { result := gatewayProxyIsDefault(&structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), Config: map[string]interface{}{"foo": 1}, }) require.True(t, result) @@ -826,7 +826,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(defaultConnectTimeout), + ConnectTimeout: pointer.Of(defaultConnectTimeout), EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ @@ -840,7 +840,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("ingress set defaults", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), Config: map[string]interface{}{"foo": 1}, }, Ingress: &structs.ConsulIngressConfigEntry{ @@ -854,7 +854,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), Config: map[string]interface{}{"foo": 1}, EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, @@ -894,7 +894,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("terminating set defaults", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyDNSDiscoveryType: "STRICT_DNS", }, Terminating: &structs.ConsulTerminatingConfigEntry{ @@ -908,7 +908,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { }, }, "bridge") 
require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, EnvoyDNSDiscoveryType: "STRICT_DNS", @@ -945,14 +945,14 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("mesh set defaults in bridge", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), }, Mesh: &structs.ConsulMeshConfigEntry{ // nothing }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ @@ -971,14 +971,14 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("mesh set defaults in host", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), }, Mesh: &structs.ConsulMeshConfigEntry{ // nothing }, }, "host") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), }, result) }) diff --git a/nomad/job_endpoint_hook_expose_check.go b/nomad/job_endpoint_hook_expose_check.go index 7ce70aa473b..f5590bbbbe6 100644 --- a/nomad/job_endpoint_hook_expose_check.go +++ b/nomad/job_endpoint_hook_expose_check.go @@ -51,9 +51,9 @@ func (jobExposeCheckHook) Mutate(job *structs.Job) (_ *structs.Job, warnings []e } // Validate will ensure: -// - The job contains valid network configuration for each task group in which -// an expose path is configured. The network must be of type bridge mode. -// - The check Expose field is configured only for connect-enabled group-services. +// - The job contains valid network configuration for each task group in which +// an expose path is configured. The network must be of type bridge mode. +// - The check Expose field is configured only for connect-enabled group-services. 
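The doc-comment hunks here (and later in `search_endpoint.go`) appear to be the Go 1.19 `gofmt` doc-comment reflow rather than wording changes: list items gain the indentation that makes `go doc` render them as a bulleted list, and code-like lines are set off as preformatted blocks. A small, purely illustrative example of that convention (the function name below is hypothetical):

```go
// Validate will ensure:
//   - list items are written with "//   - ", which go doc renders as a list
//   - continuation lines are indented to align under the item text
//
//	a tab-indented line after a blank "//" renders as preformatted text
func exampleDocComment() {}
```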
func (jobExposeCheckHook) Validate(job *structs.Job) (warnings []error, err error) { for _, tg := range job.TaskGroups { // Make sure any group that contains a group-service that enables expose diff --git a/nomad/job_endpoint_hook_expose_check_test.go b/nomad/job_endpoint_hook_expose_check_test.go index 8e71883ba32..08c78685506 100644 --- a/nomad/job_endpoint_hook_expose_check_test.go +++ b/nomad/job_endpoint_hook_expose_check_test.go @@ -317,7 +317,7 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { Name: "group1", Services: []*structs.Service{s}, Networks: structs.Networks{{ - Mode: "bridge", + Mode: "bridge", DynamicPorts: []structs.Port{ // service declares "sPort", but does not exist }, diff --git a/nomad/job_endpoint_oss.go b/nomad/job_endpoint_oss.go index 7f2b56c788d..d80281a3bca 100644 --- a/nomad/job_endpoint_oss.go +++ b/nomad/job_endpoint_oss.go @@ -8,7 +8,7 @@ import ( ) // enforceSubmitJob is used to check any Sentinel policies for the submit-job scope -func (j *Job) enforceSubmitJob(override bool, job *structs.Job) (error, error) { +func (j *Job) enforceSubmitJob(override bool, job *structs.Job, nomadACLToken *structs.ACLToken, ns *structs.Namespace) (error, error) { return nil, nil } diff --git a/nomad/job_endpoint_oss_test.go b/nomad/job_endpoint_oss_test.go index 304422d8155..446abe5d191 100644 --- a/nomad/job_endpoint_oss_test.go +++ b/nomad/job_endpoint_oss_test.go @@ -10,7 +10,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -26,7 +26,7 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse_oss(t *testing.T s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue - c.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(false) + c.ConsulConfig.AllowUnauthenticated = pointer.Of(false) }) defer cleanupS1() codec := rpcClient(t, s1) diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 76113aef094..684c17a3bb6 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -12,7 +12,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -361,7 +361,7 @@ func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) { job.TaskGroups[0].Services[0].Connect = &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ "service1": { @@ -2393,7 +2393,7 @@ func TestJobEndpoint_Revert(t *testing.T) { revertReq := &structs.JobRevertRequest{ JobID: job.ID, JobVersion: 0, - EnforcePriorVersion: helper.Uint64ToPtr(10), + EnforcePriorVersion: pointer.Of(uint64(10)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -2426,7 +2426,7 @@ func TestJobEndpoint_Revert(t *testing.T) { revertReq = &structs.JobRevertRequest{ 
JobID: job.ID, JobVersion: 0, - EnforcePriorVersion: helper.Uint64ToPtr(1), + EnforcePriorVersion: pointer.Of(uint64(1)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -2594,7 +2594,7 @@ func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) { revertReq = &structs.JobRevertRequest{ JobID: job.ID, JobVersion: 0, - EnforcePriorVersion: helper.Uint64ToPtr(1), + EnforcePriorVersion: pointer.Of(uint64(1)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -4959,7 +4959,6 @@ func TestJobEndpoint_ListJobs(t *testing.T) { // TestJobEndpoint_ListJobs_AllNamespaces_OSS asserts that server // returns all jobs across namespace. -// func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) { ci.Parallel(t) @@ -7104,7 +7103,7 @@ func TestJobEndpoint_Scale(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: groupName, }, - Count: helper.Int64ToPtr(int64(originalCount + 1)), + Count: pointer.Of(int64(originalCount + 1)), Message: "because of the load", Meta: map[string]interface{}{ "metrics": map[string]string{ @@ -7189,7 +7188,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { }, Meta: scalingMetadata, Message: scalingMessage, - Count: helper.Int64ToPtr(newCount), + Count: pointer.Of(newCount), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -7481,7 +7480,7 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: job.TaskGroups[0].Name, }, - Count: helper.Int64ToPtr(int64(count) + 1), + Count: pointer.Of(int64(count) + 1), Message: "this should fail", Meta: map[string]interface{}{ "metrics": map[string]string{ @@ -7505,7 +7504,7 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) { err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) require.Nil(err) - scale.Count = helper.Int64ToPtr(10) + scale.Count = pointer.Of(int64(10)) scale.Message = "error message" scale.Error = true err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp) @@ -7538,7 +7537,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: job.TaskGroups[0].Name, }, - Count: helper.Int64ToPtr(pol.Max + 1), + Count: pointer.Of(pol.Max + 1), Message: "out of bounds", PolicyOverride: false, WriteRequest: structs.WriteRequest{ @@ -7550,7 +7549,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { require.Error(err) require.Contains(err.Error(), "group count was greater than scaling policy maximum: 11 > 10") - scale.Count = helper.Int64ToPtr(2) + scale.Count = pointer.Of(int64(2)) err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp) require.Error(err) require.Contains(err.Error(), "group count was less than scaling policy minimum: 2 < 3") @@ -7644,7 +7643,7 @@ func TestJobEndpoint_Scale_Priority(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: groupName, }, - Count: helper.Int64ToPtr(int64(originalCount + 1)), + Count: pointer.Of(int64(originalCount + 1)), Message: "scotty, we need more power", PolicyOverride: false, WriteRequest: structs.WriteRequest{ @@ -7690,7 +7689,7 @@ func TestJobEndpoint_InvalidCount(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: job.TaskGroups[0].Name, }, - Count: helper.Int64ToPtr(int64(-1)), + Count: pointer.Of(int64(-1)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -7744,7 +7743,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a1.ClientStatus = 
structs.AllocClientStatusRunning // healthy a1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } a2 := mock.Alloc() a2.Job = jobV2 @@ -7753,7 +7752,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a2.ClientStatus = structs.AllocClientStatusPending // unhealthy a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } a3 := mock.Alloc() a3.Job = jobV2 @@ -7762,7 +7761,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a3.ClientStatus = structs.AllocClientStatusRunning // canary a3.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: true, } // no health @@ -7776,7 +7775,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { event := &structs.ScalingEvent{ Time: time.Now().Unix(), - Count: helper.Int64ToPtr(5), + Count: pointer.Of(int64(5)), Message: "message", Error: false, Meta: map[string]interface{}{ diff --git a/nomad/job_endpoint_validators_test.go b/nomad/job_endpoint_validators_test.go index de8acaa904b..4d115905615 100644 --- a/nomad/job_endpoint_validators_test.go +++ b/nomad/job_endpoint_validators_test.go @@ -105,6 +105,7 @@ func TestJobNamespaceConstraintCheckHook_validate(t *testing.T) { job.TaskGroups[0].Tasks[0].Driver = "docker" job.TaskGroups[0].Tasks[1].Driver = "qemu" job.TaskGroups[0].Tasks[2].Driver = "docker" + job.TaskGroups[0].Tasks[3].Driver = "qemu" _, err := hook.Validate(job) require.Nil(t, err) diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index b4afc5de26c..f364a7e562a 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -8,8 +8,8 @@ import ( "time" fake "github.com/brianvoe/gofakeit/v6" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/envoy" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -592,6 +592,22 @@ func LifecycleJob() *structs.Job { MemoryMB: 256, }, }, + { + Name: "poststart", + Driver: "mock_driver", + Config: map[string]interface{}{ + "run_for": "1s", + }, + Lifecycle: &structs.TaskLifecycleConfig{ + Hook: structs.TaskLifecycleHookPoststart, + Sidecar: false, + }, + LogConfig: structs.DefaultLogConfig(), + Resources: &structs.Resources{ + CPU: 1000, + MemoryMB: 256, + }, + }, }, }, }, @@ -634,6 +650,10 @@ func LifecycleAlloc() *structs.Allocation { CPU: 1000, MemoryMB: 256, }, + "poststart": { + CPU: 1000, + MemoryMB: 256, + }, }, AllocatedResources: &structs.AllocatedResources{ @@ -662,6 +682,14 @@ func LifecycleAlloc() *structs.Allocation { MemoryMB: 256, }, }, + "poststart": { + Cpu: structs.AllocatedCpuResources{ + CpuShares: 1000, + }, + Memory: structs.AllocatedMemoryResources{ + MemoryMB: 256, + }, + }, }, }, Job: LifecycleJob(), @@ -672,6 +700,50 @@ func LifecycleAlloc() *structs.Allocation { return alloc } +type LifecycleTaskDef struct { + Name string + RunFor string + ExitCode int + Hook string + IsSidecar bool +} + +// LifecycleAllocFromTasks generates an Allocation with mock tasks that have +// the provided lifecycles. 
+func LifecycleAllocFromTasks(tasks []LifecycleTaskDef) *structs.Allocation { + alloc := LifecycleAlloc() + alloc.Job.TaskGroups[0].Tasks = []*structs.Task{} + for _, task := range tasks { + var lc *structs.TaskLifecycleConfig + if task.Hook != "" { + // TODO: task coordinator doesn't treat nil and empty structs the same + lc = &structs.TaskLifecycleConfig{ + Hook: task.Hook, + Sidecar: task.IsSidecar, + } + } + + alloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, + &structs.Task{ + Name: task.Name, + Driver: "mock_driver", + Config: map[string]interface{}{ + "run_for": task.RunFor, + "exit_code": task.ExitCode}, + Lifecycle: lc, + LogConfig: structs.DefaultLogConfig(), + Resources: &structs.Resources{CPU: 100, MemoryMB: 256}, + }, + ) + alloc.TaskResources[task.Name] = &structs.Resources{CPU: 100, MemoryMB: 256} + alloc.AllocatedResources.Tasks[task.Name] = &structs.AllocatedTaskResources{ + Cpu: structs.AllocatedCpuResources{CpuShares: 100}, + Memory: structs.AllocatedMemoryResources{MemoryMB: 256}, + } + } + return alloc +} + func LifecycleJobWithPoststopDeploy() *structs.Job { job := &structs.Job{ Region: "global", @@ -1198,7 +1270,7 @@ func ConnectIngressGatewayJob(mode string, inject bool) *structs.Job { Connect: &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), }, Ingress: &structs.ConsulIngressConfigEntry{ @@ -1249,7 +1321,7 @@ func ConnectTerminatingGatewayJob(mode string, inject bool) *structs.Job { Connect: &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), }, Terminating: &structs.ConsulTerminatingConfigEntry{ @@ -1300,7 +1372,7 @@ func ConnectMeshGatewayJob(mode string, inject bool) *structs.Job { Connect: &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), }, Mesh: &structs.ConsulMeshConfigEntry{ diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index d4f24153a69..899f51470cb 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -3637,7 +3638,7 @@ func TestClientEndpoint_DeriveSIToken(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) // Set allow unauthenticated (no operator token required) - s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true) + s1.config.ConsulConfig.AllowUnauthenticated = pointer.Of(true) // Create the node node := mock.Node() @@ -3689,7 +3690,7 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) // Set allow unauthenticated (no operator token required) - s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true) + s1.config.ConsulConfig.AllowUnauthenticated = 
pointer.Of(true) // Create the node node := mock.Node() diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index eebc713d3cf..68d890f8cf7 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -92,7 +92,6 @@ func newPlanner(s *Server) (*planner, error) { // in anticipation of this case we cannot respond to the plan until // the Raft log is updated. This means our schedulers will stall, // but there are many of those and only a single plan verifier. -// func (p *planner) planApply() { // planIndexCh is used to track an outstanding application and receive // its committed index while snap holds an optimistic state which diff --git a/nomad/scaling_endpoint.go b/nomad/scaling_endpoint.go index dd8c5fe1199..a93ddd5a146 100644 --- a/nomad/scaling_endpoint.go +++ b/nomad/scaling_endpoint.go @@ -130,7 +130,7 @@ func (p *Scaling) GetPolicy(args *structs.ScalingPolicySpecificRequest, if err != nil { return err } - reply.Index = helper.Uint64Max(1, index) + reply.Index = helper.Max(1, index) } return nil }} @@ -194,7 +194,7 @@ func (p *Scaling) listAllNamespaces(args *structs.ScalingPolicyListRequest, repl if err != nil { return err } - reply.Index = helper.Uint64Max(1, index) + reply.Index = helper.Max(1, index) // Set the query response p.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index 67cd9b0c76f..86b4b861e18 100644 --- a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -186,8 +186,9 @@ func (s *Search) getFuzzyMatches(iter memdb.ResultIterator, text string) (map[st } // fuzzyIndex returns the index of text in name, ignoring case. -// text is assumed to be lower case. -// -1 is returned if name does not contain text. +// +// text is assumed to be lower case. +// -1 is returned if name does not contain text. func fuzzyIndex(name, text string) int { lower := strings.ToLower(name) return strings.Index(lower, text) @@ -238,12 +239,12 @@ func (s *Search) fuzzyMatchSingle(raw interface{}, text string) (structs.Context // of matchable Context. Results are categorized by Context and paired with their // score, but are unsorted. // -// job.name -// job|group.name -// job|group|service.name -// job|group|task.name -// job|group|task|service.name -// job|group|task|driver.{image,command,class} +// job.name +// job|group.name +// job|group|service.name +// job|group|task.name +// job|group|task|service.name +// job|group|task|driver.{image,command,class} func (*Search) fuzzyMatchesJob(j *structs.Job, text string) map[structs.Context][]fuzzyMatch { sm := make(map[structs.Context][]fuzzyMatch) ns := j.Namespace @@ -654,17 +655,20 @@ func sufficientSearchPerms(aclObj *acl.ACL, namespace string, context structs.Co // results are limited to policies of the provided ACL token. // // These types are limited to prefix UUID searching: -// Evals, Deployments, ScalingPolicies, Volumes +// +// Evals, Deployments, ScalingPolicies, Volumes // // These types are available for fuzzy searching: -// Nodes, Namespaces, Jobs, Allocs, Plugins +// +// Nodes, Namespaces, Jobs, Allocs, Plugins // // Jobs are a special case that expand into multiple types, and whose return // values include Scope which is a descending list of IDs of parent objects, // starting with the Namespace. The subtypes of jobs are fuzzy searchable. 
// // The Jobs type expands into these sub types: -// Jobs, Groups, Services, Tasks, Images, Commands, Classes +// +// Jobs, Groups, Services, Tasks, Images, Commands, Classes // // The results are in descending order starting with strongest match, per Context type. func (s *Search) FuzzySearch(args *structs.FuzzySearchRequest, reply *structs.FuzzySearchResponse) error { diff --git a/nomad/secure_variables_endpoint.go b/nomad/secure_variables_endpoint.go index 9db43bf15a7..c1b15446bce 100644 --- a/nomad/secure_variables_endpoint.go +++ b/nomad/secure_variables_endpoint.go @@ -2,7 +2,6 @@ package nomad import ( "encoding/json" - "errors" "fmt" "net/http" "strings" @@ -11,7 +10,7 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/state" @@ -28,157 +27,180 @@ type SecureVariables struct { encrypter *Encrypter } -// Upsert creates or updates secure variables held within Nomad. Due to ACL -// checking, every element in Data will be checked for namespace and targeted -// to the namespace in the SecureVariable. Therefore, the caller must ensure -// that the provided struct's Namespace is the desired destination. Unset -// Namespace values will default to `args.RequestNamespace` -func (sv *SecureVariables) Upsert( - args *structs.SecureVariablesUpsertRequest, - reply *structs.SecureVariablesUpsertResponse) error { - - if done, err := sv.srv.forward(structs.SecureVariablesUpsertRPCMethod, args, args, reply); done { +// Apply is used to apply a SV update request to the data store. +func (sv *SecureVariables) Apply(args *structs.SecureVariablesApplyRequest, reply *structs.SecureVariablesApplyResponse) error { + if done, err := sv.srv.forward(structs.SecureVariablesApplyRPCMethod, args, args, reply); done { return err } - defer metrics.MeasureSince([]string{"nomad", "secure_variables", "upsert"}, time.Now()) + defer metrics.MeasureSince([]string{ + "nomad", "secure_variables", "apply", string(args.Op)}, time.Now()) - // Use a multierror, so we can capture all validation errors and pass this - // back so they can be addressed by the caller in a single pass. - var mErr multierror.Error - uArgs := structs.SecureVariablesEncryptedUpsertRequest{ - Data: make([]*structs.SecureVariableEncrypted, len(args.Data)), - WriteRequest: args.WriteRequest, + // Check if the Namespace is explicitly set on the secure variable. If + // not, use the RequestNamespace + if args.Var == nil { + return fmt.Errorf("variable must not be nil") + } + targetNS := args.Var.Namespace + if targetNS == "" { + targetNS = args.RequestNamespace() + args.Var.Namespace = targetNS } - // Iterate the secure variables and validate them. Any error results in the - // call failing. - for i, v := range args.Data { + canRead, err := svePreApply(sv, args, args.Var) + if err != nil { + return err + } - // Check if the Namespace is explicitly set on the secure variable. If - // not, use the RequestNamespace - targetNS := v.Namespace - if targetNS == "" { - targetNS = args.RequestNamespace() - v.Namespace = targetNS - } + var ev *structs.SecureVariableEncrypted - // Perform the ACL token resolution. 
- if aclObj, err := sv.srv.ResolveToken(args.AuthToken); err != nil { - return err - } else if aclObj != nil { - for _, variable := range args.Data { - if !aclObj.AllowSecureVariableOperation(targetNS, - variable.Path, acl.PolicyWrite) { - return structs.ErrPermissionDenied - } - } - } - - v.Canonicalize() - if err := v.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - continue - } - if args.CheckIndex != nil { - var conflict *structs.SecureVariableDecrypted - if err := sv.validateCASUpdate(*args.CheckIndex, v, &conflict); err != nil { - if reply.Conflicts == nil { - reply.Conflicts = make([]*structs.SecureVariableDecrypted, len(args.Data)) - } - reply.Conflicts[i] = conflict - continue - } - } - ev, err := sv.encrypt(v) + switch args.Op { + case structs.SVOpSet, structs.SVOpCAS: + ev, err = sv.encrypt(args.Var) if err != nil { - mErr.Errors = append(mErr.Errors, err) - continue + return fmt.Errorf("secure variable error: encrypt: %w", err) + } + now := time.Now().UnixNano() + ev.CreateTime = now // existing will override if it exists + ev.ModifyTime = now + case structs.SVOpDelete, structs.SVOpDeleteCAS: + ev = &structs.SecureVariableEncrypted{ + SecureVariableMetadata: structs.SecureVariableMetadata{ + Namespace: args.Var.Namespace, + Path: args.Var.Path, + ModifyIndex: args.Var.ModifyIndex, + }, } - uArgs.Data[i] = ev - } - if len(reply.Conflicts) != 0 { - // This is a reply with CAS conflicts so it needs to return here - // "successfully". The caller needs to check to see if Conflicts - // is non-Nil. - return nil - } - if err := mErr.ErrorOrNil(); err != nil { - return &mErr } - // TODO: This should be done on each Data in uArgs. - if err := sv.enforceQuota(uArgs); err != nil { - return err + // Make a SVEArgs + sveArgs := structs.SVApplyStateRequest{ + Op: args.Op, + Var: ev, + WriteRequest: args.WriteRequest, } - // Update via Raft. - out, index, err := sv.srv.raftApply(structs.SecureVariableUpsertRequestType, uArgs) + // Apply the update. + out, index, err := sv.srv.raftApply(structs.SVApplyStateRequestType, sveArgs) if err != nil { - return err + return fmt.Errorf("raft apply failed: %w", err) } - - // Check if the FSM response, which is an interface, contains an error. - if err, ok := out.(error); ok && err != nil { + r, err := sv.makeSecureVariablesApplyResponse(args, out.(*structs.SVApplyStateResponse), canRead) + if err != nil { return err } - - // Update the index. There is no need to floor this as we are writing to - // state and therefore will get a non-zero index response. + *reply = *r reply.Index = index return nil } -// Delete removes a single secure variable, as specified by its namespace and -// path from Nomad. -func (sv *SecureVariables) Delete( - args *structs.SecureVariablesDeleteRequest, - reply *structs.SecureVariablesDeleteResponse) error { +func svePreApply(sv *SecureVariables, args *structs.SecureVariablesApplyRequest, vd *structs.SecureVariableDecrypted) (canRead bool, err error) { - if done, err := sv.srv.forward(structs.SecureVariablesDeleteRPCMethod, args, args, reply); done { - return err - } - defer metrics.MeasureSince([]string{"nomad", "secure_variables", "delete"}, time.Now()) + canRead = false + var aclObj *acl.ACL // Perform the ACL token resolution. 
- if aclObj, err := sv.srv.ResolveToken(args.AuthToken); err != nil { - return err + if aclObj, err = sv.srv.ResolveToken(args.AuthToken); err != nil { + return } else if aclObj != nil { - if !aclObj.AllowSecureVariableOperation(args.RequestNamespace(), args.Path, acl.PolicyWrite) { - return structs.ErrPermissionDenied + hasPerm := func(perm string) bool { + return aclObj.AllowSecureVariableOperation(args.Var.Namespace, + args.Var.Path, perm) } + canRead = hasPerm(acl.SecureVariablesCapabilityRead) + + switch args.Op { + case structs.SVOpSet, structs.SVOpCAS: + if !hasPerm(acl.SecureVariablesCapabilityWrite) { + err = structs.ErrPermissionDenied + return + } + case structs.SVOpDelete, structs.SVOpDeleteCAS: + if !hasPerm(acl.SecureVariablesCapabilityDestroy) { + err = structs.ErrPermissionDenied + return + } + default: + err = fmt.Errorf("svPreApply: unexpected SVOp received: %q", args.Op) + return + } + } else { + // ACLs are not enabled. + canRead = true } - if args.CheckIndex != nil { - if err := sv.validateCASDelete(*args.CheckIndex, args.Namespace, args.Path, &reply.Conflict); err != nil { + switch args.Op { + case structs.SVOpSet, structs.SVOpCAS: + args.Var.Canonicalize() + if err = args.Var.Validate(); err != nil { + return + } - // If the validateCASDelete func sends back the conflict sentinel - // error value then it will have put the conflict into the reply, - // and we need to "succeed". - if err.Error() == "conflict" { - reply.Index = reply.Conflict.ModifyIndex - return nil - } + case structs.SVOpDelete, structs.SVOpDeleteCAS: + if args.Var == nil || args.Var.Path == "" { + err = fmt.Errorf("delete requires a Path") + return + } + } + + return +} - // There are a few cases where validateCASDelete can error that - // aren't conflicts. - return err +// MakeSecureVariablesApplyResponse merges the output of this SVApplyStateResponse with the +// SecureVariableDataItems +func (sv *SecureVariables) makeSecureVariablesApplyResponse( + req *structs.SecureVariablesApplyRequest, eResp *structs.SVApplyStateResponse, + canRead bool) (*structs.SecureVariablesApplyResponse, error) { + + out := structs.SecureVariablesApplyResponse{ + Op: eResp.Op, + Input: req.Var, + Result: eResp.Result, + Error: eResp.Error, + WriteMeta: eResp.WriteMeta, + } + + if eResp.IsOk() { + if eResp.WrittenSVMeta != nil { + // The writer is allowed to read their own write + out.Output = &structs.SecureVariableDecrypted{ + SecureVariableMetadata: *eResp.WrittenSVMeta, + Items: req.Var.Items.Copy(), + } } + return &out, nil } - // Update via Raft. - out, index, err := sv.srv.raftApply(structs.SecureVariableDeleteRequestType, args) - if err != nil { - return err + + // At this point, the response is necessarily a conflict. + // Prime output from the encrypted responses metadata + out.Conflict = &structs.SecureVariableDecrypted{ + SecureVariableMetadata: eResp.Conflict.SecureVariableMetadata, + Items: nil, } - // Check if the FSM response, which is an interface, contains an error. - if err, ok := out.(error); ok && err != nil { - return err + // If the caller can't read the conflicting value, return the + // metadata, but no items and flag it as redacted + if !canRead { + out.Result = structs.SVOpResultRedacted + return &out, nil } - // Update the index. There is no need to floor this as we are writing to - // state and therefore will get a non-zero index response. 
- reply.Index = index - return nil + if eResp.Conflict == nil || eResp.Conflict.KeyID == "" { + // zero-value conflicts can be returned for delete-if-set + dv := &structs.SecureVariableDecrypted{} + dv.Namespace = eResp.Conflict.Namespace + dv.Path = eResp.Conflict.Path + out.Conflict = dv + } else { + // At this point, the caller has read access to the conflicting + // value so we can return it in the output; decrypt it. + dv, err := sv.decrypt(eResp.Conflict) + if err != nil { + return nil, err + } + out.Conflict = dv + } + + return &out, nil } // Read is used to get a specific secure variable @@ -188,8 +210,7 @@ func (sv *SecureVariables) Read(args *structs.SecureVariablesReadRequest, reply } defer metrics.MeasureSince([]string{"nomad", "secure_variables", "read"}, time.Now()) - // FIXME: Temporary ACL Test policy. Update once implementation complete - err := sv.handleMixedAuthEndpoint(args.QueryOptions, + _, err := sv.handleMixedAuthEndpoint(args.QueryOptions, acl.PolicyRead, args.Path) if err != nil { return err @@ -240,8 +261,7 @@ func (sv *SecureVariables) List( return sv.listAllSecureVariables(args, reply) } - // FIXME: Temporary ACL Test policy. Update once implementation complete - err := sv.handleMixedAuthEndpoint(args.QueryOptions, + aclObj, err := sv.handleMixedAuthEndpoint(args.QueryOptions, acl.PolicyList, args.Prefix) if err != nil { return err @@ -271,13 +291,23 @@ func (sv *SecureVariables) List( }, ) + filters := []paginator.Filter{ + paginator.GenericFilter{ + Allow: func(raw interface{}) (bool, error) { + sv := raw.(*structs.SecureVariableEncrypted) + return strings.HasPrefix(sv.Path, args.Prefix) && + (aclObj == nil || aclObj.AllowSecureVariableOperation(sv.Namespace, sv.Path, acl.PolicyList)), nil + }, + }, + } + // Set up our output after we have checked the error. var svs []*structs.SecureVariableMetadata // Build the paginator. This includes the function that is // responsible for appending a variable to the secure variables // stubs slice. - paginatorImpl, err := paginator.NewPaginator(iter, tokenizer, nil, args.QueryOptions, + paginatorImpl, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions, func(raw interface{}) error { sv := raw.(*structs.SecureVariableEncrypted) svStub := sv.SecureVariableMetadata @@ -336,7 +366,7 @@ func (s *SecureVariables) listAllSecureVariables( // Identify which namespaces the caller has access to. If they do // not have access to any, send them an empty response. Otherwise, // handle any error in a traditional manner. - allowedNSes, err := allowedNSes(aclObj, stateStore, allowFunc) + _, err := allowedNSes(aclObj, stateStore, allowFunc) switch err { case structs.ErrPermissionDenied: reply.Data = make([]*structs.SecureVariableMetadata, 0) @@ -364,24 +394,19 @@ func (s *SecureVariables) listAllSecureVariables( }, ) - // Wrap the SecureVariables iterator with a FilterIterator to - // eliminate invalid values before sending them to the paginator. - fltrIter := memdb.NewFilterIterator(iter, func(raw interface{}) bool { - - // Values are filtered when the func returns true. 
- sv := raw.(*structs.SecureVariableEncrypted) - if allowedNSes != nil && !allowedNSes[sv.Namespace] { - return true - } - if !strings.HasPrefix(sv.Path, args.Prefix) { - return true - } - return false - }) + filters := []paginator.Filter{ + paginator.GenericFilter{ + Allow: func(raw interface{}) (bool, error) { + sv := raw.(*structs.SecureVariableEncrypted) + return strings.HasPrefix(sv.Path, args.Prefix) && + (aclObj == nil || aclObj.AllowSecureVariableOperation(sv.Namespace, sv.Path, acl.PolicyList)), nil + }, + }, + } // Build the paginator. This includes the function that is // responsible for appending a variable to the stubs array. - paginatorImpl, err := paginator.NewPaginator(fltrIter, tokenizer, nil, args.QueryOptions, + paginatorImpl, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions, func(raw interface{}) error { sv := raw.(*structs.SecureVariableEncrypted) svStub := sv.SecureVariableMetadata @@ -445,7 +470,7 @@ func (sv *SecureVariables) decrypt(v *structs.SecureVariableEncrypted) (*structs // handleMixedAuthEndpoint is a helper to handle auth on RPC endpoints that can // either be called by external clients or by workload identity -func (sv *SecureVariables) handleMixedAuthEndpoint(args structs.QueryOptions, cap, pathOrPrefix string) error { +func (sv *SecureVariables) handleMixedAuthEndpoint(args structs.QueryOptions, cap, pathOrPrefix string) (*acl.ACL, error) { // Perform the initial token resolution. aclObj, err := sv.srv.ResolveToken(args.AuthToken) @@ -454,15 +479,15 @@ func (sv *SecureVariables) handleMixedAuthEndpoint(args structs.QueryOptions, ca // are not enabled, otherwise trigger the allowed namespace function. if aclObj != nil { if !aclObj.AllowSecureVariableOperation(args.RequestNamespace(), pathOrPrefix, cap) { - return structs.ErrPermissionDenied + return nil, structs.ErrPermissionDenied } } - return nil + return aclObj, nil } if helper.IsUUID(args.AuthToken) { // early return for ErrNotFound or other errors if it's formed // like an ACLToken.SecretID - return err + return nil, err } // Attempt to verify the token as a JWT with a workload @@ -472,27 +497,27 @@ func (sv *SecureVariables) handleMixedAuthEndpoint(args structs.QueryOptions, ca metrics.IncrCounter([]string{ "nomad", "secure_variables", "invalid_allocation_identity"}, 1) sv.logger.Trace("allocation identity was not valid", "error", err) - return structs.ErrPermissionDenied + return nil, structs.ErrPermissionDenied } // The workload identity gets access to paths that match its // identity, without having to go thru the ACL system err = sv.authValidatePrefix(claims, args.RequestNamespace(), pathOrPrefix) if err == nil { - return nil + return aclObj, nil } // If the workload identity doesn't match the implicit permissions // given to paths, check for its attached ACL policies aclObj, err = sv.srv.ResolveClaims(claims) if err != nil { - return err // this only returns an error when the state store has gone wrong + return nil, err // this only returns an error when the state store has gone wrong } if aclObj != nil && aclObj.AllowSecureVariableOperation( args.RequestNamespace(), pathOrPrefix, cap) { - return nil + return aclObj, nil } - return structs.ErrPermissionDenied + return nil, structs.ErrPermissionDenied } // authValidatePrefix asserts that the requested path is valid for @@ -527,53 +552,3 @@ func (sv *SecureVariables) authValidatePrefix(claims *structs.IdentityClaims, ns } return nil } - -func (s *SecureVariables) validateCASUpdate(cidx uint64, sv 
*structs.SecureVariableDecrypted, conflict **structs.SecureVariableDecrypted) error { - return s.validateCAS(cidx, sv.Namespace, sv.Path, conflict) -} - -func (s *SecureVariables) validateCASDelete(cidx uint64, namespace, path string, conflict **structs.SecureVariableDecrypted) error { - return s.validateCAS(cidx, namespace, path, conflict) -} - -func (s *SecureVariables) validateCAS(cidx uint64, namespace, path string, conflictOut **structs.SecureVariableDecrypted) error { - casConflict := errors.New("conflict") - // lookup any existing key and validate the update - snap, err := s.srv.fsm.State().Snapshot() - if err != nil { - return err - } - ws := memdb.NewWatchSet() - exist, err := snap.GetSecureVariable(ws, namespace, path) - if err != nil { - return fmt.Errorf("cas error: %w", err) - } - if exist == nil && cidx != 0 { - // return a zero value with the namespace and path applied - zeroVal := &structs.SecureVariableDecrypted{ - SecureVariableMetadata: structs.SecureVariableMetadata{ - Namespace: namespace, - Path: path, - CreateIndex: 0, - CreateTime: 0, - ModifyIndex: 0, - ModifyTime: 0, - }, - Items: nil, - } - *conflictOut = zeroVal - return casConflict - } - if exist != nil && exist.ModifyIndex != cidx { - dec, err := s.decrypt(exist) - if err != nil { - // we can't return the conflict and we will have to bail out - decErrStr := fmt.Sprintf(". Additional error decrypting conflict: %s", err) - return fmt.Errorf("cas error: requested index %v; found index %v%s", cidx, exist.ModifyIndex, decErrStr) - } - *conflictOut = dec - return casConflict - } - - return nil -} diff --git a/nomad/secure_variables_endpoint_oss.go b/nomad/secure_variables_endpoint_oss.go deleted file mode 100644 index 46c88aeda61..00000000000 --- a/nomad/secure_variables_endpoint_oss.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !ent -// +build !ent - -package nomad - -import "github.com/hashicorp/nomad/nomad/structs" - -func (sv *SecureVariables) enforceQuota(uArgs structs.SecureVariablesEncryptedUpsertRequest) error { - return nil -} diff --git a/nomad/secure_variables_endpoint_test.go b/nomad/secure_variables_endpoint_test.go index 1ab34a4b083..a9779ee2cbe 100644 --- a/nomad/secure_variables_endpoint_test.go +++ b/nomad/secure_variables_endpoint_test.go @@ -1,17 +1,22 @@ package nomad import ( + "encoding/json" "fmt" "math/rand" "strings" "testing" + "time" + + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/shoenig/test" + "github.com/shoenig/test/must" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" - "github.com/stretchr/testify/require" ) func TestSecureVariablesEndpoint_auth(t *testing.T) { @@ -46,25 +51,25 @@ func TestSecureVariablesEndpoint_auth(t *testing.T) { alloc3.Job.ParentID = jobID store := srv.fsm.State() - require.NoError(t, store.UpsertNamespaces(1000, []*structs.Namespace{{Name: ns}})) - require.NoError(t, store.UpsertAllocs( + must.NoError(t, store.UpsertNamespaces(1000, []*structs.Namespace{{Name: ns}})) + must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3})) claims1 := alloc1.ToTaskIdentityClaims(nil, "web") idToken, err := srv.encrypter.SignClaims(claims1) - require.NoError(t, err) + must.NoError(t, err) claims2 := alloc2.ToTaskIdentityClaims(nil, "web") noPermissionsToken, err := srv.encrypter.SignClaims(claims2) - require.NoError(t, err) + must.NoError(t, err) claims3 := 
alloc3.ToTaskIdentityClaims(alloc3.Job, "web") idDispatchToken, err := srv.encrypter.SignClaims(claims3) - require.NoError(t, err) + must.NoError(t, err) // corrupt the signature of the token idTokenParts := strings.Split(idToken, ".") - require.Len(t, idTokenParts, 3) + must.Len(t, 3, idTokenParts) sig := []string(strings.Split(idTokenParts[2], "")) rand.Shuffle(len(sig), func(i, j int) { sig[i], sig[j] = sig[j], sig[i] @@ -73,38 +78,42 @@ func TestSecureVariablesEndpoint_auth(t *testing.T) { invalidIDToken := strings.Join(idTokenParts, ".") policy := mock.ACLPolicy() - policy.Name = fmt.Sprintf("_:%s/%s/%s", ns, jobID, alloc1.TaskGroup) policy.Rules = `namespace "nondefault-namespace" { secure_variables { - path "nomad/jobs/*" { capabilities = ["read"] } + path "nomad/jobs/*" { capabilities = ["list"] } path "other/path" { capabilities = ["read"] } }}` + policy.JobACL = &structs.JobACL{ + Namespace: ns, + JobID: jobID, + Group: alloc1.TaskGroup, + } policy.SetHash() err = store.UpsertACLPolicies(structs.MsgTypeTestSetup, 1100, []*structs.ACLPolicy{policy}) - require.NoError(t, err) + must.NoError(t, err) aclToken := mock.ACLToken() aclToken.Policies = []string{policy.Name} err = store.UpsertACLTokens(structs.MsgTypeTestSetup, 1150, []*structs.ACLToken{aclToken}) - require.NoError(t, err) + must.NoError(t, err) t.Run("terminal alloc should be denied", func(t *testing.T) { - err = srv.staticEndpoints.SecureVariables.handleMixedAuthEndpoint( + _, err = srv.staticEndpoints.SecureVariables.handleMixedAuthEndpoint( structs.QueryOptions{AuthToken: idToken, Namespace: ns}, "n/a", fmt.Sprintf("nomad/jobs/%s/web/web", jobID)) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) }) // make alloc non-terminal alloc1.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, store.UpsertAllocs( + must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, 1200, []*structs.Allocation{alloc1})) t.Run("wrong namespace should be denied", func(t *testing.T) { - err = srv.staticEndpoints.SecureVariables.handleMixedAuthEndpoint( + _, err = srv.staticEndpoints.SecureVariables.handleMixedAuthEndpoint( structs.QueryOptions{AuthToken: idToken, Namespace: structs.DefaultNamespace}, "n/a", fmt.Sprintf("nomad/jobs/%s/web/web", jobID)) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) }) testCases := []struct { @@ -150,26 +159,33 @@ func TestSecureVariablesEndpoint_auth(t *testing.T) { expectedErr: nil, }, { - name: "valid claim for implied policy", + name: "valid claim for job-attached policy", token: idToken, cap: acl.PolicyRead, path: "other/path", expectedErr: nil, }, { - name: "valid claim for implied policy path denied", + name: "valid claim for job-attached policy path denied", token: idToken, cap: acl.PolicyRead, path: "other/not-allowed", expectedErr: structs.ErrPermissionDenied, }, { - name: "valid claim for implied policy capability denied", + name: "valid claim for job-attached policy capability denied", token: idToken, cap: acl.PolicyWrite, path: "other/path", expectedErr: structs.ErrPermissionDenied, }, + { + name: "valid claim for job-attached policy capability with cross-job access", + token: idToken, + cap: acl.PolicyList, + path: "nomad/jobs/some-other", + expectedErr: nil, + }, { name: "valid claim with no permissions denied by path", token: noPermissionsToken, @@ -244,14 +260,449 @@ func TestSecureVariablesEndpoint_auth(t *testing.T) { 
for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - err := srv.staticEndpoints.SecureVariables.handleMixedAuthEndpoint( + _, err := srv.staticEndpoints.SecureVariables.handleMixedAuthEndpoint( structs.QueryOptions{AuthToken: tc.token, Namespace: ns}, tc.cap, tc.path) if tc.expectedErr == nil { - require.NoError(t, err) + must.NoError(t, err) } else { - require.EqualError(t, err, tc.expectedErr.Error()) + must.EqError(t, err, tc.expectedErr.Error()) } }) } } + +func TestSecureVariablesEndpoint_Apply_ACL(t *testing.T) { + ci.Parallel(t) + srv, rootToken, shutdown := TestACLServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + codec := rpcClient(t, srv) + state := srv.fsm.State() + + pol := mock.NamespacePolicyWithSecureVariables( + structs.DefaultNamespace, "", []string{"list-jobs"}, + map[string][]string{ + "dropbox/*": {"write"}, + }) + writeToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", pol) + + sv1 := mock.SecureVariable() + sv1.ModifyIndex = 0 + var svHold *structs.SecureVariableDecrypted + + opMap := map[string]structs.SVOp{ + "set": structs.SVOpSet, + "cas": structs.SVOpCAS, + "delete": structs.SVOpDelete, + "delete-cas": structs.SVOpDeleteCAS, + } + + for name, op := range opMap { + t.Run(name+"/no token", func(t *testing.T) { + sv1 := sv1 + applyReq := structs.SecureVariablesApplyRequest{ + Op: op, + Var: sv1, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + applyResp := new(structs.SecureVariablesApplyResponse) + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, applyResp) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) + }) + } + + t.Run("cas/management token/new", func(t *testing.T) { + applyReq := structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + Var: sv1, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: rootToken.SecretID, + }, + } + applyResp := new(structs.SecureVariablesApplyResponse) + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, applyResp) + + must.NoError(t, err) + must.Eq(t, structs.SVOpResultOk, applyResp.Result) + must.Equals(t, sv1.Items, applyResp.Output.Items) + + svHold = applyResp.Output + }) + + t.Run("cas with current", func(t *testing.T) { + must.NotNil(t, svHold) + sv := svHold + sv.Items["new"] = "newVal" + + applyReq := structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + Var: sv, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: rootToken.SecretID, + }, + } + applyResp := new(structs.SecureVariablesApplyResponse) + applyReq.AuthToken = rootToken.SecretID + + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, &applyResp) + + must.NoError(t, err) + must.Eq(t, structs.SVOpResultOk, applyResp.Result) + must.Equals(t, sv.Items, applyResp.Output.Items) + + svHold = applyResp.Output + }) + + t.Run("cas with stale", func(t *testing.T) { + must.NotNil(t, sv1) // TODO: query these directly + must.NotNil(t, svHold) + + sv1 := sv1 + svHold := svHold + + applyReq := structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + Var: sv1, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: rootToken.SecretID, + }, + } + applyResp := new(structs.SecureVariablesApplyResponse) + applyReq.AuthToken = rootToken.SecretID + + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, 
&applyResp) + + must.NoError(t, err) + must.Eq(t, structs.SVOpResultConflict, applyResp.Result) + must.Equals(t, svHold.SecureVariableMetadata, applyResp.Conflict.SecureVariableMetadata) + must.Equals(t, svHold.Items, applyResp.Conflict.Items) + }) + + sv3 := mock.SecureVariable() + sv3.Path = "dropbox/a" + sv3.ModifyIndex = 0 + + t.Run("cas/write-only/read own new", func(t *testing.T) { + sv3 := sv3 + applyReq := structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + Var: sv3, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: writeToken.SecretID, + }, + } + applyResp := new(structs.SecureVariablesApplyResponse) + + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, &applyResp) + + must.NoError(t, err) + must.Eq(t, structs.SVOpResultOk, applyResp.Result) + must.Equals(t, sv3.Items, applyResp.Output.Items) + svHold = applyResp.Output + }) + + t.Run("cas/write only/conflict redacted", func(t *testing.T) { + must.NotNil(t, sv3) + must.NotNil(t, svHold) + sv3 := sv3 + svHold := svHold + + applyReq := structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + Var: sv3, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: writeToken.SecretID, + }, + } + applyResp := new(structs.SecureVariablesApplyResponse) + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, &applyResp) + + must.NoError(t, err) + must.Eq(t, structs.SVOpResultRedacted, applyResp.Result) + must.Eq(t, svHold.SecureVariableMetadata, applyResp.Conflict.SecureVariableMetadata) + must.Nil(t, applyResp.Conflict.Items) + }) + + t.Run("cas/write only/read own upsert", func(t *testing.T) { + must.NotNil(t, svHold) + sv := svHold + sv.Items["upsert"] = "read" + + applyReq := structs.SecureVariablesApplyRequest{ + Op: structs.SVOpCAS, + Var: sv, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: writeToken.SecretID, + }, + } + applyResp := new(structs.SecureVariablesApplyResponse) + err := msgpackrpc.CallWithCodec(codec, structs.SecureVariablesApplyRPCMethod, &applyReq, &applyResp) + + must.NoError(t, err) + must.Eq(t, structs.SVOpResultOk, applyResp.Result) + must.Equals(t, sv.Items, applyResp.Output.Items) + }) +} + +func TestSecureVariablesEndpoint_ComplexACLPolicies(t *testing.T) { + + ci.Parallel(t) + srv, _, shutdown := TestACLServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + codec := rpcClient(t, srv) + + idx := uint64(1000) + + policyRules := ` +namespace "dev" { + secure_variables { + path "*" { capabilities = ["list", "read"] } + path "system/*" { capabilities = ["deny"] } + path "config/system/*" { capabilities = ["deny"] } + } +} + +namespace "prod" { + secure_variables { + path "*" { + capabilities = ["list"] + } + } +} + +namespace "*" {} +` + + store := srv.fsm.State() + + must.NoError(t, store.UpsertNamespaces(1000, []*structs.Namespace{ + {Name: "dev"}, {Name: "prod"}, {Name: "other"}})) + + idx++ + token := mock.CreatePolicyAndToken(t, store, idx, "developer", policyRules) + + writeVar := func(ns, path string) { + idx++ + sv := mock.SecureVariableEncrypted() + sv.Namespace = ns + sv.Path = path + resp := store.SVESet(idx, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + }) + must.NoError(t, resp.Error) + } + + writeVar("dev", "system/never-list") + writeVar("dev", "config/system/never-list") + writeVar("dev", "config/can-read") + writeVar("dev", "project/can-read") + + 
writeVar("prod", "system/can-list") + writeVar("prod", "config/system/can-list") + writeVar("prod", "config/can-list") + writeVar("prod", "project/can-list") + + writeVar("other", "system/never-list") + writeVar("other", "config/system/never-list") + writeVar("other", "config/never-list") + writeVar("other", "project/never-list") + + testListPrefix := func(ns, prefix string, expectedCount int, expectErr error) { + t.Run(fmt.Sprintf("ns=%s-prefix=%s", ns, prefix), func(t *testing.T) { + req := &structs.SecureVariablesListRequest{ + QueryOptions: structs.QueryOptions{ + Namespace: ns, + Prefix: prefix, + AuthToken: token.SecretID, + Region: "global", + }, + } + var resp structs.SecureVariablesListResponse + + if expectErr != nil { + must.EqError(t, + msgpackrpc.CallWithCodec(codec, "SecureVariables.List", req, &resp), + expectErr.Error()) + return + } + must.NoError(t, msgpackrpc.CallWithCodec(codec, "SecureVariables.List", req, &resp)) + + found := "found:\n" + for _, sv := range resp.Data { + found += fmt.Sprintf(" ns=%s path=%s\n", sv.Namespace, sv.Path) + } + must.Len(t, expectedCount, resp.Data, test.Sprintf("%s", found)) + }) + } + + testListPrefix("dev", "system", 0, nil) + testListPrefix("dev", "config/system", 0, nil) + testListPrefix("dev", "config", 1, nil) + testListPrefix("dev", "project", 1, nil) + testListPrefix("dev", "", 2, nil) + + testListPrefix("prod", "system", 1, nil) + testListPrefix("prod", "config/system", 1, nil) + testListPrefix("prod", "config", 2, nil) + testListPrefix("prod", "project", 1, nil) + testListPrefix("prod", "", 4, nil) + + testListPrefix("other", "system", 0, structs.ErrPermissionDenied) + testListPrefix("other", "config/system", 0, structs.ErrPermissionDenied) + testListPrefix("other", "config", 0, structs.ErrPermissionDenied) + testListPrefix("other", "project", 0, structs.ErrPermissionDenied) + testListPrefix("other", "", 0, structs.ErrPermissionDenied) + + testListPrefix("*", "system", 1, nil) + testListPrefix("*", "config/system", 1, nil) + testListPrefix("*", "config", 3, nil) + testListPrefix("*", "project", 2, nil) + testListPrefix("*", "", 6, nil) + +} + +func TestSecureVariablesEndpoint_GetSecureVariable_Blocking(t *testing.T) { + ci.Parallel(t) + + s1, cleanupS1 := TestServer(t, nil) + defer cleanupS1() + state := s1.fsm.State() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // First create an unrelated variable. 
+ delay := 100 * time.Millisecond + time.AfterFunc(delay, func() { + writeVar(t, s1, 100, "default", "aaa") + }) + + // Upsert the variable we are watching later + delay = 200 * time.Millisecond + time.AfterFunc(delay, func() { + writeVar(t, s1, 200, "default", "bbb") + }) + + // Lookup the variable + req := &structs.SecureVariablesReadRequest{ + Path: "bbb", + QueryOptions: structs.QueryOptions{ + Region: "global", + MinQueryIndex: 150, + MaxQueryTime: 500 * time.Millisecond, + }, + } + var resp structs.SecureVariablesReadResponse + start := time.Now() + if err := msgpackrpc.CallWithCodec(codec, "SecureVariables.Read", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + elapsed := time.Since(start) + + if elapsed < delay { + t.Fatalf("should block (returned in %s) %#v", elapsed, resp) + } + if elapsed > req.MaxQueryTime { + t.Fatalf("blocking query timed out %#v", resp) + } + if resp.Index != 200 { + t.Fatalf("Bad index: %d %d", resp.Index, 200) + } + if resp.Data == nil || resp.Data.Path != "bbb" { + t.Fatalf("bad: %#v", resp.Data) + } + + // Variable update triggers watches + delay = 100 * time.Millisecond + + time.AfterFunc(delay, func() { + writeVar(t, s1, 300, "default", "bbb") + }) + + req.QueryOptions.MinQueryIndex = 250 + var resp2 structs.SecureVariablesReadResponse + start = time.Now() + if err := msgpackrpc.CallWithCodec(codec, "SecureVariables.Read", req, &resp2); err != nil { + t.Fatalf("err: %v", err) + } + elapsed = time.Since(start) + + if elapsed < delay { + t.Fatalf("should block (returned in %s) %#v", elapsed, resp2) + } + if elapsed > req.MaxQueryTime { + t.Fatal("blocking query timed out") + } + if resp2.Index != 300 { + t.Fatalf("Bad index: %d %d", resp2.Index, 300) + } + if resp2.Data == nil || resp2.Data.Path != "bbb" { + t.Fatalf("bad: %#v", resp2.Data) + } + + // Variable delete triggers watches + delay = 100 * time.Millisecond + time.AfterFunc(delay, func() { + sv := mock.SecureVariableEncrypted() + sv.Path = "bbb" + if resp := state.SVEDelete(400, &structs.SVApplyStateRequest{Op: structs.SVOpDelete, Var: sv}); !resp.IsOk() { + t.Fatalf("err: %v", resp.Error) + } + }) + + req.QueryOptions.MinQueryIndex = 350 + var resp3 structs.SecureVariablesReadResponse + start = time.Now() + if err := msgpackrpc.CallWithCodec(codec, "SecureVariables.Read", req, &resp3); err != nil { + t.Fatalf("err: %v", err) + } + elapsed = time.Since(start) + + if elapsed < delay { + t.Fatalf("should block (returned in %s) %#v", elapsed, resp) + } + if elapsed > req.MaxQueryTime { + t.Fatal("blocking query timed out") + } + if resp3.Index != 400 { + t.Fatalf("Bad index: %d %d", resp3.Index, 400) + } + if resp3.Data != nil { + t.Fatalf("bad: %#v", resp3.Data) + } +} + +func writeVar(t *testing.T, s *Server, idx uint64, ns, path string) { + store := s.fsm.State() + sv := mock.SecureVariable() + sv.Namespace = ns + sv.Path = path + bPlain, err := json.Marshal(sv.Items) + must.NoError(t, err) + bEnc, kID, err := s.encrypter.Encrypt(bPlain) + must.NoError(t, err) + sve := &structs.SecureVariableEncrypted{ + SecureVariableMetadata: sv.SecureVariableMetadata, + SecureVariableData: structs.SecureVariableData{ + Data: bEnc, + KeyID: kID, + }, + } + resp := store.SVESet(idx, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sve, + }) + must.NoError(t, resp.Error) +} diff --git a/nomad/serf.go b/nomad/serf.go index 6e5a0a2d16e..9a6d31a87c4 100644 --- a/nomad/serf.go +++ b/nomad/serf.go @@ -2,7 +2,6 @@ package nomad import ( "strings" - "sync/atomic" "time" log 
"github.com/hashicorp/go-hclog" @@ -83,7 +82,7 @@ func (s *Server) nodeJoin(me serf.MemberEvent) { s.peerLock.Unlock() // If we still expecting to bootstrap, may need to handle this - if s.config.BootstrapExpect != 0 && atomic.LoadInt32(&s.config.Bootstrapped) == 0 { + if s.config.BootstrapExpect != 0 && !s.bootstrapped.Load() { s.maybeBootstrap() } } @@ -117,7 +116,7 @@ func (s *Server) maybeBootstrap() { // Bootstrap can only be done if there are no committed logs, // remove our expectations of bootstrapping if index != 0 { - atomic.StoreInt32(&s.config.Bootstrapped, 1) + s.bootstrapped.Store(true) return } @@ -188,7 +187,7 @@ func (s *Server) maybeBootstrap() { if len(peers) > 0 { s.logger.Info("disabling bootstrap mode because existing Raft peers being reported by peer", "peer_name", server.Name, "peer_address", server.Addr) - atomic.StoreInt32(&s.config.Bootstrapped, 1) + s.bootstrapped.Store(true) return } } @@ -230,7 +229,7 @@ func (s *Server) maybeBootstrap() { } // Bootstrapping complete, or failed for some reason, don't enter this again - atomic.StoreInt32(&s.config.Bootstrapped, 1) + s.bootstrapped.Store(true) } // nodeFailed is used to handle fail events on the serf cluster diff --git a/nomad/serf_test.go b/nomad/serf_test.go index 273c2ebac58..f5d2f0dcb1a 100644 --- a/nomad/serf_test.go +++ b/nomad/serf_test.go @@ -4,7 +4,6 @@ import ( "fmt" "path" "strings" - "sync/atomic" "testing" "time" @@ -413,7 +412,7 @@ func TestNomad_NonBootstraping_ShouldntBootstap(t *testing.T) { s1.maybeBootstrap() time.Sleep(100 * time.Millisecond) - bootstrapped := atomic.LoadInt32(&s1.config.Bootstrapped) + bootstrapped := s1.bootstrapped.Load() require.Zero(t, bootstrapped, "expecting non-bootstrapped servers") p, _ := s1.numPeers() diff --git a/nomad/server.go b/nomad/server.go index ec4ca4c8ea6..948f4362ff6 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -175,13 +175,16 @@ type Server struct { // and automatic clustering within regions. serf *serf.Serf + // bootstrapped indicates if Server has bootstrapped or not. + bootstrapped *atomic.Bool + // reconcileCh is used to pass events from the serf handler // into the leader manager. Mostly used to handle when servers // join/leave from the region. 
reconcileCh chan serf.Member // used to track when the server is ready to serve consistent reads, updated atomically - readyForConsistentReads int32 + readyForConsistentReads *atomic.Bool // eventCh is used to receive events from the serf cluster eventCh chan serf.Event @@ -341,24 +344,26 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, consulConfigEntr // Create the server s := &Server{ - config: config, - consulCatalog: consulCatalog, - connPool: pool.NewPool(logger, serverRPCCache, serverMaxStreams, tlsWrap), - logger: logger, - tlsWrap: tlsWrap, - rpcServer: rpc.NewServer(), - streamingRpcs: structs.NewStreamingRpcRegistry(), - nodeConns: make(map[string][]*nodeConnState), - peers: make(map[string][]*serverParts), - localPeers: make(map[raft.ServerAddress]*serverParts), - reassertLeaderCh: make(chan chan error), - reconcileCh: make(chan serf.Member, 32), - eventCh: make(chan serf.Event, 256), - evalBroker: evalBroker, - blockedEvals: NewBlockedEvals(evalBroker, logger), - rpcTLS: incomingTLS, - aclCache: aclCache, - workersEventCh: make(chan interface{}, 1), + config: config, + consulCatalog: consulCatalog, + connPool: pool.NewPool(logger, serverRPCCache, serverMaxStreams, tlsWrap), + logger: logger, + tlsWrap: tlsWrap, + rpcServer: rpc.NewServer(), + streamingRpcs: structs.NewStreamingRpcRegistry(), + nodeConns: make(map[string][]*nodeConnState), + peers: make(map[string][]*serverParts), + localPeers: make(map[raft.ServerAddress]*serverParts), + bootstrapped: &atomic.Bool{}, + reassertLeaderCh: make(chan chan error), + reconcileCh: make(chan serf.Member, 32), + readyForConsistentReads: &atomic.Bool{}, + eventCh: make(chan serf.Event, 256), + evalBroker: evalBroker, + blockedEvals: NewBlockedEvals(evalBroker, logger), + rpcTLS: incomingTLS, + aclCache: aclCache, + workersEventCh: make(chan interface{}, 1), } s.shutdownCtx, s.shutdownCancel = context.WithCancel(context.Background()) @@ -1894,17 +1899,17 @@ func (s *Server) getLeaderAcl() string { // Atomically sets a readiness state flag when leadership is obtained, to indicate that server is past its barrier write func (s *Server) setConsistentReadReady() { - atomic.StoreInt32(&s.readyForConsistentReads, 1) + s.readyForConsistentReads.Store(true) } // Atomically reset readiness state flag on leadership revoke func (s *Server) resetConsistentReadReady() { - atomic.StoreInt32(&s.readyForConsistentReads, 0) + s.readyForConsistentReads.Store(false) } // Returns true if this server is ready to serve consistent reads func (s *Server) isReadyForConsistentReads() bool { - return atomic.LoadInt32(&s.readyForConsistentReads) == 1 + return s.readyForConsistentReads.Load() } // Regions returns the known regions in the cluster. @@ -2006,7 +2011,7 @@ func (s *Server) setReplyQueryMeta(stateStore *state.StateStore, table string, r if err != nil { return err } - reply.Index = helper.Uint64Max(1, index) + reply.Index = helper.Max(1, index) // Set the query response. 
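The hunks above replace hand-rolled int32 flags manipulated through sync/atomic with a Bool type that exposes Load/Store. The fragment below is a minimal sketch of that pattern in isolation; it assumes Go 1.19's sync/atomic.Bool, since the server's actual atomic import is not visible in this excerpt, and the readiness type and method names are illustrative rather than taken from the change.

package nomad

import "sync/atomic" // assumption: Go 1.19+ atomic.Bool; the diff does not show the import

// readiness sketches the flag pattern used above: a boolean flipped on
// leadership transitions and read lock-free on every request.
type readiness struct {
	consistentReads atomic.Bool
}

func (r *readiness) setReady()     { r.consistentReads.Store(true) }
func (r *readiness) resetReady()   { r.consistentReads.Store(false) }
func (r *readiness) isReady() bool { return r.consistentReads.Load() }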
s.setQueryMeta(reply) diff --git a/nomad/server_setup.go b/nomad/server_setup.go new file mode 100644 index 00000000000..e56a9b7458c --- /dev/null +++ b/nomad/server_setup.go @@ -0,0 +1,31 @@ +package nomad + +import ( + "github.com/hashicorp/go-hclog" + "golang.org/x/exp/slices" +) + +// LicenseConfig allows for tunable licensing config +// primarily used for enterprise testing +type LicenseConfig struct { + // LicenseEnvBytes is the license bytes to use for the server's license + LicenseEnvBytes string + + // LicensePath is the path to use for the server's license + LicensePath string + + // AdditionalPubKeys is a set of public keys to + AdditionalPubKeys []string + + Logger hclog.InterceptLogger +} + +func (c *LicenseConfig) Copy() *LicenseConfig { + if c == nil { + return nil + } + + nc := *c + nc.AdditionalPubKeys = slices.Clone(c.AdditionalPubKeys) + return &nc +} diff --git a/nomad/server_setup_oss.go b/nomad/server_setup_oss.go index 2e08a9ea508..7abf7a41ed6 100644 --- a/nomad/server_setup_oss.go +++ b/nomad/server_setup_oss.go @@ -7,12 +7,6 @@ import ( "github.com/hashicorp/consul/agent/consul/autopilot" ) -// LicenseConfig allows for tunable licensing config -// primarily used for enterprise testing -type LicenseConfig struct { - AdditionalPubKeys []string -} - type EnterpriseState struct{} func (es *EnterpriseState) Features() uint64 { diff --git a/nomad/server_test.go b/nomad/server_test.go index 6e73e5c11c0..16f99c2c3e8 100644 --- a/nomad/server_test.go +++ b/nomad/server_test.go @@ -214,6 +214,7 @@ func TestServer_Reload_Vault(t *testing.T) { config := DefaultConfig() config.VaultConfig.Enabled = &tr config.VaultConfig.Token = uuid.Generate() + config.VaultConfig.Namespace = "nondefault" if err := s1.Reload(config); err != nil { t.Fatalf("Reload failed: %v", err) @@ -222,6 +223,10 @@ func TestServer_Reload_Vault(t *testing.T) { if !s1.vault.Running() { t.Fatalf("Vault client should be running") } + + if s1.vault.GetConfig().Namespace != "nondefault" { + t.Fatalf("Vault client did not get new namespace") + } } func connectionReset(msg string) bool { diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 1af2cc6cc83..44a9c326ef1 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -6,7 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -177,7 +177,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -185,7 +185,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) @@ -254,7 +254,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = 
append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -262,7 +262,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) @@ -602,7 +602,7 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) req := &structs.AllocUpdateDesiredTransitionRequest{ Allocs: map[string]*structs.DesiredTransition{ - alloc.ID: {Migrate: helper.BoolToPtr(true)}, + alloc.ID: {Migrate: pointer.Of(true)}, }, Evals: evals, } diff --git a/nomad/state/paginator/tokenizer.go b/nomad/state/paginator/tokenizer.go index 343d7e17f67..fa5e58d5bae 100644 --- a/nomad/state/paginator/tokenizer.go +++ b/nomad/state/paginator/tokenizer.go @@ -33,27 +33,28 @@ type CreateIndexGetter interface { // StructsTokenizerOptions is the configuration provided to a StructsTokenizer. // // These are some of the common use cases: -// - Structs that can be uniquely identified with only its own ID: // -// StructsTokenizerOptions { -// WithID: true, -// } +// Structs that can be uniquely identified with only its own ID: // -// - Structs that are only unique within their namespace: +// StructsTokenizerOptions { +// WithID: true, +// } // -// StructsTokenizerOptions { -// WithID: true, -// WithNamespace: true, -// } +// Structs that are only unique within their namespace: // -// - Structs that can be sorted by their create index should also set -// `WithCreateIndex` to `true` along with the other options: +// StructsTokenizerOptions { +// WithID: true, +// WithNamespace: true, +// } // -// StructsTokenizerOptions { -// WithID: true, -// WithNamespace: true, -// WithCreateIndex: true, -// } +// Structs that can be sorted by their create index should also set +// `WithCreateIndex` to `true` along with the other options: +// +// StructsTokenizerOptions { +// WithID: true, +// WithNamespace: true, +// WithCreateIndex: true, +// } type StructsTokenizerOptions struct { WithCreateIndex bool WithNamespace bool diff --git a/nomad/state/schema.go b/nomad/state/schema.go index df16d1011aa..b8439d11b27 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -780,10 +780,80 @@ func aclPolicyTableSchema() *memdb.TableSchema { Field: "Name", }, }, + "job": { + Name: "job", + AllowMissing: true, + Unique: false, + Indexer: &ACLPolicyJobACLFieldIndex{}, + }, }, } } +// ACLPolicyJobACLFieldIndex is used to extract the policy's JobACL field and +// build an index on it. +type ACLPolicyJobACLFieldIndex struct{} + +// FromObject is used to extract an index value from an +// object or to indicate that the index value is missing. 
+func (a *ACLPolicyJobACLFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + policy, ok := obj.(*structs.ACLPolicy) + if !ok { + return false, nil, fmt.Errorf("object %#v is not an ACLPolicy", obj) + } + + if policy.JobACL == nil { + return false, nil, nil + } + + ns := policy.JobACL.Namespace + if ns == "" { + return false, nil, nil + } + jobID := policy.JobACL.JobID + if jobID == "" { + return false, nil, fmt.Errorf( + "object %#v is not a valid ACLPolicy: JobACL.JobID without Namespace", obj) + } + + val := ns + "\x00" + jobID + "\x00" + return true, []byte(val), nil +} + +// FromArgs is used to build an exact index lookup based on arguments +func (a *ACLPolicyJobACLFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 2 { + return nil, fmt.Errorf("must provide two arguments") + } + arg0, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + arg1, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + + // Add the null character as a terminator + arg0 += "\x00" + arg1 + "\x00" + return []byte(arg0), nil +} + +// PrefixFromArgs returns a prefix that should be used for scanning based on the arguments +func (a *ACLPolicyJobACLFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := a.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + // aclTokenTableSchema returns the MemDB schema for the tokens table. // This table is used to store the bearer tokens which are used to authenticate func aclTokenTableSchema() *memdb.TableSchema { diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 5a3d6028cfc..23f28cf1224 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" ) @@ -3488,7 +3488,7 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, alloc * // We got new health information from the client if newHasHealthy && (!oldHasHealthy || *copyAlloc.DeploymentStatus.Healthy != *alloc.DeploymentStatus.Healthy) { // Updated deployment health and timestamp - copyAlloc.DeploymentStatus.Healthy = helper.BoolToPtr(*alloc.DeploymentStatus.Healthy) + copyAlloc.DeploymentStatus.Healthy = pointer.Of(*alloc.DeploymentStatus.Healthy) copyAlloc.DeploymentStatus.Timestamp = alloc.DeploymentStatus.Timestamp copyAlloc.DeploymentStatus.ModifyIndex = index } @@ -4564,7 +4564,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in if copy.DeploymentStatus == nil { copy.DeploymentStatus = &structs.AllocDeploymentStatus{} } - copy.DeploymentStatus.Healthy = helper.BoolToPtr(healthy) + copy.DeploymentStatus.Healthy = pointer.Of(healthy) copy.DeploymentStatus.Timestamp = ts copy.DeploymentStatus.ModifyIndex = index copy.ModifyIndex = index @@ -5570,6 +5570,20 @@ func (s *StateStore) ACLPolicyByNamePrefix(ws memdb.WatchSet, prefix string) (me return iter, nil } +// ACLPolicyByJob is used to lookup policies that have been attached to a +// specific job +func (s *StateStore) ACLPolicyByJob(ws memdb.WatchSet, ns, jobID string) 
(memdb.ResultIterator, error) { + txn := s.db.ReadTxn() + + iter, err := txn.Get("acl_policy", "job_prefix", ns, jobID) + if err != nil { + return nil, fmt.Errorf("acl policy lookup failed: %v", err) + } + ws.Add(iter.WatchCh()) + + return iter, nil +} + // ACLPolicies returns an iterator over all the acl policies func (s *StateStore) ACLPolicies(ws memdb.WatchSet) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() diff --git a/nomad/state/state_store_secure_variables.go b/nomad/state/state_store_secure_variables.go index 281c6a6f301..9fbc7e64a41 100644 --- a/nomad/state/state_store_secure_variables.go +++ b/nomad/state/state_store_secure_variables.go @@ -2,7 +2,7 @@ package state import ( "fmt" - "time" + "math" "github.com/hashicorp/go-memdb" @@ -99,10 +99,11 @@ func (s *StateStore) GetSecureVariable( txn := s.db.ReadTxn() // Try to fetch the secure variable. - raw, err := txn.First(TableSecureVariables, indexID, namespace, path) + watchCh, raw, err := txn.FirstWatch(TableSecureVariables, indexID, namespace, path) if err != nil { // error during fetch return nil, fmt.Errorf("secure variable lookup failed: %v", err) } + ws.Add(watchCh) if raw == nil { // not found return nil, nil } @@ -111,178 +112,361 @@ func (s *StateStore) GetSecureVariable( return sv, nil } -func (s *StateStore) UpsertSecureVariables(msgType structs.MessageType, index uint64, svs []*structs.SecureVariableEncrypted) error { - txn := s.db.WriteTxn(index) - defer txn.Abort() +// SVESet is used to store a secure variable object. +func (s *StateStore) SVESet(idx uint64, sv *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + tx := s.db.WriteTxn(idx) + defer tx.Abort() - var updated bool = false - for _, sv := range svs { - if err := s.upsertSecureVariableImpl(index, txn, sv, &updated); err != nil { - return err - } + // Perform the actual set. + resp := s.svSetTxn(tx, idx, sv) + if resp.IsError() { + return resp } - if !updated { - return nil + if err := tx.Commit(); err != nil { + return sv.ErrorResponse(idx, err) } + return resp +} - if err := txn.Insert(tableIndex, &IndexEntry{TableSecureVariables, index}); err != nil { - return fmt.Errorf("index update failed: %v", err) +// SVESetCAS is used to do a check-and-set operation on a secure +// variable. The ModifyIndex in the provided entry is used to determine if +// we should write the entry to the state store or not. +func (s *StateStore) SVESetCAS(idx uint64, sv *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + resp := s.svSetCASTxn(tx, idx, sv) + if resp.IsError() || resp.IsConflict() { + return resp } - return txn.Commit() + if err := tx.Commit(); err != nil { + return sv.ErrorResponse(idx, err) + } + return resp } -// upsertSecureVariableImpl is used to upsert a secure variable -func (s *StateStore) upsertSecureVariableImpl(index uint64, txn *txn, sv *structs.SecureVariableEncrypted, updated *bool) error { - // Check if the secure variable already exists - existing, err := txn.First(TableSecureVariables, indexID, sv.Namespace, sv.Path) +// svSetCASTxn is the inner method used to do a CAS inside an existing +// transaction. 
+func (s *StateStore) svSetCASTxn(tx WriteTxn, idx uint64, req *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + sv := req.Var + raw, err := tx.First(TableSecureVariables, indexID, sv.Namespace, sv.Path) if err != nil { - return fmt.Errorf("secure variable lookup failed: %v", err) + return req.ErrorResponse(idx, fmt.Errorf("failed sve lookup: %s", err)) } + svEx, ok := raw.(*structs.SecureVariableEncrypted) - existingQuota, err := txn.First(TableSecureVariablesQuotas, indexID, sv.Namespace) + // ModifyIndex of 0 means that we are doing a set-if-not-exists. + if sv.ModifyIndex == 0 && raw != nil { + return req.ConflictResponse(idx, svEx) + } + + // If the ModifyIndex is set but the variable doesn't exist, return a + // plausible zero value as the conflict + if sv.ModifyIndex != 0 && raw == nil { + zeroVal := &structs.SecureVariableEncrypted{ + SecureVariableMetadata: structs.SecureVariableMetadata{ + Namespace: sv.Namespace, + Path: sv.Path, + }, + } + return req.ConflictResponse(idx, zeroVal) + } + + // If the existing index does not match the provided CAS index arg, then we + // shouldn't update anything and can safely return early here. + if ok && sv.ModifyIndex != svEx.ModifyIndex { + return req.ConflictResponse(idx, svEx) + } + + // If we made it this far, we should perform the set. + return s.svSetTxn(tx, idx, req) +} + +// svSetTxn is used to insert or update a secure variable in the state +// store. It is the inner method used and handles only the actual storage. +func (s *StateStore) svSetTxn(tx WriteTxn, idx uint64, req *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + sv := req.Var + existingRaw, err := tx.First(TableSecureVariables, indexID, sv.Namespace, sv.Path) + if err != nil { + return req.ErrorResponse(idx, fmt.Errorf("failed sve lookup: %s", err)) + } + existing, _ := existingRaw.(*structs.SecureVariableEncrypted) + + existingQuota, err := tx.First(TableSecureVariablesQuotas, indexID, sv.Namespace) if err != nil { - return fmt.Errorf("secure variable quota lookup failed: %v", err) + return req.ErrorResponse(idx, fmt.Errorf("secure variable quota lookup failed: %v", err)) } - var quotaChange int + var quotaChange int64 - // Setup the indexes correctly - nowNano := time.Now().UnixNano() + // Set the CreateIndex and CreateTime if existing != nil { - exist := existing.(*structs.SecureVariableEncrypted) - if !shouldWrite(sv, exist) { - *updated = false - return nil + sv.CreateIndex = existing.CreateIndex + sv.CreateTime = existing.CreateTime + + if existing.Equals(*sv) { + // Skip further writing in the state store if the entry is not actually + // changed. Nevertheless, the input's ModifyIndex should be reset + // since the TXN API returns a copy in the response. 
+ sv.ModifyIndex = existing.ModifyIndex + sv.ModifyTime = existing.ModifyTime + return req.SuccessResponse(idx, nil) } - sv.CreateIndex = exist.CreateIndex - sv.CreateTime = exist.CreateTime - sv.ModifyIndex = index - sv.ModifyTime = nowNano - quotaChange = len(sv.Data) - len(exist.Data) + sv.ModifyIndex = idx + quotaChange = int64(len(sv.Data) - len(existing.Data)) } else { - sv.CreateIndex = index - sv.CreateTime = nowNano - sv.ModifyIndex = index - sv.ModifyTime = nowNano - quotaChange = len(sv.Data) + sv.CreateIndex = idx + sv.ModifyIndex = idx + quotaChange = int64(len(sv.Data)) } - // Insert the secure variable - if err := txn.Insert(TableSecureVariables, sv); err != nil { - return fmt.Errorf("secure variable insert failed: %v", err) + if err := tx.Insert(TableSecureVariables, sv); err != nil { + return req.ErrorResponse(idx, fmt.Errorf("failed inserting secure variable: %s", err)) } - if quotaChange != 0 { - // Track quota usage - var quotaUsed *structs.SecureVariablesQuota - if existingQuota != nil { - quotaUsed = existingQuota.(*structs.SecureVariablesQuota) - quotaUsed = quotaUsed.Copy() - } else { - quotaUsed = &structs.SecureVariablesQuota{ - Namespace: sv.Namespace, - CreateIndex: index, - } - } - quotaUsed.ModifyIndex = index - if quotaChange > 0 { - quotaUsed.Size += uint64(quotaChange) - } else { - quotaUsed.Size -= uint64(helper.MinInt(int(quotaUsed.Size), -quotaChange)) + // Track quota usage + var quotaUsed *structs.SecureVariablesQuota + if existingQuota != nil { + quotaUsed = existingQuota.(*structs.SecureVariablesQuota) + quotaUsed = quotaUsed.Copy() + } else { + quotaUsed = &structs.SecureVariablesQuota{ + Namespace: sv.Namespace, + CreateIndex: idx, } - if err := txn.Insert(TableSecureVariablesQuotas, quotaUsed); err != nil { - return fmt.Errorf("secure variable quota insert failed: %v", err) + } + + if quotaChange > math.MaxInt64-quotaUsed.Size { + // this limit is actually shared across all namespaces in the region's + // quota (if there is one), but we need this check here to prevent + // overflow as well + return req.ErrorResponse(idx, fmt.Errorf("secure variables can store a maximum of %d bytes of encrypted data per namespace", math.MaxInt)) + } + + if quotaChange > 0 { + quotaUsed.Size += quotaChange + } else if quotaChange < 0 { + quotaUsed.Size -= helper.Min(quotaUsed.Size, -quotaChange) + } + + err = s.enforceSecureVariablesQuota(idx, tx, sv.Namespace, quotaChange) + if err != nil { + return req.ErrorResponse(idx, err) + } + + // we check enforcement above even if there's no change because another + // namespace may have used up quota to make this no longer valid, but we + // only update the table if this namespace has changed + if quotaChange != 0 { + quotaUsed.ModifyIndex = idx + if err := tx.Insert(TableSecureVariablesQuotas, quotaUsed); err != nil { + return req.ErrorResponse(idx, fmt.Errorf("secure variable quota insert failed: %v", err)) } } - *updated = true - return nil + if err := tx.Insert(tableIndex, + &IndexEntry{TableSecureVariables, idx}); err != nil { + return req.ErrorResponse(idx, fmt.Errorf("failed updating secure variable index: %s", err)) + } + + return req.SuccessResponse(idx, &sv.SecureVariableMetadata) +} + +// SVEGet is used to retrieve a key/value pair from the state store. 
+func (s *StateStore) SVEGet(ws memdb.WatchSet, namespace, path string) (uint64, *structs.SecureVariableEncrypted, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + return svGetTxn(tx, ws, namespace, path) } -// shouldWrite can be used to determine if a write needs to happen. -func shouldWrite(sv, existing *structs.SecureVariableEncrypted) bool { - // FIXME: Move this to the RPC layer eventually. - if existing == nil { - return true +// svGetTxn is the inner method that gets a secure variable inside an existing +// transaction. +func svGetTxn(tx ReadTxn, + ws memdb.WatchSet, namespace, path string) (uint64, *structs.SecureVariableEncrypted, error) { + + // Get the table index. + idx := svMaxIndex(tx) + + watchCh, entry, err := tx.FirstWatch(TableSecureVariables, indexID, namespace, path) + if err != nil { + return 0, nil, fmt.Errorf("failed secure variable lookup: %s", err) } - if sv.Equals(*existing) { - return false + ws.Add(watchCh) + if entry != nil { + return idx, entry.(*structs.SecureVariableEncrypted), nil } - return true + return idx, nil, nil } -func (s *StateStore) DeleteSecureVariables(msgType structs.MessageType, index uint64, namespace string, paths []string) error { - txn := s.db.WriteTxn(index) - defer txn.Abort() +// SVEDelete is used to delete a single secure variable in the +// the state store. +func (s *StateStore) SVEDelete(idx uint64, req *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + tx := s.db.WriteTxn(idx) + defer tx.Abort() - err := s.DeleteSecureVariablesTxn(index, namespace, paths, txn) - if err == nil { - return txn.Commit() + // Perform the actual delete + resp := s.svDeleteTxn(tx, idx, req) + if !resp.IsOk() { + return resp } - return err -} -func (s *StateStore) DeleteSecureVariablesTxn(index uint64, namespace string, paths []string, txn Txn) error { - for _, path := range paths { - err := s.DeleteSecureVariableTxn(index, namespace, path, txn) - if err != nil { - return err - } + err := tx.Commit() + if err != nil { + return req.ErrorResponse(idx, err) } - return nil + + return resp } -// DeleteSecureVariable is used to delete a single secure variable -func (s *StateStore) DeleteSecureVariable(index uint64, namespace, path string) error { - txn := s.db.WriteTxn(index) - defer txn.Abort() +// SVEDeleteCAS is used to conditionally delete a secure +// variable if and only if it has a given modify index. If the CAS +// index (cidx) specified is not equal to the last observed index for +// the given variable, then the call is a noop, otherwise a normal +// delete is invoked. +func (s *StateStore) SVEDeleteCAS(idx uint64, req *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + resp := s.svDeleteCASTxn(tx, idx, req) + if !resp.IsOk() { + return resp + } - err := s.DeleteSecureVariableTxn(index, namespace, path, txn) - if err == nil { - return txn.Commit() + err := tx.Commit() + if err != nil { + return req.ErrorResponse(idx, err) } - return err + + return resp } -// DeleteSecureVariableTxn is used to delete the secure variable, like DeleteSecureVariable -// but in a transaction. 
Useful for when making multiple modifications atomically -func (s *StateStore) DeleteSecureVariableTxn(index uint64, namespace, path string, txn Txn) error { - // Lookup the variable - existing, err := txn.First(TableSecureVariables, indexID, namespace, path) +// svDeleteCASTxn is an inner method used to check the existing value +// of a secure variable within an existing transaction as part of a +// conditional delete. +func (s *StateStore) svDeleteCASTxn(tx WriteTxn, idx uint64, req *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + sv := req.Var + raw, err := tx.First(TableSecureVariables, indexID, sv.Namespace, sv.Path) if err != nil { - return fmt.Errorf("secure variable lookup failed: %v", err) + return req.ErrorResponse(idx, fmt.Errorf("failed secure variable lookup: %s", err)) } - if existing == nil { - return fmt.Errorf("secure variable not found") + + svEx, ok := raw.(*structs.SecureVariableEncrypted) + + // ModifyIndex of 0 means that we are doing a delete-if-not-exists. + if sv.ModifyIndex == 0 && raw != nil { + return req.ConflictResponse(idx, svEx) } - existingQuota, err := txn.First(TableSecureVariablesQuotas, indexID, namespace) - if err != nil { - return fmt.Errorf("secure variable quota lookup failed: %v", err) + + // If the ModifyIndex is set but the variable doesn't exist, return a + // plausible zero value as the conflict + if sv.ModifyIndex != 0 && raw == nil { + zeroVal := &structs.SecureVariableEncrypted{ + SecureVariableMetadata: structs.SecureVariableMetadata{ + Namespace: sv.Namespace, + Path: sv.Path, + }, + } + return req.ConflictResponse(idx, zeroVal) } - // Delete the variable - if err := txn.Delete(TableSecureVariables, existing); err != nil { - return fmt.Errorf("secure variable delete failed: %v", err) + // If the existing index does not match the provided CAS index arg, then we + // shouldn't update anything and can safely return early here. + if !ok || sv.ModifyIndex != svEx.ModifyIndex { + return req.ConflictResponse(idx, svEx) + } + + // Call the actual deletion if the above passed. + return s.svDeleteTxn(tx, idx, req) +} + +// svDeleteTxn is the inner method used to perform the actual deletion +// of a secure variable within an existing transaction. +func (s *StateStore) svDeleteTxn(tx WriteTxn, idx uint64, req *structs.SVApplyStateRequest) *structs.SVApplyStateResponse { + + // Look up the entry in the state store. 
+ existingRaw, err := tx.First(TableSecureVariables, indexID, req.Var.Namespace, req.Var.Path) + if err != nil { + return req.ErrorResponse(idx, fmt.Errorf("failed secure variable lookup: %s", err)) } - if err := txn.Insert("index", &IndexEntry{TableSecureVariables, index}); err != nil { - return fmt.Errorf("index update failed: %v", err) + if existingRaw == nil { + return req.SuccessResponse(idx, nil) } + existingQuota, err := tx.First(TableSecureVariablesQuotas, indexID, req.Var.Namespace) + if err != nil { + return req.ErrorResponse(idx, fmt.Errorf("secure variable quota lookup failed: %v", err)) + } + + sv := existingRaw.(*structs.SecureVariableEncrypted) + // Track quota usage if existingQuota != nil { quotaUsed := existingQuota.(*structs.SecureVariablesQuota) quotaUsed = quotaUsed.Copy() - sv := existing.(*structs.SecureVariableEncrypted) - quotaUsed.Size -= uint64(len(sv.Data)) - quotaUsed.ModifyIndex = index - if err := txn.Insert(TableSecureVariablesQuotas, quotaUsed); err != nil { - return fmt.Errorf("secure variable quota insert failed: %v", err) + quotaUsed.Size -= helper.Min(quotaUsed.Size, int64(len(sv.Data))) + quotaUsed.ModifyIndex = idx + if err := tx.Insert(TableSecureVariablesQuotas, quotaUsed); err != nil { + return req.ErrorResponse(idx, fmt.Errorf("secure variable quota insert failed: %v", err)) } } - return nil + // Delete the secure variable and update the index table. + if err := tx.Delete(TableSecureVariables, sv); err != nil { + return req.ErrorResponse(idx, fmt.Errorf("failed deleting secure variable entry: %s", err)) + } + + if err := tx.Insert(tableIndex, &IndexEntry{TableSecureVariables, idx}); err != nil { + return req.ErrorResponse(idx, fmt.Errorf("failed updating secure variable index: %s", err)) + } + + return req.SuccessResponse(idx, nil) +} + +// This extra indirection is to facilitate the tombstone case if it matters. +func svMaxIndex(tx ReadTxn) uint64 { + return maxIndexTxn(tx, TableSecureVariables) +} + +// WriteTxn is implemented by memdb.Txn to perform write operations. +type WriteTxn interface { + ReadTxn + Defer(func()) + Delete(table string, obj interface{}) error + DeleteAll(table, index string, args ...interface{}) (int, error) + DeletePrefix(table string, index string, prefix string) (bool, error) + Insert(table string, obj interface{}) error +} + +// maxIndex is a helper used to retrieve the highest known index +// amongst a set of tables in the db. +func (s *StateStore) maxIndex(tables ...string) uint64 { + tx := s.db.ReadTxn() + defer tx.Abort() + return maxIndexTxn(tx, tables...) +} + +// maxIndexTxn is a helper used to retrieve the highest known index +// amongst a set of tables in the db. +func maxIndexTxn(tx ReadTxn, tables ...string) uint64 { + return maxIndexWatchTxn(tx, nil, tables...) 
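The CAS methods introduced above (SVESetCAS/svSetCASTxn and SVEDeleteCAS/svDeleteCASTxn) implement optimistic concurrency keyed on ModifyIndex, with 0 meaning "only if it does not exist yet". The sketch below shows how a caller might drive that surface; it is illustrative and not part of this change. It uses only types and methods visible in this diff, except that the CAS-specific Op constant is not shown here, so structs.SVOpSet is used as a placeholder, and casUpdate is a hypothetical helper name.

package state

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"

	"github.com/hashicorp/nomad/nomad/structs"
)

// casUpdate reads the current ModifyIndex, writes back with that index, and
// treats a conflict response as "somebody else won the race".
func casUpdate(s *StateStore, idx uint64, sv *structs.SecureVariableEncrypted) error {
	ws := memdb.NewWatchSet()
	_, existing, err := s.SVEGet(ws, sv.Namespace, sv.Path)
	if err != nil {
		return err
	}
	if existing != nil {
		// Write only if nobody has modified the variable since we read it.
		sv.ModifyIndex = existing.ModifyIndex
	} else {
		// ModifyIndex == 0 asks svSetCASTxn for set-if-not-exists semantics.
		sv.ModifyIndex = 0
	}

	req := &structs.SVApplyStateRequest{
		Op:  structs.SVOpSet, // placeholder; CAS op constant not shown in this diff
		Var: sv,
	}
	resp := s.SVESetCAS(idx, req)
	switch {
	case resp.IsError():
		return resp.Error
	case resp.IsConflict():
		return fmt.Errorf("conflict: variable %q was modified concurrently", sv.Path)
	default:
		return nil
	}
}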
+} + +func maxIndexWatchTxn(tx ReadTxn, ws memdb.WatchSet, tables ...string) uint64 { + var lindex uint64 + for _, table := range tables { + ch, ti, err := tx.FirstWatch(tableIndex, "id", table) + if err != nil { + panic(fmt.Sprintf("unknown index: %s err: %s", table, err)) + } + if idx, ok := ti.(*IndexEntry); ok && idx.Value > lindex { + lindex = idx.Value + } + ws.Add(ch) + } + return lindex } // SecureVariablesQuotas queries all the quotas and is used only for diff --git a/nomad/state/state_store_secure_variables_oss.go b/nomad/state/state_store_secure_variables_oss.go new file mode 100644 index 00000000000..f730fec6b09 --- /dev/null +++ b/nomad/state/state_store_secure_variables_oss.go @@ -0,0 +1,8 @@ +//go:build !ent +// +build !ent + +package state + +func (s *StateStore) enforceSecureVariablesQuota(_ uint64, _ WriteTxn, _ string, _ int64) error { + return nil +} diff --git a/nomad/state/state_store_secure_variables_test.go b/nomad/state/state_store_secure_variables_test.go index bd4c5388286..4b0f00cc77b 100644 --- a/nomad/state/state_store_secure_variables_test.go +++ b/nomad/state/state_store_secure_variables_test.go @@ -2,18 +2,17 @@ package state import ( "encoding/json" - "fmt" "sort" "strings" "testing" - "time" memdb "github.com/hashicorp/go-memdb" + "github.com/stretchr/testify/require" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" ) func TestStateStore_GetSecureVariable(t *testing.T) { @@ -30,85 +29,100 @@ func TestStateStore_UpsertSecureVariables(t *testing.T) { testState := testStateStore(t) ws := memdb.NewWatchSet() - svs, svm := mockSecureVariables(2) - t.Log(printSecureVariables(svs)) + svs := []*structs.SecureVariableEncrypted{ + mock.SecureVariableEncrypted(), + mock.SecureVariableEncrypted(), + } + svs[0].Path = "aaaaa" + svs[1].Path = "bbbbb" + insertIndex := uint64(20) - var expectedQuotaSize uint64 + var expectedQuotaSize int for _, v := range svs { - expectedQuotaSize += uint64(len(v.Data)) + expectedQuotaSize += len(v.Data) } // Ensure new secure variables are inserted as expected with their // correct indexes, along with an update to the index table. t.Run("1 create new variables", func(t *testing.T) { - // Perform the initial upsert of secure variables. - err := testState.UpsertSecureVariables(structs.MsgTypeTestSetup, insertIndex, svs) - require.NoError(t, err) + for _, sv := range svs { + insertIndex++ + resp := testState.SVESet(insertIndex, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + }) + require.NoError(t, resp.Error) + } // Check that the index for the table was modified as expected. initialIndex, err := testState.Index(TableSecureVariables) require.NoError(t, err) require.Equal(t, insertIndex, initialIndex) - // List all the secure variables in the table, so we can perform a - // number of tests on the return array. - + // List all the secure variables in the table iter, err := testState.SecureVariables(ws) require.NoError(t, err) - // Count how many table entries we have, to ensure it is the expected - // number. - var count int - + got := []*structs.SecureVariableEncrypted{} for raw := iter.Next(); raw != nil; raw = iter.Next() { - count++ - - // Ensure the create and modify indexes are populated correctly. 
sv := raw.(*structs.SecureVariableEncrypted) - require.Equal(t, insertIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, insertIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) - // update the mock element so the test element has the correct create/modify - // indexes and times now that we have validated them - nv := sv.Copy() - svm[sv.Path] = &nv + var svCopy structs.SecureVariableEncrypted + svCopy = sv.Copy() + got = append(got, &svCopy) } - require.Equal(t, len(svs), count, "incorrect number of secure variables found") + require.Len(t, got, 2, "incorrect number of secure variables found") + + // Ensure the create and modify indexes are populated correctly. + require.Equal(t, uint64(21), got[0].CreateIndex, "%s: incorrect create index", got[0].Path) + require.Equal(t, uint64(21), got[0].ModifyIndex, "%s: incorrect modify index", got[0].Path) + require.Equal(t, uint64(22), got[1].CreateIndex, "%s: incorrect create index", got[1].Path) + require.Equal(t, uint64(22), got[1].ModifyIndex, "%s: incorrect modify index", got[1].Path) quotaUsed, err := testState.SecureVariablesQuotaByNamespace(ws, structs.DefaultNamespace) require.NoError(t, err) - require.Equal(t, expectedQuotaSize, quotaUsed.Size) + require.Equal(t, int64(expectedQuotaSize), quotaUsed.Size) + + // update the mocks so the test element has the correct create/modify + // indexes and times now that we have validated them + svs = got }) - svs = svm.List() - t.Log(printSecureVariables(svs)) t.Run("1a fetch variable", func(t *testing.T) { sve, err := testState.GetSecureVariable(ws, svs[0].Namespace, svs[0].Path) require.NoError(t, err) require.NotNil(t, sve) }) - // Upsert the exact same secure variables without any - // modification. In this case, the index table should not be - // updated, indicating no write actually happened due to equality - // checking. + // Upsert the exact same secure variables without any modification. In this + // case, the index table should not be updated, indicating no write actually + // happened due to equality checking. t.Run("2 upsert same", func(t *testing.T) { reInsertIndex := uint64(30) - require.NoError(t, testState.UpsertSecureVariables(structs.MsgTypeTestSetup, reInsertIndex, svs)) + + for _, sv := range svs { + svReq := &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + } + reInsertIndex++ + resp := testState.SVESet(reInsertIndex, svReq) + require.NoError(t, resp.Error) + } + reInsertActualIndex, err := testState.Index(TableSecureVariables) require.NoError(t, err) require.Equal(t, insertIndex, reInsertActualIndex, "index should not have changed") quotaUsed, err := testState.SecureVariablesQuotaByNamespace(ws, structs.DefaultNamespace) require.NoError(t, err) - require.Equal(t, expectedQuotaSize, quotaUsed.Size) + require.Equal(t, int64(expectedQuotaSize), quotaUsed.Size) }) - // Modify a single one of the previously inserted secure variables - // and performs an upsert. This ensures the index table is - // modified correctly and that each secure variable is updated, or - // not, as expected. + // Modify a single one of the previously inserted secure variables and + // performs an upsert. This ensures the index table is modified correctly + // and that each secure variable is updated, or not, as expected. 
t.Run("3 modify one", func(t *testing.T) { sv1Update := svs[0].Copy() sv1Update.KeyID = "sv1-update" @@ -118,10 +132,13 @@ func TestStateStore_UpsertSecureVariables(t *testing.T) { buf[len(buf)-1] = 'x' sv1Update.Data = buf - svs1Update := []*structs.SecureVariableEncrypted{&sv1Update} - update1Index := uint64(40) - require.NoError(t, testState.UpsertSecureVariables(structs.MsgTypeTestSetup, update1Index, svs1Update)) + + resp := testState.SVESet(update1Index, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: &sv1Update, + }) + require.NoError(t, resp.Error) // Check that the index for the table was modified as expected. updateActualIndex, err := testState.Index(TableSecureVariables) @@ -132,38 +149,28 @@ func TestStateStore_UpsertSecureVariables(t *testing.T) { iter, err := testState.SecureVariables(ws) require.NoError(t, err) - // Iterate all the stored variables and assert they are as expected. + got := []*structs.SecureVariableEncrypted{} + + // Iterate all the stored variables and assert indexes have been updated as expected for raw := iter.Next(); raw != nil; raw = iter.Next() { sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("S " + printSecureVariable(sv)) - - var expectedModifyIndex uint64 - - switch sv.Path { - case sv1Update.Path: - expectedModifyIndex = update1Index - case svs[1].Path: - expectedModifyIndex = insertIndex - default: - t.Errorf("unknown secure variable found: %s", sv.Path) - continue - } - require.Equal(t, insertIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, expectedModifyIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) - // update the mock element so the test element has the correct create/modify - // indexes and times now that we have validated them - nv := sv.Copy() - svm[sv.Path] = &nv + var svCopy structs.SecureVariableEncrypted + svCopy = sv.Copy() + got = append(got, &svCopy) } + require.Len(t, got, 2) + require.Equal(t, update1Index, got[0].ModifyIndex) + require.Equal(t, insertIndex, got[1].ModifyIndex) + + // update the mocks so the test element has the correct create/modify + // indexes and times now that we have validated them + svs = got quotaUsed, err := testState.SecureVariablesQuotaByNamespace(ws, structs.DefaultNamespace) require.NoError(t, err) - require.Equal(t, expectedQuotaSize+1, quotaUsed.Size) + require.Equal(t, int64(expectedQuotaSize+1), quotaUsed.Size) }) - svs = svm.List() - t.Log(printSecureVariables(svs)) - // Modify the second variable but send an upsert request that // includes this and the already modified variable. t.Run("4 upsert other", func(t *testing.T) { @@ -171,11 +178,12 @@ func TestStateStore_UpsertSecureVariables(t *testing.T) { sv2 := svs[1].Copy() sv2.KeyID = "sv2-update" sv2.ModifyIndex = update2Index - svs2Update := []*structs.SecureVariableEncrypted{svs[0], &sv2} - t.Logf("* " + printSecureVariable(svs[0])) - t.Logf("* " + printSecureVariable(&sv2)) - require.NoError(t, testState.UpsertSecureVariables(structs.MsgTypeTestSetup, update2Index, svs2Update)) + resp := testState.SVESet(update2Index, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: &sv2, + }) + require.NoError(t, resp.Error) // Check that the index for the table was modified as expected. update2ActualIndex, err := testState.Index(TableSecureVariables) @@ -186,40 +194,23 @@ func TestStateStore_UpsertSecureVariables(t *testing.T) { iter, err := testState.SecureVariables(ws) require.NoError(t, err) - // Iterate all the stored variables and assert they are as expected. 
+ got := []structs.SecureVariableEncrypted{} + + // Iterate all the stored variables and assert indexes have been updated as expected for raw := iter.Next(); raw != nil; raw = iter.Next() { sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("S " + printSecureVariable(sv)) - - var ( - expectedModifyIndex uint64 - expectedSV *structs.SecureVariableEncrypted - ) - - switch sv.Path { - case sv2.Path: - expectedModifyIndex = update2Index - expectedSV = &sv2 - case svs[0].Path: - expectedModifyIndex = svs[0].ModifyIndex - expectedSV = svs[0] - default: - t.Errorf("unknown secure variable found: %s", sv.Path) - continue - } - require.Equal(t, insertIndex, sv.CreateIndex, "%s: incorrect create index", sv.Path) - require.Equal(t, expectedModifyIndex, sv.ModifyIndex, "%s: incorrect modify index", sv.Path) - - // update the mock element so the test element has the correct create/modify - // indexes and times now that we have validated them - expectedSV.ModifyTime = sv.ModifyTime - - require.True(t, expectedSV.Equals(*sv), "Secure Variables are not equal:\n expected:%s\n received:%s\n", printSecureVariable(expectedSV), printSecureVariable(sv)) + got = append(got, sv.Copy()) } + require.Len(t, got, 2) + require.Equal(t, svs[0].ModifyIndex, got[0].ModifyIndex) + require.Equal(t, update2Index, got[1].ModifyIndex) + + require.True(t, svs[0].Equals(got[0])) + require.True(t, sv2.Equals(got[1])) quotaUsed, err := testState.SecureVariablesQuotaByNamespace(ws, structs.DefaultNamespace) require.NoError(t, err) - require.Equal(t, expectedQuotaSize+1, quotaUsed.Size) + require.Equal(t, int64(expectedQuotaSize+1), quotaUsed.Size) }) } @@ -229,13 +220,22 @@ func TestStateStore_DeleteSecureVariable(t *testing.T) { testState := testStateStore(t) // Generate some test secure variables that we will use and modify throughout. - svs, _ := mockSecureVariables(2) + svs := []*structs.SecureVariableEncrypted{ + mock.SecureVariableEncrypted(), + mock.SecureVariableEncrypted(), + } + svs[0].Path = "aaaaa" + svs[1].Path = "bbbbb" + initialIndex := uint64(10) t.Run("1 delete a secure variable that does not exist", func(t *testing.T) { - err := testState.DeleteSecureVariables( - structs.MsgTypeTestSetup, initialIndex, svs[0].Namespace, []string{svs[0].Path}) - require.EqualError(t, err, "secure variable not found") + + resp := testState.SVEDelete(initialIndex, &structs.SVApplyStateRequest{ + Op: structs.SVOpDelete, + Var: svs[0], + }) + require.NoError(t, resp.Error, "deleting non-existing secure var is not an error") actualInitialIndex, err := testState.Index(TableSecureVariables) require.NoError(t, err) @@ -250,13 +250,28 @@ func TestStateStore_DeleteSecureVariable(t *testing.T) { // remaining is left as expected. t.Run("2 upsert variable and delete", func(t *testing.T) { - require.NoError(t, testState.UpsertSecureVariables( - structs.MsgTypeTestSetup, initialIndex, svs)) + ns := mock.Namespace() + ns.Name = svs[0].Namespace + require.NoError(t, testState.UpsertNamespaces(initialIndex, []*structs.Namespace{ns})) + + for _, sv := range svs { + svReq := &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + } + initialIndex++ + resp := testState.SVESet(initialIndex, svReq) + require.NoError(t, resp.Error) + } // Perform the delete. 
delete1Index := uint64(20) - require.NoError(t, testState.DeleteSecureVariables( - structs.MsgTypeTestSetup, delete1Index, svs[0].Namespace, []string{svs[0].Path})) + + resp := testState.SVEDelete(delete1Index, &structs.SVApplyStateRequest{ + Op: structs.SVOpDelete, + Var: svs[0], + }) + require.NoError(t, resp.Error) // Check that the index for the table was modified as expected. actualDelete1Index, err := testState.Index(TableSecureVariables) @@ -270,25 +285,29 @@ func TestStateStore_DeleteSecureVariable(t *testing.T) { require.NoError(t, err) var delete1Count int - var expectedQuotaSize uint64 + var expectedQuotaSize int // Iterate all the stored variables and assert we have the expected // number. for raw := iter.Next(); raw != nil; raw = iter.Next() { delete1Count++ v := raw.(*structs.SecureVariableEncrypted) - expectedQuotaSize += uint64(len(v.Data)) + expectedQuotaSize += len(v.Data) } require.Equal(t, 1, delete1Count, "unexpected number of variables in table") quotaUsed, err := testState.SecureVariablesQuotaByNamespace(ws, structs.DefaultNamespace) require.NoError(t, err) - require.Equal(t, expectedQuotaSize, quotaUsed.Size) + require.Equal(t, int64(expectedQuotaSize), quotaUsed.Size) }) t.Run("3 delete remaining variable", func(t *testing.T) { delete2Index := uint64(30) - require.NoError(t, testState.DeleteSecureVariable( - delete2Index, svs[1].Namespace, svs[1].Path)) + + resp := testState.SVEDelete(delete2Index, &structs.SVApplyStateRequest{ + Op: structs.SVOpDelete, + Var: svs[1], + }) + require.NoError(t, resp.Error) // Check that the index for the table was modified as expected. actualDelete2Index, err := testState.Index(TableSecureVariables) @@ -310,7 +329,7 @@ func TestStateStore_DeleteSecureVariable(t *testing.T) { quotaUsed, err := testState.SecureVariablesQuotaByNamespace(ws, structs.DefaultNamespace) require.NoError(t, err) - require.Equal(t, uint64(0), quotaUsed.Size) + require.Equal(t, int64(0), quotaUsed.Size) }) } @@ -318,11 +337,29 @@ func TestStateStore_GetSecureVariables(t *testing.T) { ci.Parallel(t) testState := testStateStore(t) - // Generate some test secure variables and upsert them. - svs, _ := mockSecureVariables(2) - svs[0].Namespace = "~*magical*~" + ns := mock.Namespace() + ns.Name = "~*magical*~" initialIndex := uint64(10) - require.NoError(t, testState.UpsertSecureVariables(structs.MsgTypeTestSetup, initialIndex, svs)) + require.NoError(t, testState.UpsertNamespaces(initialIndex, []*structs.Namespace{ns})) + + // Generate some test secure variables in different namespaces and upsert them. + svs := []*structs.SecureVariableEncrypted{ + mock.SecureVariableEncrypted(), + mock.SecureVariableEncrypted(), + } + svs[0].Path = "aaaaa" + svs[0].Namespace = "~*magical*~" + svs[1].Path = "bbbbb" + + for _, sv := range svs { + svReq := &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + } + initialIndex++ + resp := testState.SVESet(initialIndex, svReq) + require.NoError(t, resp.Error) + } // Look up secure variables using the namespace of the first mock variable. 
ws := memdb.NewWatchSet() @@ -332,13 +369,13 @@ func TestStateStore_GetSecureVariables(t *testing.T) { var count1 int for raw := iter.Next(); raw != nil; raw = iter.Next() { - count1++ sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("- sv: n=%q p=%q ci=%v mi=%v ed.ki=%q", sv.Namespace, sv.Path, sv.CreateIndex, sv.ModifyIndex, sv.KeyID) - require.Equal(t, initialIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, initialIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) require.Equal(t, svs[0].Namespace, sv.Namespace) + require.Equal(t, uint64(11), sv.CreateIndex, "%s incorrect create index", sv.Path) + require.Equal(t, uint64(11), sv.ModifyIndex, "%s incorrect modify index", sv.Path) + count1++ } + require.Equal(t, 1, count1) // Look up variables using the namespace of the second mock variable. @@ -350,9 +387,8 @@ func TestStateStore_GetSecureVariables(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { count2++ sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("- sv: n=%q p=%q ci=%v mi=%v ed.ki=%q", sv.Namespace, sv.Path, sv.CreateIndex, sv.ModifyIndex, sv.KeyID) - require.Equal(t, initialIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, initialIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) + require.Equal(t, initialIndex, sv.CreateIndex, "%s incorrect create index", sv.Path) + require.Equal(t, initialIndex, sv.ModifyIndex, "%s incorrect modify index", sv.Path) require.Equal(t, svs[1].Namespace, sv.Namespace) } require.Equal(t, 1, count2) @@ -375,7 +411,12 @@ func TestStateStore_ListSecureVariablesByNamespaceAndPrefix(t *testing.T) { testState := testStateStore(t) // Generate some test secure variables and upsert them. - svs, _ := mockSecureVariables(6) + svs := []*structs.SecureVariableEncrypted{} + for i := 0; i < 6; i++ { + sv := mock.SecureVariableEncrypted() + svs = append(svs, sv) + } + svs[0].Path = "a/b" svs[1].Path = "a/b/c" svs[2].Path = "unrelated/b/c" @@ -386,8 +427,20 @@ func TestStateStore_ListSecureVariablesByNamespaceAndPrefix(t *testing.T) { svs[5].Namespace = "other" svs[5].Path = "a/z/z" + ns := mock.Namespace() + ns.Name = "other" initialIndex := uint64(10) - require.NoError(t, testState.UpsertSecureVariables(structs.MsgTypeTestSetup, initialIndex, svs)) + require.NoError(t, testState.UpsertNamespaces(initialIndex, []*structs.Namespace{ns})) + + for _, sv := range svs { + svReq := &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + } + initialIndex++ + resp := testState.SVESet(initialIndex, svReq) + require.NoError(t, resp.Error) + } t.Run("ByNamespace", func(t *testing.T) { testCases := []struct { @@ -422,9 +475,6 @@ func TestStateStore_ListSecureVariablesByNamespaceAndPrefix(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { count++ sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("- sv: n=%q p=%q ci=%v mi=%v ed.ki=%q", sv.Namespace, sv.Path, sv.CreateIndex, sv.ModifyIndex, sv.KeyID) - require.Equal(t, initialIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, initialIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) require.Equal(t, tC.namespace, sv.Namespace) } }) @@ -480,9 +530,6 @@ func TestStateStore_ListSecureVariablesByNamespaceAndPrefix(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { count++ sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("- sv: n=%q p=%q ci=%v mi=%v ed.ki=%q", sv.Namespace, sv.Path, sv.CreateIndex, sv.ModifyIndex, sv.KeyID) - require.Equal(t, 
initialIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, initialIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) require.Equal(t, tC.namespace, sv.Namespace) require.True(t, strings.HasPrefix(sv.Path, tC.prefix)) } @@ -524,9 +571,6 @@ func TestStateStore_ListSecureVariablesByNamespaceAndPrefix(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { count++ sv := raw.(*structs.SecureVariableEncrypted) - t.Logf("- sv: n=%q p=%q ci=%v mi=%v ed.ki=%q", sv.Namespace, sv.Path, sv.CreateIndex, sv.ModifyIndex, sv.KeyID) - require.Equal(t, initialIndex, sv.CreateIndex, "incorrect create index", sv.Path) - require.Equal(t, initialIndex, sv.ModifyIndex, "incorrect modify index", sv.Path) require.True(t, strings.HasPrefix(sv.Path, tC.prefix)) } require.Equal(t, tC.expectedCount, count) @@ -539,7 +583,13 @@ func TestStateStore_ListSecureVariablesByKeyID(t *testing.T) { testState := testStateStore(t) // Generate some test secure variables and upsert them. - svs, _ := mockSecureVariables(7) + svs := []*structs.SecureVariableEncrypted{} + for i := 0; i < 7; i++ { + sv := mock.SecureVariableEncrypted() + sv.Path = uuid.Generate() + svs = append(svs, sv) + } + keyID := uuid.Generate() expectedForKey := []string{} @@ -552,8 +602,16 @@ func TestStateStore_ListSecureVariablesByKeyID(t *testing.T) { expectedOrphaned := []string{svs[5].Path, svs[6].Path} initialIndex := uint64(10) - require.NoError(t, testState.UpsertSecureVariables( - structs.MsgTypeTestSetup, initialIndex, svs)) + + for _, sv := range svs { + svReq := &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + } + initialIndex++ + resp := testState.SVESet(initialIndex, svReq) + require.NoError(t, resp.Error) + } ws := memdb.NewWatchSet() iter, err := testState.GetSecureVariablesByKeyID(ws, keyID) @@ -570,43 +628,6 @@ func TestStateStore_ListSecureVariablesByKeyID(t *testing.T) { require.Equal(t, 5, count) } -// mockSecureVariables returns a random number of secure variables between min -// and max inclusive. -func mockSecureVariables(count int) ( - []*structs.SecureVariableEncrypted, secureVariableMocks) { - var svm secureVariableMocks = make(map[string]*structs.SecureVariableEncrypted, count) - for i := 0; i < count; i++ { - nv := mock.SecureVariableEncrypted() - // There is an extremely rare chance of path collision because the mock - // secure variables generate their paths randomly. This check will add - // an extra component on conflict to (ideally) disambiguate them. - if _, found := svm[nv.Path]; found { - nv.Path = nv.Path + "/" + fmt.Sprint(time.Now().UnixNano()) - } - svm[nv.Path] = nv - } - return svm.List(), svm -} - -type secureVariableMocks map[string]*structs.SecureVariableEncrypted - -func (svm secureVariableMocks) List() []*structs.SecureVariableEncrypted { - out := make([]*structs.SecureVariableEncrypted, len(svm)) - i := 0 - for _, v := range svm { - out[i] = v - i++ - } - // objects will always come out of state store in namespace, path order. 
- sort.SliceStable(out, func(i, j int) bool { - if out[i].Namespace != out[j].Namespace { - return out[i].Namespace < out[j].Namespace - } - return out[i].Path < out[j].Path - }) - return out -} - func printSecureVariable(tsv *structs.SecureVariableEncrypted) string { b, _ := json.Marshal(tsv) return string(b) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 8d0427fd000..739c7d2dbbe 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -1054,7 +1054,12 @@ func TestStateStore_DeleteNamespaces_SecureVariables(t *testing.T) { sv := mock.SecureVariableEncrypted() sv.Namespace = ns.Name - require.NoError(t, state.UpsertSecureVariables(structs.MsgTypeTestSetup, 1001, []*structs.SecureVariableEncrypted{sv})) + + resp := state.SVESet(1001, &structs.SVApplyStateRequest{ + Op: structs.SVOpSet, + Var: sv, + }) + require.NoError(t, resp.Error) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() @@ -5163,7 +5168,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { JobID: alloc.JobID, TaskGroup: alloc.TaskGroup, DeploymentStatus: &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: healthy, }, } @@ -5208,7 +5213,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { JobID: alloc.JobID, TaskGroup: alloc.TaskGroup, DeploymentStatus: &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: false, }, } @@ -5679,10 +5684,10 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) t1 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } t2 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(false), + Migrate: pointer.Of(false), } eval := &structs.Evaluation{ ID: uuid.Generate(), @@ -7409,7 +7414,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { c3.JobID = j.ID c3.DeploymentID = d.ID c3.DesiredStatus = structs.AllocDesiredStatusStop - c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) @@ -7495,7 +7500,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -7503,7 +7508,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } if 
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { @@ -7590,7 +7595,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: true, } @@ -7601,7 +7606,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: true, } @@ -7610,7 +7615,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { c3.DeploymentID = d.ID d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) c3.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: true, } @@ -7747,7 +7752,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { a.JobID = job.ID a.DeploymentID = d1.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: true, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) @@ -7765,7 +7770,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { b.JobID = job.ID b.DeploymentID = d1.ID b.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: false, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b})) @@ -7786,7 +7791,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { c.JobID = job.ID c.DeploymentID = d2.ID c.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: true, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c})) @@ -7817,7 +7822,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { a.JobID = job.ID a.DeploymentID = d1.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: false, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) diff --git a/nomad/stream/event_broker.go b/nomad/stream/event_broker.go index e619968e0ab..da5f8f0abae 100644 --- a/nomad/stream/event_broker.go +++ b/nomad/stream/event_broker.go @@ -43,7 +43,7 @@ type EventBroker struct { aclDelegate ACLDelegate aclCache *lru.TwoQueueCache - aclCh chan *structs.Event + aclCh chan structs.Event logger hclog.Logger } @@ -72,7 +72,7 @@ func NewEventBroker(ctx context.Context, aclDelegate ACLDelegate, cfg EventBroke logger: cfg.Logger.Named("event_broker"), eventBuf: buffer, publishCh: make(chan *structs.Events, 64), - aclCh: make(chan *structs.Event, 10), + aclCh: make(chan structs.Event, 10), aclDelegate: aclDelegate, aclCache: aclCache, subscriptions: &subscriptions{ @@ -101,7 +101,7 @@ func (e *EventBroker) Publish(events *structs.Events) { // updated ACL Token or Policy for _, event := range events.Events { if event.Topic == structs.TopicACLToken || event.Topic == structs.TopicACLPolicy { - e.aclCh <- &event + e.aclCh 
<- event } } diff --git a/nomad/stream/subscription_test.go b/nomad/stream/subscription_test.go index d7bb9be368d..6f31a320451 100644 --- a/nomad/stream/subscription_test.go +++ b/nomad/stream/subscription_test.go @@ -190,7 +190,7 @@ func TestFilter_NamespaceAll(t *testing.T) { func TestFilter_FilterKeys(t *testing.T) { ci.Parallel(t) - + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One", FilterKeys: []string{"extra-key"}}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Test", Key: "Two"}) diff --git a/nomad/structs/config/artifact.go b/nomad/structs/config/artifact.go index 732b4ce8732..1b942805c0c 100644 --- a/nomad/structs/config/artifact.go +++ b/nomad/structs/config/artifact.go @@ -6,7 +6,7 @@ import ( "time" "github.com/dustin/go-humanize" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) // ArtifactConfig is the configuration specific to the Artifact stanza @@ -43,22 +43,22 @@ func (a *ArtifactConfig) Copy() *ArtifactConfig { newCopy := &ArtifactConfig{} if a.HTTPReadTimeout != nil { - newCopy.HTTPReadTimeout = helper.StringToPtr(*a.HTTPReadTimeout) + newCopy.HTTPReadTimeout = pointer.Of(*a.HTTPReadTimeout) } if a.HTTPMaxSize != nil { - newCopy.HTTPMaxSize = helper.StringToPtr(*a.HTTPMaxSize) + newCopy.HTTPMaxSize = pointer.Of(*a.HTTPMaxSize) } if a.GCSTimeout != nil { - newCopy.GCSTimeout = helper.StringToPtr(*a.GCSTimeout) + newCopy.GCSTimeout = pointer.Of(*a.GCSTimeout) } if a.GitTimeout != nil { - newCopy.GitTimeout = helper.StringToPtr(*a.GitTimeout) + newCopy.GitTimeout = pointer.Of(*a.GitTimeout) } if a.HgTimeout != nil { - newCopy.HgTimeout = helper.StringToPtr(*a.HgTimeout) + newCopy.HgTimeout = pointer.Of(*a.HgTimeout) } if a.S3Timeout != nil { - newCopy.S3Timeout = helper.StringToPtr(*a.S3Timeout) + newCopy.S3Timeout = pointer.Of(*a.S3Timeout) } return newCopy @@ -74,22 +74,22 @@ func (a *ArtifactConfig) Merge(o *ArtifactConfig) *ArtifactConfig { newCopy := a.Copy() if o.HTTPReadTimeout != nil { - newCopy.HTTPReadTimeout = helper.StringToPtr(*o.HTTPReadTimeout) + newCopy.HTTPReadTimeout = pointer.Of(*o.HTTPReadTimeout) } if o.HTTPMaxSize != nil { - newCopy.HTTPMaxSize = helper.StringToPtr(*o.HTTPMaxSize) + newCopy.HTTPMaxSize = pointer.Of(*o.HTTPMaxSize) } if o.GCSTimeout != nil { - newCopy.GCSTimeout = helper.StringToPtr(*o.GCSTimeout) + newCopy.GCSTimeout = pointer.Of(*o.GCSTimeout) } if o.GitTimeout != nil { - newCopy.GitTimeout = helper.StringToPtr(*o.GitTimeout) + newCopy.GitTimeout = pointer.Of(*o.GitTimeout) } if o.HgTimeout != nil { - newCopy.HgTimeout = helper.StringToPtr(*o.HgTimeout) + newCopy.HgTimeout = pointer.Of(*o.HgTimeout) } if o.S3Timeout != nil { - newCopy.S3Timeout = helper.StringToPtr(*o.S3Timeout) + newCopy.S3Timeout = pointer.Of(*o.S3Timeout) } return newCopy @@ -161,26 +161,26 @@ func DefaultArtifactConfig() *ArtifactConfig { return &ArtifactConfig{ // Read timeout for HTTP operations. Must be long enough to // accommodate large/slow downloads. - HTTPReadTimeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), // Maximum download size. Must be large enough to accommodate // large downloads. - HTTPMaxSize: helper.StringToPtr("100GB"), + HTTPMaxSize: pointer.Of("100GB"), // Timeout for GCS operations. Must be long enough to // accommodate large/slow downloads. - GCSTimeout: helper.StringToPtr("30m"), + GCSTimeout: pointer.Of("30m"), // Timeout for Git operations. Must be long enough to // accommodate large/slow clones. 
- GitTimeout: helper.StringToPtr("30m"), + GitTimeout: pointer.Of("30m"), // Timeout for Hg operations. Must be long enough to // accommodate large/slow clones. - HgTimeout: helper.StringToPtr("30m"), + HgTimeout: pointer.Of("30m"), // Timeout for S3 operations. Must be long enough to // accommodate large/slow downloads. - S3Timeout: helper.StringToPtr("30m"), + S3Timeout: pointer.Of("30m"), } } diff --git a/nomad/structs/config/artifact_test.go b/nomad/structs/config/artifact_test.go index e8c78d1f6cd..f1ba29a0b7f 100644 --- a/nomad/structs/config/artifact_test.go +++ b/nomad/structs/config/artifact_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -15,10 +15,10 @@ func TestArtifactConfig_Copy(t *testing.T) { b := a.Copy() require.Equal(t, a, b) - b.HTTPReadTimeout = helper.StringToPtr("5m") - b.HTTPMaxSize = helper.StringToPtr("2MB") - b.GitTimeout = helper.StringToPtr("3m") - b.HgTimeout = helper.StringToPtr("2m") + b.HTTPReadTimeout = pointer.Of("5m") + b.HTTPMaxSize = pointer.Of("2MB") + b.GitTimeout = pointer.Of("3m") + b.HgTimeout = pointer.Of("2m") require.NotEqual(t, a, b) } @@ -34,68 +34,68 @@ func TestArtifactConfig_Merge(t *testing.T) { { name: "merge all fields", source: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, other: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, expected: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, }, { name: "null source", source: nil, other: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, expected: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + 
HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, }, { name: "null other", source: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, other: nil, expected: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, }, } @@ -131,28 +131,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "http read timeout is invalid", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("invalid") + a.HTTPReadTimeout = pointer.Of("invalid") }, expectedError: "http_read_timeout not a valid duration", }, { name: "http read timeout is empty", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("") + a.HTTPReadTimeout = pointer.Of("") }, expectedError: "http_read_timeout not a valid duration", }, { name: "http read timeout is zero", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("0") + a.HTTPReadTimeout = pointer.Of("0") }, expectedError: "", }, { name: "http read timeout is negative", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("-10m") + a.HTTPReadTimeout = pointer.Of("-10m") }, expectedError: "http_read_timeout must be > 0", }, @@ -166,28 +166,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "http max size is invalid", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("invalid") + a.HTTPMaxSize = pointer.Of("invalid") }, expectedError: "http_max_size not a valid size", }, { name: "http max size is empty", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("") + a.HTTPMaxSize = pointer.Of("") }, expectedError: "http_max_size not a valid size", }, { name: "http max size is zero", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("0") + a.HTTPMaxSize = pointer.Of("0") }, expectedError: "", }, { name: "http max size is negative", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("-l0MB") + a.HTTPMaxSize = pointer.Of("-l0MB") }, expectedError: "http_max_size not a valid size", }, @@ -201,28 +201,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "gcs timeout is invalid", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("invalid") + a.GCSTimeout = pointer.Of("invalid") }, expectedError: "gcs_timeout not a valid duration", }, { name: "gcs timeout is empty", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("") + a.GCSTimeout = pointer.Of("") }, expectedError: "gcs_timeout not a valid duration", }, { name: "gcs timeout is zero", config: func(a *ArtifactConfig) { - a.GCSTimeout = 
helper.StringToPtr("0") + a.GCSTimeout = pointer.Of("0") }, expectedError: "", }, { name: "gcs timeout is negative", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("-l0m") + a.GCSTimeout = pointer.Of("-l0m") }, expectedError: "gcs_timeout not a valid duration", }, @@ -236,28 +236,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "git timeout is invalid", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("invalid") + a.GitTimeout = pointer.Of("invalid") }, expectedError: "git_timeout not a valid duration", }, { name: "git timeout is empty", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("") + a.GitTimeout = pointer.Of("") }, expectedError: "git_timeout not a valid duration", }, { name: "git timeout is zero", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("0") + a.GitTimeout = pointer.Of("0") }, expectedError: "", }, { name: "git timeout is negative", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("-l0m") + a.GitTimeout = pointer.Of("-l0m") }, expectedError: "git_timeout not a valid duration", }, @@ -271,28 +271,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "hg timeout is invalid", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("invalid") + a.HgTimeout = pointer.Of("invalid") }, expectedError: "hg_timeout not a valid duration", }, { name: "hg timeout is empty", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("") + a.HgTimeout = pointer.Of("") }, expectedError: "hg_timeout not a valid duration", }, { name: "hg timeout is zero", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("0") + a.HgTimeout = pointer.Of("0") }, expectedError: "", }, { name: "hg timeout is negative", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("-l0m") + a.HgTimeout = pointer.Of("-l0m") }, expectedError: "hg_timeout not a valid duration", }, @@ -306,28 +306,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "s3 timeout is invalid", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("invalid") + a.S3Timeout = pointer.Of("invalid") }, expectedError: "s3_timeout not a valid duration", }, { name: "s3 timeout is empty", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("") + a.S3Timeout = pointer.Of("") }, expectedError: "s3_timeout not a valid duration", }, { name: "s3 timeout is zero", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("0") + a.S3Timeout = pointer.Of("0") }, expectedError: "", }, { name: "s3 timeout is negative", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("-l0m") + a.S3Timeout = pointer.Of("-l0m") }, expectedError: "s3_timeout not a valid duration", }, diff --git a/nomad/structs/config/audit.go b/nomad/structs/config/audit.go index cc7e7bf5d07..91a43016469 100644 --- a/nomad/structs/config/audit.go +++ b/nomad/structs/config/audit.go @@ -4,6 +4,7 @@ import ( "time" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) // AuditConfig is the configuration specific to Audit Logging @@ -83,7 +84,7 @@ func (a *AuditConfig) Copy() *AuditConfig { // Copy bool pointers if a.Enabled != nil { - nc.Enabled = helper.BoolToPtr(*a.Enabled) + nc.Enabled = pointer.Of(*a.Enabled) } // Copy Sinks and Filters @@ -98,7 +99,7 @@ func (a *AuditConfig) Merge(b *AuditConfig) *AuditConfig { result := a.Copy() if b.Enabled != nil { - result.Enabled = helper.BoolToPtr(*b.Enabled) + 
result.Enabled = pointer.Of(*b.Enabled) } // Merge Sinks diff --git a/nomad/structs/config/audit_test.go b/nomad/structs/config/audit_test.go index 7cd9d930a2b..0388edb8773 100644 --- a/nomad/structs/config/audit_test.go +++ b/nomad/structs/config/audit_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -13,7 +13,7 @@ func TestAuditConfig_Merge(t *testing.T) { ci.Parallel(t) c1 := &AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*AuditSink{ { DeliveryGuarantee: "enforced", @@ -71,7 +71,7 @@ func TestAuditConfig_Merge(t *testing.T) { } e := &AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*AuditSink{ { DeliveryGuarantee: "best-effort", diff --git a/nomad/structs/config/autopilot.go b/nomad/structs/config/autopilot.go index d71b0c39803..2efd0f20aba 100644 --- a/nomad/structs/config/autopilot.go +++ b/nomad/structs/config/autopilot.go @@ -3,7 +3,7 @@ package config import ( "time" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) type AutopilotConfig struct { @@ -60,7 +60,7 @@ func (a *AutopilotConfig) Merge(b *AutopilotConfig) *AutopilotConfig { result := a.Copy() if b.CleanupDeadServers != nil { - result.CleanupDeadServers = helper.BoolToPtr(*b.CleanupDeadServers) + result.CleanupDeadServers = pointer.Of(*b.CleanupDeadServers) } if b.ServerStabilizationTime != 0 { result.ServerStabilizationTime = b.ServerStabilizationTime @@ -84,7 +84,7 @@ func (a *AutopilotConfig) Merge(b *AutopilotConfig) *AutopilotConfig { result.EnableRedundancyZones = b.EnableRedundancyZones } if b.DisableUpgradeMigration != nil { - result.DisableUpgradeMigration = helper.BoolToPtr(*b.DisableUpgradeMigration) + result.DisableUpgradeMigration = pointer.Of(*b.DisableUpgradeMigration) } if b.EnableCustomUpgrades != nil { result.EnableCustomUpgrades = b.EnableCustomUpgrades @@ -104,16 +104,16 @@ func (a *AutopilotConfig) Copy() *AutopilotConfig { // Copy the bools if a.CleanupDeadServers != nil { - nc.CleanupDeadServers = helper.BoolToPtr(*a.CleanupDeadServers) + nc.CleanupDeadServers = pointer.Of(*a.CleanupDeadServers) } if a.EnableRedundancyZones != nil { - nc.EnableRedundancyZones = helper.BoolToPtr(*a.EnableRedundancyZones) + nc.EnableRedundancyZones = pointer.Of(*a.EnableRedundancyZones) } if a.DisableUpgradeMigration != nil { - nc.DisableUpgradeMigration = helper.BoolToPtr(*a.DisableUpgradeMigration) + nc.DisableUpgradeMigration = pointer.Of(*a.DisableUpgradeMigration) } if a.EnableCustomUpgrades != nil { - nc.EnableCustomUpgrades = helper.BoolToPtr(*a.EnableCustomUpgrades) + nc.EnableCustomUpgrades = pointer.Of(*a.EnableCustomUpgrades) } return nc diff --git a/nomad/structs/config/consul.go b/nomad/structs/config/consul.go index f836b822b8d..bddd7947fed 100644 --- a/nomad/structs/config/consul.go +++ b/nomad/structs/config/consul.go @@ -8,7 +8,7 @@ import ( consul "github.com/hashicorp/consul/api" "github.com/hashicorp/go-secure-stdlib/listenerutil" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) // ConsulConfig contains the configuration information necessary to @@ -16,11 +16,11 @@ import ( // // - Register services and their checks with Consul // -// - Bootstrap this Nomad Client with the list of Nomad Servers registered -// with Consul +// - Bootstrap this Nomad Client with the list of Nomad Servers registered +// with 
Consul // -// - Establish how this Nomad Client will resolve Envoy Connect Sidecar -// images. +// - Establish how this Nomad Client will resolve Envoy Connect Sidecar +// images. // // Both the Agent and the executor need to be able to import ConsulConfig. type ConsulConfig struct { @@ -141,17 +141,17 @@ func DefaultConsulConfig() *ConsulConfig { ServerRPCCheckName: "Nomad Server RPC Check", ClientServiceName: "nomad-client", ClientHTTPCheckName: "Nomad Client HTTP Check", - AutoAdvertise: helper.BoolToPtr(true), - ChecksUseAdvertise: helper.BoolToPtr(false), - ServerAutoJoin: helper.BoolToPtr(true), - ClientAutoJoin: helper.BoolToPtr(true), - AllowUnauthenticated: helper.BoolToPtr(true), + AutoAdvertise: pointer.Of(true), + ChecksUseAdvertise: pointer.Of(false), + ServerAutoJoin: pointer.Of(true), + ClientAutoJoin: pointer.Of(true), + AllowUnauthenticated: pointer.Of(true), Timeout: 5 * time.Second, // From Consul api package defaults Addr: def.Address, - EnableSSL: helper.BoolToPtr(def.Scheme == "https"), - VerifySSL: helper.BoolToPtr(!def.TLSConfig.InsecureSkipVerify), + EnableSSL: pointer.Of(def.Scheme == "https"), + VerifySSL: pointer.Of(!def.TLSConfig.InsecureSkipVerify), CAFile: def.TLSConfig.CAFile, Namespace: def.Namespace, } @@ -190,7 +190,7 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { } result.Tags = append(result.Tags, b.Tags...) if b.AutoAdvertise != nil { - result.AutoAdvertise = helper.BoolToPtr(*b.AutoAdvertise) + result.AutoAdvertise = pointer.Of(*b.AutoAdvertise) } if b.Addr != "" { result.Addr = b.Addr @@ -211,13 +211,13 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { result.Auth = b.Auth } if b.EnableSSL != nil { - result.EnableSSL = helper.BoolToPtr(*b.EnableSSL) + result.EnableSSL = pointer.Of(*b.EnableSSL) } if b.VerifySSL != nil { - result.VerifySSL = helper.BoolToPtr(*b.VerifySSL) + result.VerifySSL = pointer.Of(*b.VerifySSL) } if b.ShareSSL != nil { - result.ShareSSL = helper.BoolToPtr(*b.ShareSSL) + result.ShareSSL = pointer.Of(*b.ShareSSL) } if b.CAFile != "" { result.CAFile = b.CAFile @@ -229,16 +229,16 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { result.KeyFile = b.KeyFile } if b.ServerAutoJoin != nil { - result.ServerAutoJoin = helper.BoolToPtr(*b.ServerAutoJoin) + result.ServerAutoJoin = pointer.Of(*b.ServerAutoJoin) } if b.ClientAutoJoin != nil { - result.ClientAutoJoin = helper.BoolToPtr(*b.ClientAutoJoin) + result.ClientAutoJoin = pointer.Of(*b.ClientAutoJoin) } if b.ChecksUseAdvertise != nil { - result.ChecksUseAdvertise = helper.BoolToPtr(*b.ChecksUseAdvertise) + result.ChecksUseAdvertise = pointer.Of(*b.ChecksUseAdvertise) } if b.AllowUnauthenticated != nil { - result.AllowUnauthenticated = helper.BoolToPtr(*b.AllowUnauthenticated) + result.AllowUnauthenticated = pointer.Of(*b.AllowUnauthenticated) } if b.Namespace != "" { result.Namespace = b.Namespace @@ -319,28 +319,28 @@ func (c *ConsulConfig) Copy() *ConsulConfig { // Copy the bools if nc.AutoAdvertise != nil { - nc.AutoAdvertise = helper.BoolToPtr(*nc.AutoAdvertise) + nc.AutoAdvertise = pointer.Of(*nc.AutoAdvertise) } if nc.ChecksUseAdvertise != nil { - nc.ChecksUseAdvertise = helper.BoolToPtr(*nc.ChecksUseAdvertise) + nc.ChecksUseAdvertise = pointer.Of(*nc.ChecksUseAdvertise) } if nc.EnableSSL != nil { - nc.EnableSSL = helper.BoolToPtr(*nc.EnableSSL) + nc.EnableSSL = pointer.Of(*nc.EnableSSL) } if nc.VerifySSL != nil { - nc.VerifySSL = helper.BoolToPtr(*nc.VerifySSL) + nc.VerifySSL = pointer.Of(*nc.VerifySSL) } if nc.ShareSSL != nil { - 
nc.ShareSSL = helper.BoolToPtr(*nc.ShareSSL) + nc.ShareSSL = pointer.Of(*nc.ShareSSL) } if nc.ServerAutoJoin != nil { - nc.ServerAutoJoin = helper.BoolToPtr(*nc.ServerAutoJoin) + nc.ServerAutoJoin = pointer.Of(*nc.ServerAutoJoin) } if nc.ClientAutoJoin != nil { - nc.ClientAutoJoin = helper.BoolToPtr(*nc.ClientAutoJoin) + nc.ClientAutoJoin = pointer.Of(*nc.ClientAutoJoin) } if nc.AllowUnauthenticated != nil { - nc.AllowUnauthenticated = helper.BoolToPtr(*nc.AllowUnauthenticated) + nc.AllowUnauthenticated = pointer.Of(*nc.AllowUnauthenticated) } return nc diff --git a/nomad/structs/config/limits.go b/nomad/structs/config/limits.go index 5c17bc99ee5..77fda5190f0 100644 --- a/nomad/structs/config/limits.go +++ b/nomad/structs/config/limits.go @@ -1,6 +1,6 @@ package config -import "github.com/hashicorp/nomad/helper" +import "github.com/hashicorp/nomad/helper/pointer" const ( // LimitsNonStreamingConnsPerClient is the number of connections per @@ -47,9 +47,9 @@ type Limits struct { func DefaultLimits() Limits { return Limits{ HTTPSHandshakeTimeout: "5s", - HTTPMaxConnsPerClient: helper.IntToPtr(100), + HTTPMaxConnsPerClient: pointer.Of(100), RPCHandshakeTimeout: "5s", - RPCMaxConnsPerClient: helper.IntToPtr(100), + RPCMaxConnsPerClient: pointer.Of(100), } } @@ -62,13 +62,13 @@ func (l *Limits) Merge(o Limits) Limits { m.HTTPSHandshakeTimeout = o.HTTPSHandshakeTimeout } if o.HTTPMaxConnsPerClient != nil { - m.HTTPMaxConnsPerClient = helper.IntToPtr(*o.HTTPMaxConnsPerClient) + m.HTTPMaxConnsPerClient = pointer.Of(*o.HTTPMaxConnsPerClient) } if o.RPCHandshakeTimeout != "" { m.RPCHandshakeTimeout = o.RPCHandshakeTimeout } if o.RPCMaxConnsPerClient != nil { - m.RPCMaxConnsPerClient = helper.IntToPtr(*o.RPCMaxConnsPerClient) + m.RPCMaxConnsPerClient = pointer.Of(*o.RPCMaxConnsPerClient) } return m @@ -78,10 +78,10 @@ func (l *Limits) Merge(o Limits) Limits { func (l *Limits) Copy() Limits { c := *l if l.HTTPMaxConnsPerClient != nil { - c.HTTPMaxConnsPerClient = helper.IntToPtr(*l.HTTPMaxConnsPerClient) + c.HTTPMaxConnsPerClient = pointer.Of(*l.HTTPMaxConnsPerClient) } if l.RPCMaxConnsPerClient != nil { - c.RPCMaxConnsPerClient = helper.IntToPtr(*l.RPCMaxConnsPerClient) + c.RPCMaxConnsPerClient = pointer.Of(*l.RPCMaxConnsPerClient) } return c } diff --git a/nomad/structs/config/limits_test.go b/nomad/structs/config/limits_test.go index 7a4082f3db7..95f68037f6d 100644 --- a/nomad/structs/config/limits_test.go +++ b/nomad/structs/config/limits_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -32,9 +32,9 @@ func TestLimits_Copy(t *testing.T) { // Assert changes to copy are not propagated to the original c.HTTPSHandshakeTimeout = "1s" - c.HTTPMaxConnsPerClient = helper.IntToPtr(50) + c.HTTPMaxConnsPerClient = pointer.Of(50) c.RPCHandshakeTimeout = "1s" - c.RPCMaxConnsPerClient = helper.IntToPtr(50) + c.RPCMaxConnsPerClient = pointer.Of(50) require.NotEqual(t, c.HTTPSHandshakeTimeout, o.HTTPSHandshakeTimeout) @@ -74,7 +74,7 @@ func TestLimits_Merge(t *testing.T) { // Use short struct initialization style so it fails to compile if // fields are added - expected := Limits{"10s", helper.IntToPtr(100), "5s", helper.IntToPtr(100)} + expected := Limits{"10s", pointer.Of(100), "5s", pointer.Of(100)} require.Equal(t, expected, m2) // Mergin in 0 values should not change anything diff --git a/nomad/structs/config/sentinel.go b/nomad/structs/config/sentinel.go index 
c5ea34afbe0..6dc17c85a38 100644 --- a/nomad/structs/config/sentinel.go +++ b/nomad/structs/config/sentinel.go @@ -1,11 +1,26 @@ package config +import ( + "github.com/hashicorp/nomad/helper" + "golang.org/x/exp/slices" +) + // SentinelConfig is configuration specific to Sentinel type SentinelConfig struct { // Imports are the configured imports Imports []*SentinelImport `hcl:"import,expand"` } +func (s *SentinelConfig) Copy() *SentinelConfig { + if s == nil { + return nil + } + + ns := *s + ns.Imports = helper.CopySlice(s.Imports) + return &ns +} + // SentinelImport is used per configured import type SentinelImport struct { Name string `hcl:",key"` @@ -13,6 +28,16 @@ type SentinelImport struct { Args []string `hcl:"args"` } +func (s *SentinelImport) Copy() *SentinelImport { + if s == nil { + return nil + } + + ns := *s + ns.Args = slices.Clone(s.Args) + return &ns +} + // Merge is used to merge two Sentinel configs together. The settings from the input always take precedence. func (a *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig { result := *a diff --git a/nomad/structs/config/tls.go b/nomad/structs/config/tls.go index afb4d8e43c1..6130747d109 100644 --- a/nomad/structs/config/tls.go +++ b/nomad/structs/config/tls.go @@ -125,16 +125,6 @@ func (k *KeyLoader) GetClientCertificate(*tls.CertificateRequestInfo) (*tls.Cert return k.certificate, nil } -func (k *KeyLoader) Copy() *KeyLoader { - if k == nil { - return nil - } - - new := KeyLoader{} - new.certificate = k.certificate - return &new -} - // GetKeyLoader returns the keyloader for a TLSConfig object. If the keyloader // has not been initialized, it will first do so. func (t *TLSConfig) GetKeyLoader() *KeyLoader { @@ -162,8 +152,12 @@ func (t *TLSConfig) Copy() *TLSConfig { new.CAFile = t.CAFile new.CertFile = t.CertFile + // Shallow copy the key loader as its GetOutgoingCertificate method is what + // is used by the HTTP server to retrieve the certificate. If we create a new + // KeyLoader struct, the HTTP server will still be calling the old + // GetOutgoingCertificate method. 
t.keyloaderLock.Lock() - new.KeyLoader = t.KeyLoader.Copy() + new.KeyLoader = t.KeyLoader t.keyloaderLock.Unlock() new.KeyFile = t.KeyFile diff --git a/nomad/structs/config/tls_test.go b/nomad/structs/config/tls_test.go index 59bcfed18d7..0e89898ef49 100644 --- a/nomad/structs/config/tls_test.go +++ b/nomad/structs/config/tls_test.go @@ -204,7 +204,7 @@ func TestTLS_Copy(t *testing.T) { // object func TestTLS_GetKeyloader(t *testing.T) { ci.Parallel(t) - + require := require.New(t) a := &TLSConfig{} require.NotNil(a.GetKeyLoader()) diff --git a/nomad/structs/config/vault.go b/nomad/structs/config/vault.go index 83a239a19ce..4deb4c2f743 100644 --- a/nomad/structs/config/vault.go +++ b/nomad/structs/config/vault.go @@ -3,7 +3,7 @@ package config import ( "time" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" vault "github.com/hashicorp/vault/api" ) @@ -87,7 +87,7 @@ func DefaultVaultConfig() *VaultConfig { return &VaultConfig{ Addr: "https://vault.service.consul:8200", ConnectionRetryIntv: DefaultVaultConnectRetryIntv, - AllowUnauthenticated: helper.BoolToPtr(true), + AllowUnauthenticated: pointer.Of(true), } } @@ -106,14 +106,20 @@ func (c *VaultConfig) AllowsUnauthenticated() bool { func (c *VaultConfig) Merge(b *VaultConfig) *VaultConfig { result := *c + if b.Enabled != nil { + result.Enabled = b.Enabled + } if b.Token != "" { result.Token = b.Token } + if b.Role != "" { + result.Role = b.Role + } if b.Namespace != "" { result.Namespace = b.Namespace } - if b.Role != "" { - result.Role = b.Role + if b.AllowUnauthenticated != nil { + result.AllowUnauthenticated = b.AllowUnauthenticated } if b.TaskTokenTTL != "" { result.TaskTokenTTL = b.TaskTokenTTL @@ -136,17 +142,11 @@ func (c *VaultConfig) Merge(b *VaultConfig) *VaultConfig { if b.TLSKeyFile != "" { result.TLSKeyFile = b.TLSKeyFile } - if b.TLSServerName != "" { - result.TLSServerName = b.TLSServerName - } - if b.AllowUnauthenticated != nil { - result.AllowUnauthenticated = b.AllowUnauthenticated - } if b.TLSSkipVerify != nil { result.TLSSkipVerify = b.TLSSkipVerify } - if b.Enabled != nil { - result.Enabled = b.Enabled + if b.TLSServerName != "" { + result.TLSServerName = b.TLSServerName } return &result @@ -188,9 +188,9 @@ func (c *VaultConfig) Copy() *VaultConfig { return nc } -// IsEqual compares two Vault configurations and returns a boolean indicating +// Equals compares two Vault configurations and returns a boolean indicating // if they are equal. 
-func (c *VaultConfig) IsEqual(b *VaultConfig) bool { +func (c *VaultConfig) Equals(b *VaultConfig) bool { if c == nil && b != nil { return false } @@ -198,12 +198,32 @@ func (c *VaultConfig) IsEqual(b *VaultConfig) bool { return false } + if c.Enabled == nil || b.Enabled == nil { + if c.Enabled != b.Enabled { + return false + } + } else if *c.Enabled != *b.Enabled { + return false + } + if c.Token != b.Token { return false } if c.Role != b.Role { return false } + if c.Namespace != b.Namespace { + return false + } + + if c.AllowUnauthenticated == nil || b.AllowUnauthenticated == nil { + if c.AllowUnauthenticated != b.AllowUnauthenticated { + return false + } + } else if *c.AllowUnauthenticated != *b.AllowUnauthenticated { + return false + } + if c.TaskTokenTTL != b.TaskTokenTTL { return false } @@ -225,17 +245,18 @@ func (c *VaultConfig) IsEqual(b *VaultConfig) bool { if c.TLSKeyFile != b.TLSKeyFile { return false } - if c.TLSServerName != b.TLSServerName { - return false - } - if c.AllowUnauthenticated != b.AllowUnauthenticated { - return false - } - if c.TLSSkipVerify != b.TLSSkipVerify { + + if c.TLSSkipVerify == nil || b.TLSSkipVerify == nil { + if c.TLSSkipVerify != b.TLSSkipVerify { + return false + } + } else if *c.TLSSkipVerify != *b.TLSSkipVerify { return false } - if c.Enabled != b.Enabled { + + if c.TLSServerName != b.TLSServerName { return false } + return true } diff --git a/nomad/structs/config/vault_test.go b/nomad/structs/config/vault_test.go index f6fa2c6bd4f..6f5e2909c98 100644 --- a/nomad/structs/config/vault_test.go +++ b/nomad/structs/config/vault_test.go @@ -3,35 +3,36 @@ package config import ( "reflect" "testing" + "time" "github.com/hashicorp/nomad/ci" - "github.com/stretchr/testify/require" + "github.com/hashicorp/nomad/helper/pointer" + "github.com/shoenig/test/must" ) func TestVaultConfig_Merge(t *testing.T) { ci.Parallel(t) - trueValue, falseValue := true, false c1 := &VaultConfig{ - Enabled: &falseValue, + Enabled: pointer.Of(false), Token: "1", Role: "1", - AllowUnauthenticated: &trueValue, + AllowUnauthenticated: pointer.Of(true), TaskTokenTTL: "1", Addr: "1", TLSCaFile: "1", TLSCaPath: "1", TLSCertFile: "1", TLSKeyFile: "1", - TLSSkipVerify: &trueValue, + TLSSkipVerify: pointer.Of(true), TLSServerName: "1", } c2 := &VaultConfig{ - Enabled: &trueValue, + Enabled: pointer.Of(true), Token: "2", Role: "2", - AllowUnauthenticated: &falseValue, + AllowUnauthenticated: pointer.Of(false), TaskTokenTTL: "2", Addr: "2", TLSCaFile: "2", @@ -43,17 +44,17 @@ func TestVaultConfig_Merge(t *testing.T) { } e := &VaultConfig{ - Enabled: &trueValue, + Enabled: pointer.Of(true), Token: "2", Role: "2", - AllowUnauthenticated: &falseValue, + AllowUnauthenticated: pointer.Of(false), TaskTokenTTL: "2", Addr: "2", TLSCaFile: "2", TLSCaPath: "2", TLSCertFile: "2", TLSKeyFile: "2", - TLSSkipVerify: &trueValue, + TLSSkipVerify: pointer.Of(true), TLSServerName: "2", } @@ -63,72 +64,78 @@ func TestVaultConfig_Merge(t *testing.T) { } } -func TestVaultConfig_IsEqual(t *testing.T) { +func TestVaultConfig_Equals(t *testing.T) { ci.Parallel(t) - require := require.New(t) - - trueValue, falseValue := true, false c1 := &VaultConfig{ - Enabled: &falseValue, + Enabled: pointer.Of(false), Token: "1", Role: "1", - AllowUnauthenticated: &trueValue, + Namespace: "1", + AllowUnauthenticated: pointer.Of(true), TaskTokenTTL: "1", Addr: "1", + ConnectionRetryIntv: time.Second, TLSCaFile: "1", TLSCaPath: "1", TLSCertFile: "1", TLSKeyFile: "1", - TLSSkipVerify: &trueValue, + TLSSkipVerify: 
pointer.Of(true), TLSServerName: "1", } c2 := &VaultConfig{ - Enabled: &falseValue, + Enabled: pointer.Of(false), Token: "1", Role: "1", - AllowUnauthenticated: &trueValue, + Namespace: "1", + AllowUnauthenticated: pointer.Of(true), TaskTokenTTL: "1", Addr: "1", + ConnectionRetryIntv: time.Second, TLSCaFile: "1", TLSCaPath: "1", TLSCertFile: "1", TLSKeyFile: "1", - TLSSkipVerify: &trueValue, + TLSSkipVerify: pointer.Of(true), TLSServerName: "1", } - require.True(c1.IsEqual(c2)) + must.Equals(t, c1, c2) c3 := &VaultConfig{ - Enabled: &trueValue, + Enabled: pointer.Of(true), Token: "1", Role: "1", - AllowUnauthenticated: &trueValue, + Namespace: "1", + AllowUnauthenticated: pointer.Of(true), TaskTokenTTL: "1", Addr: "1", + ConnectionRetryIntv: time.Second, TLSCaFile: "1", TLSCaPath: "1", TLSCertFile: "1", TLSKeyFile: "1", - TLSSkipVerify: &trueValue, + TLSSkipVerify: pointer.Of(true), TLSServerName: "1", } c4 := &VaultConfig{ - Enabled: &falseValue, + Enabled: pointer.Of(false), Token: "1", Role: "1", - AllowUnauthenticated: &trueValue, + Namespace: "1", + AllowUnauthenticated: pointer.Of(true), TaskTokenTTL: "1", Addr: "1", + ConnectionRetryIntv: time.Second, TLSCaFile: "1", TLSCaPath: "1", TLSCertFile: "1", TLSKeyFile: "1", - TLSSkipVerify: &trueValue, + TLSSkipVerify: pointer.Of(true), TLSServerName: "1", } - require.False(c3.IsEqual(c4)) + + must.NotEquals(t, c3, c4) } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index eea20b597d6..75601706b31 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -62,12 +62,17 @@ type TaskCSIPluginConfig struct { // Type instructs Nomad on how to handle processing a plugin Type CSIPluginType - // MountDir is the destination that nomad should mount in its CSI - // directory for the plugin. It will then expect a file called CSISocketName - // to be created by the plugin, and will provide references into - // "MountDir/CSIIntermediaryDirname/{VolumeName}/{AllocID} for mounts. + // MountDir is the directory (within its container) in which the plugin creates a + // socket (called CSISocketName) for communication with Nomad. Default is /csi. MountDir string + // StagePublishBaseDir is the base directory (within its container) in which the plugin + // mounts volumes being staged and bind mount volumes being published. + // e.g. staging_target_path = {StagePublishBaseDir}/staging/{volume-id}/{usage-mode} + // e.g. target_path = {StagePublishBaseDir}/per-alloc/{alloc-id}/{volume-id}/{usage-mode} + // Default is /local/csi. + StagePublishBaseDir string + // HealthTimeout is the time after which the CSI plugin tasks will be killed // if the CSI Plugin is not healthy. HealthTimeout time.Duration `mapstructure:"health_timeout" hcl:"health_timeout,optional"` diff --git a/nomad/structs/devices_test.go b/nomad/structs/devices_test.go index 8d43a45ad35..821e237380d 100644 --- a/nomad/structs/devices_test.go +++ b/nomad/structs/devices_test.go @@ -211,7 +211,7 @@ func TestDeviceAccounter_AddReserved(t *testing.T) { // Test that collision detection works func TestDeviceAccounter_AddReserved_Collision(t *testing.T) { ci.Parallel(t) - + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index 8c16123e1c8..77f511f71b5 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -1649,6 +1649,39 @@ func waitConfigDiff(old, new *WaitConfig, contextual bool) *ObjectDiff { return diff } +// changeScriptDiff returns the diff of two ChangeScript objects. 
If contextual +// diff is enabled, all fields will be returned, even if no diff occurred. +func changeScriptDiff(old, new *ChangeScript, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "ChangeScript"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ChangeScript{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ChangeScript{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Args diffs + if setDiff := stringSetDiff(old.Args, new.Args, "Args", contextual); setDiff != nil { + diff.Objects = append(diff.Objects, setDiff) + } + + return diff +} + // templateDiff returns the diff of two Consul Template objects. If contextual diff is // enabled, all fields will be returned, even if no diff occurred. func templateDiff(old, new *Template, contextual bool) *ObjectDiff { @@ -1671,6 +1704,24 @@ func templateDiff(old, new *Template, contextual bool) *ObjectDiff { newPrimitiveFlat = flatmap.Flatten(new, nil, true) } + // Add the pointer primitive fields. + if old != nil { + if old.Uid != nil { + oldPrimitiveFlat["Uid"] = fmt.Sprintf("%v", *old.Uid) + } + if old.Gid != nil { + oldPrimitiveFlat["Gid"] = fmt.Sprintf("%v", *old.Gid) + } + } + if new != nil { + if new.Uid != nil { + newPrimitiveFlat["Uid"] = fmt.Sprintf("%v", *new.Uid) + } + if new.Gid != nil { + newPrimitiveFlat["Gid"] = fmt.Sprintf("%v", *new.Gid) + } + } + // Diff the primitive fields. 
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) @@ -1679,6 +1730,13 @@ func templateDiff(old, new *Template, contextual bool) *ObjectDiff { diff.Objects = append(diff.Objects, waitDiffs) } + // ChangeScript diffs + if changeScriptDiffs := changeScriptDiff( + old.ChangeScript, new.ChangeScript, contextual, + ); changeScriptDiffs != nil { + diff.Objects = append(diff.Objects, changeScriptDiffs) + } + return diff } diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index 1a5751a8c7e..6eca9ab99cc 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -2696,7 +2696,7 @@ func TestTaskGroupDiff(t *testing.T) { }, Gateway: &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { @@ -2778,7 +2778,7 @@ func TestTaskGroupDiff(t *testing.T) { LocalBindPort: 8000, Datacenter: "dc2", LocalBindAddress: "127.0.0.2", - MeshGateway: &ConsulMeshGateway{ + MeshGateway: ConsulMeshGateway{ Mode: "remote", }, }, @@ -2790,7 +2790,7 @@ func TestTaskGroupDiff(t *testing.T) { }, Gateway: &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { @@ -3736,10 +3736,10 @@ func TestTaskGroupDiff(t *testing.T) { { TestCase: "TaskGroup shutdown_delay edited", Old: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(30 * time.Second), + ShutdownDelay: pointer.Of(30 * time.Second), }, New: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(5 * time.Second), + ShutdownDelay: pointer.Of(5 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3756,7 +3756,7 @@ func TestTaskGroupDiff(t *testing.T) { { TestCase: "TaskGroup shutdown_delay removed", Old: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(30 * time.Second), + ShutdownDelay: pointer.Of(30 * time.Second), }, New: &TaskGroup{}, Expected: &TaskGroupDiff{ @@ -3775,7 +3775,7 @@ func TestTaskGroupDiff(t *testing.T) { TestCase: "TaskGroup shutdown_delay added", Old: &TaskGroup{}, New: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(30 * time.Second), + ShutdownDelay: pointer.Of(30 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3943,7 +3943,7 @@ func TestTaskGroupDiff(t *testing.T) { }, New: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(20 * time.Second), + MaxClientDisconnect: pointer.Of(20 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3962,11 +3962,11 @@ func TestTaskGroupDiff(t *testing.T) { TestCase: "MaxClientDisconnect updated", Old: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(10 * time.Second), + MaxClientDisconnect: pointer.Of(10 * time.Second), }, New: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(20 * time.Second), + MaxClientDisconnect: pointer.Of(20 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3985,7 +3985,7 @@ func TestTaskGroupDiff(t *testing.T) { TestCase: "MaxClientDisconnect deleted", Old: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(10 * time.Second), + 
MaxClientDisconnect: pointer.Of(10 * time.Second), }, New: &TaskGroup{ Name: "foo", @@ -7042,11 +7042,19 @@ func TestTaskDiff(t *testing.T) { EmbeddedTmpl: "baz", ChangeMode: "bam", ChangeSignal: "SIGHUP", - Splay: 1, - Perms: "0644", + ChangeScript: &ChangeScript{ + Command: "/bin/foo", + Args: []string{"-debug"}, + Timeout: 5, + FailOnError: false, + }, + Splay: 1, + Perms: "0644", + Uid: pointer.Of(1001), + Gid: pointer.Of(21), Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, { @@ -7055,9 +7063,17 @@ func TestTaskDiff(t *testing.T) { EmbeddedTmpl: "baz2", ChangeMode: "bam2", ChangeSignal: "SIGHUP2", - Splay: 2, - Perms: "0666", - Envvars: true, + ChangeScript: &ChangeScript{ + Command: "/bin/foo2", + Args: []string{"-debugs"}, + Timeout: 6, + FailOnError: false, + }, + Splay: 2, + Perms: "0666", + Uid: pointer.Of(1000), + Gid: pointer.Of(20), + Envvars: true, }, }, }, @@ -7069,11 +7085,19 @@ func TestTaskDiff(t *testing.T) { EmbeddedTmpl: "baz new", ChangeMode: "bam", ChangeSignal: "SIGHUP", - Splay: 1, - Perms: "0644", + ChangeScript: &ChangeScript{ + Command: "/bin/foo", + Args: []string{"-debug"}, + Timeout: 5, + FailOnError: false, + }, + Splay: 1, + Perms: "0644", + Uid: pointer.Of(1001), + Gid: pointer.Of(21), Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { @@ -7082,11 +7106,19 @@ func TestTaskDiff(t *testing.T) { EmbeddedTmpl: "baz3", ChangeMode: "bam3", ChangeSignal: "SIGHUP3", - Splay: 3, - Perms: "0776", + ChangeScript: &ChangeScript{ + Command: "/bin/foo3", + Args: []string{"-debugss"}, + Timeout: 7, + FailOnError: false, + }, + Splay: 3, + Perms: "0776", + Uid: pointer.Of(1002), + Gid: pointer.Of(22), Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, @@ -7154,6 +7186,12 @@ func TestTaskDiff(t *testing.T) { Old: "", New: "false", }, + { + Type: DiffTypeAdded, + Name: "Gid", + Old: "", + New: "22", + }, { Type: DiffTypeAdded, Name: "Perms", @@ -7172,6 +7210,12 @@ func TestTaskDiff(t *testing.T) { Old: "", New: "3", }, + { + Type: DiffTypeAdded, + Name: "Uid", + Old: "", + New: "1002", + }, { Type: DiffTypeAdded, Name: "VaultGrace", @@ -7198,6 +7242,44 @@ func TestTaskDiff(t *testing.T) { }, }, }, + { + Type: DiffTypeAdded, + Name: "ChangeScript", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "Command", + Old: "", + New: "/bin/foo3", + }, + { + Type: DiffTypeAdded, + Name: "FailOnError", + Old: "", + New: "false", + }, + { + Type: DiffTypeAdded, + Name: "Timeout", + Old: "", + New: "7", + }, + }, + Objects: []*ObjectDiff{ + { + Type: DiffTypeAdded, + Name: "Args", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "Args", + Old: "", + New: "-debugss", + }, + }, + }, + }, + }, }, }, { @@ -7234,6 +7316,12 @@ func TestTaskDiff(t *testing.T) { Old: "true", New: "", }, + { + Type: DiffTypeDeleted, + Name: "Gid", + Old: "20", + New: "", + }, { Type: DiffTypeDeleted, Name: "Perms", @@ -7252,6 +7340,12 @@ func TestTaskDiff(t *testing.T) { Old: "2", New: "", }, + { + Type: DiffTypeDeleted, + Name: "Uid", + Old: "1000", + New: "", + }, { Type: DiffTypeDeleted, Name: "VaultGrace", @@ -7259,6 +7353,46 @@ func TestTaskDiff(t *testing.T) { New: "", }, }, + Objects: 
[]*ObjectDiff{ + { + Type: DiffTypeDeleted, + Name: "ChangeScript", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "Command", + Old: "/bin/foo2", + New: "", + }, + { + Type: DiffTypeDeleted, + Name: "FailOnError", + Old: "false", + New: "", + }, + { + Type: DiffTypeDeleted, + Name: "Timeout", + Old: "6", + New: "", + }, + }, + Objects: []*ObjectDiff{ + { + Type: DiffTypeDeleted, + Name: "Args", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "Args", + Old: "-debugs", + New: "", + }, + }, + }, + }, + }, + }, }, }, }, diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 8bbdc7a4139..1dd7ed87cf8 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -12,29 +12,29 @@ import ( // "zone", "rack", etc. // // According to CSI, there are a few requirements for the keys within this map: -// - Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// - The key name segment is REQUIRED. The prefix is OPTIONAL. -// - The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". -// - The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). -// - The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// - If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// - Each value (topological segment) MUST contain 1 or more strings. -// - Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. 
// // However, Nomad applies lighter restrictions to these, as they are already // only referenced by plugin within the scheduler and as such collisions and diff --git a/nomad/structs/node_class_test.go b/nomad/structs/node_class_test.go index 77faca36ba8..3975f0038aa 100644 --- a/nomad/structs/node_class_test.go +++ b/nomad/structs/node_class_test.go @@ -217,7 +217,7 @@ func TestNode_ComputedClass_Meta(t *testing.T) { func TestNode_EscapedConstraints(t *testing.T) { ci.Parallel(t) - + // Non-escaped constraints ne1 := &Constraint{ LTarget: "${attr.kernel.name}", diff --git a/nomad/structs/operator.go b/nomad/structs/operator.go index 28452a312f5..a6cfced9c71 100644 --- a/nomad/structs/operator.go +++ b/nomad/structs/operator.go @@ -125,6 +125,15 @@ type AutopilotConfig struct { ModifyIndex uint64 } +func (a *AutopilotConfig) Copy() *AutopilotConfig { + if a == nil { + return nil + } + + na := *a + return &na +} + // SchedulerAlgorithm is an enum string that encapsulates the valid options for a // SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the // scheduler to be user-selectable. @@ -167,6 +176,15 @@ type SchedulerConfiguration struct { ModifyIndex uint64 } +func (s *SchedulerConfiguration) Copy() *SchedulerConfiguration { + if s == nil { + return s + } + + ns := *s + return &ns +} + func (s *SchedulerConfiguration) EffectiveSchedulerAlgorithm() SchedulerAlgorithm { if s == nil || s.SchedulerAlgorithm == "" { return SchedulerAlgorithmBinpack diff --git a/nomad/structs/search.go b/nomad/structs/search.go index 1ba8b78db3b..52cdb43e540 100644 --- a/nomad/structs/search.go +++ b/nomad/structs/search.go @@ -59,6 +59,15 @@ type SearchConfig struct { MinTermLength int `hcl:"min_term_length"` } +func (s *SearchConfig) Copy() *SearchConfig { + if s == nil { + return nil + } + + ns := *s + return &ns +} + // SearchResponse is used to return matches and information about whether // the match list is truncated specific to each type of Context. type SearchResponse struct { @@ -94,7 +103,8 @@ type SearchRequest struct { // ID. // // e.g. A Task-level service would have scope like, -// ["", "", "", ""] +// +// ["", "", "", ""] type FuzzyMatch struct { ID string // ID is UUID or Name of object Scope []string `json:",omitempty"` // IDs of parent objects diff --git a/nomad/structs/secure_variables.go b/nomad/structs/secure_variables.go index d5af53ce66a..dead3c1b055 100644 --- a/nomad/structs/secure_variables.go +++ b/nomad/structs/secure_variables.go @@ -17,19 +17,13 @@ import ( ) const ( - // SecureVariablesUpsertRPCMethod is the RPC method for upserting - // secure variables into Nomad state. + // SecureVariablesApplyRPCMethod is the RPC method for upserting or + // deleting a secure variable by its namespace and path, with optional + // conflict detection. // - // Args: SecureVariablesUpsertRequest - // Reply: SecureVariablesUpsertResponse - SecureVariablesUpsertRPCMethod = "SecureVariables.Upsert" - - // SecureVariablesDeleteRPCMethod is the RPC method for deleting - // a secure variable by its namespace and path. - // - // Args: SecureVariablesDeleteRequest - // Reply: SecureVariablesDeleteResponse - SecureVariablesDeleteRPCMethod = "SecureVariables.Delete" + // Args: SecureVariablesApplyRequest + // Reply: SecureVariablesApplyResponse + SecureVariablesApplyRPCMethod = "SecureVariables.Apply" // SecureVariablesListRPCMethod is the RPC method for listing secure // variables within Nomad. 
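The hunk above collapses the old upsert and delete RPCs into a single `SecureVariables.Apply` endpoint; its request/response shapes (`SecureVariablesApplyRequest`, `SecureVariablesApplyResponse`, and the `SVOp*` constants) are defined a little further down in this diff. As an illustration only, and not part of the change itself, a caller performing a check-and-set write against that endpoint might look roughly like the sketch below; the `rpcCall` parameter is a hypothetical stand-in for whatever RPC plumbing the caller already has.

```go
package example

import (
	"errors"

	"github.com/hashicorp/nomad/nomad/structs"
)

// casWrite is an illustrative sketch only. rpcCall is a hypothetical
// stand-in for the caller's existing Nomad RPC dispatch mechanism.
func casWrite(rpcCall func(method string, args, reply any) error, v *structs.SecureVariableDecrypted) error {
	args := &structs.SecureVariablesApplyRequest{
		Op:  structs.SVOpCAS, // check-and-set rather than a blind SVOpSet
		Var: v,
	}

	var reply structs.SecureVariablesApplyResponse
	if err := rpcCall(structs.SecureVariablesApplyRPCMethod, args, &reply); err != nil {
		return err
	}
	if reply.IsConflict() {
		// reply.Conflict carries the currently stored (possibly redacted) value.
		return errors.New("secure variable was modified by another writer")
	}
	return reply.Error
}
```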
@@ -214,13 +208,14 @@ func (sv SecureVariableMetadata) GetCreateIndex() uint64 { return sv.CreateIndex } -// SecureVariablesQuota is used to track the total size of secure -// variables entries per namespace. The total length of -// SecureVariable.EncryptedData will be added to the SecureVariablesQuota -// table in the same transaction as a write, update, or delete. +// SecureVariablesQuota is used to track the total size of secure variables +// entries per namespace. The total length of SecureVariable.EncryptedData in +// bytes will be added to the SecureVariablesQuota table in the same transaction +// as a write, update, or delete. This tracking effectively caps the maximum +// size of secure variables in a given namespace to MaxInt64 bytes. type SecureVariablesQuota struct { Namespace string - Size uint64 + Size int64 CreateIndex uint64 ModifyIndex uint64 } @@ -234,26 +229,127 @@ func (svq *SecureVariablesQuota) Copy() *SecureVariablesQuota { return nq } -type SecureVariablesUpsertRequest struct { - Data []*SecureVariableDecrypted - CheckIndex *uint64 +// --------------------------------------- +// RPC and FSM request/response objects + +// SVOp constants give possible operations available in a transaction. +type SVOp string + +const ( + SVOpSet SVOp = "set" + SVOpDelete SVOp = "delete" + SVOpDeleteCAS SVOp = "delete-cas" + SVOpCAS SVOp = "cas" +) + +// SVOpResult constants give possible operations results from a transaction. +type SVOpResult string + +const ( + SVOpResultOk SVOpResult = "ok" + SVOpResultConflict SVOpResult = "conflict" + SVOpResultRedacted SVOpResult = "conflict-redacted" + SVOpResultError SVOpResult = "error" +) + +// SecureVariablesApplyRequest is used by users to operate on the secure variable store +type SecureVariablesApplyRequest struct { + Op SVOp // Operation to be performed during apply + Var *SecureVariableDecrypted // Variable-shaped request data WriteRequest } -func (svur *SecureVariablesUpsertRequest) SetCheckIndex(ci uint64) { - svur.CheckIndex = &ci +// SecureVariablesApplyResponse is sent back to the user to inform them of success or failure +type SecureVariablesApplyResponse struct { + Op SVOp // Operation performed + Input *SecureVariableDecrypted // Input supplied + Result SVOpResult // Return status from operation + Error error // Error if any + Conflict *SecureVariableDecrypted // Conflicting value if applicable + Output *SecureVariableDecrypted // Operation Result if successful; nil for successful deletes + WriteMeta +} + +func (r *SecureVariablesApplyResponse) IsOk() bool { + return r.Result == SVOpResultOk } -type SecureVariablesEncryptedUpsertRequest struct { - Data []*SecureVariableEncrypted +func (r *SecureVariablesApplyResponse) IsConflict() bool { + return r.Result == SVOpResultConflict || r.Result == SVOpResultRedacted +} + +func (r *SecureVariablesApplyResponse) IsError() bool { + return r.Result == SVOpResultError +} + +func (r *SecureVariablesApplyResponse) IsRedacted() bool { + return r.Result == SVOpResultRedacted +} + +// SVApplyStateRequest is used by the FSM to modify the secure variable store +type SVApplyStateRequest struct { + Op SVOp // Which operation are we performing + Var *SecureVariableEncrypted // Which directory entry WriteRequest } -type SecureVariablesUpsertResponse struct { - Conflicts []*SecureVariableDecrypted +// SVApplyStateResponse is used by the FSM to inform the RPC layer of success or failure +type SVApplyStateResponse struct { + Op SVOp // Which operation were we performing + Result SVOpResult // What 
happened (ok, conflict, error) + Error error // error if any + Conflict *SecureVariableEncrypted // conflicting secure variable if applies + WrittenSVMeta *SecureVariableMetadata // for making the SecureVariablesApplyResponse WriteMeta } +func (r *SVApplyStateRequest) ErrorResponse(raftIndex uint64, err error) *SVApplyStateResponse { + return &SVApplyStateResponse{ + Op: r.Op, + Result: SVOpResultError, + Error: err, + WriteMeta: WriteMeta{Index: raftIndex}, + } +} + +func (r *SVApplyStateRequest) SuccessResponse(raftIndex uint64, meta *SecureVariableMetadata) *SVApplyStateResponse { + return &SVApplyStateResponse{ + Op: r.Op, + Result: SVOpResultOk, + WrittenSVMeta: meta, + WriteMeta: WriteMeta{Index: raftIndex}, + } +} + +func (r *SVApplyStateRequest) ConflictResponse(raftIndex uint64, cv *SecureVariableEncrypted) *SVApplyStateResponse { + var cvCopy SecureVariableEncrypted + if cv != nil { + // make a copy so that we aren't sending + // the live state store version + cvCopy = cv.Copy() + } + return &SVApplyStateResponse{ + Op: r.Op, + Result: SVOpResultConflict, + Conflict: &cvCopy, + WriteMeta: WriteMeta{Index: raftIndex}, + } +} + +func (r *SVApplyStateResponse) IsOk() bool { + return r.Result == SVOpResultOk +} + +func (r *SVApplyStateResponse) IsConflict() bool { + return r.Result == SVOpResultConflict +} + +func (r *SVApplyStateResponse) IsError() bool { + // FIXME: This is brittle and requires immense faith that + // the response is properly managed. + return r.Result == SVOpResultError +} + type SecureVariablesListRequest struct { QueryOptions } @@ -273,20 +369,8 @@ type SecureVariablesReadResponse struct { QueryMeta } -type SecureVariablesDeleteRequest struct { - Path string - CheckIndex *uint64 - WriteRequest -} - -func (svdr *SecureVariablesDeleteRequest) SetCheckIndex(ci uint64) { - svdr.CheckIndex = &ci -} - -type SecureVariablesDeleteResponse struct { - Conflict *SecureVariableDecrypted - WriteMeta -} +// --------------------------------------- +// Keyring state and RPC objects // RootKey is used to encrypt and decrypt secure variables. It is // never stored in raft. diff --git a/nomad/structs/services.go b/nomad/structs/services.go index 757c3185dd3..38ddd5970a3 100644 --- a/nomad/structs/services.go +++ b/nomad/structs/services.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/args" + "github.com/hashicorp/nomad/helper/pointer" "github.com/mitchellh/copystructure" "golang.org/x/exp/slices" ) @@ -193,13 +194,14 @@ func (sc *ServiceCheck) Equals(o *ServiceCheck) bool { return true } -func (sc *ServiceCheck) Canonicalize(serviceName string) { +func (sc *ServiceCheck) Canonicalize(serviceName, taskName string) { // Ensure empty maps/slices are treated as null to avoid scheduling // issues when using DeepEquals. if len(sc.Args) == 0 { sc.Args = nil } + // Ensure empty slices are nil if len(sc.Header) == 0 { sc.Header = nil } else { @@ -210,10 +212,17 @@ func (sc *ServiceCheck) Canonicalize(serviceName string) { } } + // Ensure a default name for the check if sc.Name == "" { sc.Name = fmt.Sprintf("service: %q check", serviceName) } + // Set task name if not already set + if sc.TaskName == "" && taskName != "group" { + sc.TaskName = taskName + } + + // Ensure OnUpdate defaults to require_healthy (i.e. 
healthiness check) if sc.OnUpdate == "" { sc.OnUpdate = OnUpdateRequireHealthy } @@ -344,17 +353,8 @@ func (sc *ServiceCheck) validateNomad() error { } if sc.Type == "http" { - if sc.Method != "" && sc.Method != "GET" { - // unset turns into GET - return fmt.Errorf("http checks may only use GET method in Nomad services") - } - - if len(sc.Header) > 0 { - return fmt.Errorf("http checks may not set headers in Nomad services") - } - - if len(sc.Body) > 0 { - return fmt.Errorf("http checks may not set Body in Nomad services") + if sc.Method != "" && !helper.IsMethodHTTP(sc.Method) { + return fmt.Errorf("method type %q not supported in Nomad http check", sc.Method) } } @@ -534,9 +534,10 @@ type Service struct { Name string // Name of the Task associated with this service. - // - // Currently only used to identify the implementing task of a Consul - // Connect Native enabled service. + // Group services do not have a task name, unless they are a connect native + // service specifying the task implementing the service. + // Task-level services automatically have the task name plumbed through + // down to checks for convenience. TaskName string // PortLabel is either the numeric port number or the `host:port`. @@ -632,6 +633,11 @@ func (s *Service) Canonicalize(job, taskGroup, task, jobNamespace string) { s.TaggedAddresses = nil } + // Set the task name if not already set + if s.TaskName == "" && task != "group" { + s.TaskName = task + } + s.Name = args.ReplaceEnv(s.Name, map[string]string{ "JOB": job, "TASKGROUP": taskGroup, @@ -640,7 +646,7 @@ func (s *Service) Canonicalize(job, taskGroup, task, jobNamespace string) { }) for _, check := range s.Checks { - check.Canonicalize(s.Name) + check.Canonicalize(s.Name, s.TaskName) } // Set the provider to its default value. The value of consul ensures this @@ -900,20 +906,7 @@ func (s *Service) Equals(o *Service) bool { return false } - if len(s.Checks) != len(o.Checks) { - return false - } - -OUTER: - for i := range s.Checks { - for ii := range o.Checks { - if s.Checks[i].Equals(o.Checks[ii]) { - // Found match; continue with next check - continue OUTER - } - } - - // No match + if !helper.ElementsEquals(s.Checks, o.Checks) { return false } @@ -1255,11 +1248,11 @@ func (t *SidecarTask) Copy() *SidecarTask { } if t.KillTimeout != nil { - nt.KillTimeout = helper.TimeToPtr(*t.KillTimeout) + nt.KillTimeout = pointer.Of(*t.KillTimeout) } if t.ShutdownDelay != nil { - nt.ShutdownDelay = helper.TimeToPtr(*t.ShutdownDelay) + nt.ShutdownDelay = pointer.Of(*t.ShutdownDelay) } return nt @@ -1452,21 +1445,13 @@ type ConsulMeshGateway struct { Mode string } -func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway { - if c == nil { - return nil - } - - return &ConsulMeshGateway{ +func (c *ConsulMeshGateway) Copy() ConsulMeshGateway { + return ConsulMeshGateway{ Mode: c.Mode, } } -func (c *ConsulMeshGateway) Equals(o *ConsulMeshGateway) bool { - if c == nil || o == nil { - return c == o - } - +func (c *ConsulMeshGateway) Equals(o ConsulMeshGateway) bool { return c.Mode == o.Mode } @@ -1504,7 +1489,7 @@ type ConsulUpstream struct { // MeshGateway is the optional configuration of the mesh gateway for this // upstream to use. 
- MeshGateway *ConsulMeshGateway + MeshGateway ConsulMeshGateway } func upstreamsEquals(a, b []ConsulUpstream) bool { @@ -1785,7 +1770,7 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { } return &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(*p.ConnectTimeout), + ConnectTimeout: pointer.Of(*p.ConnectTimeout), EnvoyGatewayBindTaggedAddresses: p.EnvoyGatewayBindTaggedAddresses, EnvoyGatewayBindAddresses: p.copyBindAddresses(), EnvoyGatewayNoDefaultBind: p.EnvoyGatewayNoDefaultBind, diff --git a/nomad/structs/services_test.go b/nomad/structs/services_test.go index b44c9baefb8..93954769435 100644 --- a/nomad/structs/services_test.go +++ b/nomad/structs/services_test.go @@ -7,9 +7,8 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/shoenig/test/must" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -57,6 +56,41 @@ func TestServiceCheck_Hash(t *testing.T) { }) } +func TestServiceCheck_Canonicalize(t *testing.T) { + ci.Parallel(t) + + t.Run("defaults", func(t *testing.T) { + sc := &ServiceCheck{ + Args: []string{}, + Header: make(map[string][]string), + Method: "", + OnUpdate: "", + } + sc.Canonicalize("MyService", "task1") + must.Nil(t, sc.Args) + must.Nil(t, sc.Header) + must.Eq(t, `service: "MyService" check`, sc.Name) + must.Eq(t, "", sc.Method) + must.Eq(t, OnUpdateRequireHealthy, sc.OnUpdate) + }) + + t.Run("check name set", func(t *testing.T) { + sc := &ServiceCheck{ + Name: "Some Check", + } + sc.Canonicalize("MyService", "task1") + must.Eq(t, "Some Check", sc.Name) + }) + + t.Run("on_update is set", func(t *testing.T) { + sc := &ServiceCheck{ + OnUpdate: OnUpdateIgnore, + } + sc.Canonicalize("MyService", "task1") + must.Eq(t, OnUpdateIgnore, sc.OnUpdate) + }) +} + func TestServiceCheck_validate_PassingTypes(t *testing.T) { ci.Parallel(t) @@ -268,7 +302,17 @@ func TestServiceCheck_validateNomad(t *testing.T) { Path: "/health", Method: "HEAD", }, - exp: `http checks may only use GET method in Nomad services`, + }, + { + name: "http unknown method type", + sc: &ServiceCheck{ + Type: ServiceCheckHTTP, + Interval: 3 * time.Second, + Timeout: 1 * time.Second, + Path: "/health", + Method: "Invalid", + }, + exp: `method type "Invalid" not supported in Nomad http check`, }, { name: "http with headers", @@ -278,9 +322,11 @@ func TestServiceCheck_validateNomad(t *testing.T) { Timeout: 1 * time.Second, Path: "/health", Method: "GET", - Header: map[string][]string{"foo": {"bar"}}, + Header: map[string][]string{ + "foo": {"bar"}, + "baz": nil, + }, }, - exp: `http checks may not set headers in Nomad services`, }, { name: "http with body", @@ -289,10 +335,9 @@ func TestServiceCheck_validateNomad(t *testing.T) { Interval: 3 * time.Second, Timeout: 1 * time.Second, Path: "/health", - Method: "GET", - Body: "blah", + Method: "POST", + Body: "this is a request payload!", }, - exp: `http checks may not set Body in Nomad services`, }, } @@ -486,7 +531,7 @@ func TestConsulConnect_GatewayProxy_CopyEquals(t *testing.T) { ci.Parallel(t) c := &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: make(map[string]*ConsulGatewayBindAddress), } @@ -520,11 +565,11 @@ func TestSidecarTask_MergeIntoTask(t *testing.T) { Meta: map[string]string{ "abc": "123", }, - KillTimeout: helper.TimeToPtr(15 * time.Second), 
+ KillTimeout: pointer.Of(15 * time.Second), LogConfig: &LogConfig{ MaxFiles: 3, }, - ShutdownDelay: helper.TimeToPtr(5 * time.Second), + ShutdownDelay: pointer.Of(5 * time.Second), KillSignal: "SIGABRT", } @@ -566,12 +611,12 @@ func TestSidecarTask_Equals(t *testing.T) { Env: map[string]string{"color": "blue"}, Resources: &Resources{MemoryMB: 300}, Meta: map[string]string{"index": "1"}, - KillTimeout: helper.TimeToPtr(2 * time.Second), + KillTimeout: pointer.Of(2 * time.Second), LogConfig: &LogConfig{ MaxFiles: 2, MaxFileSizeMB: 300, }, - ShutdownDelay: helper.TimeToPtr(10 * time.Second), + ShutdownDelay: pointer.Of(10 * time.Second), KillSignal: "SIGTERM", } @@ -618,7 +663,7 @@ func TestSidecarTask_Equals(t *testing.T) { }) t.Run("mod kill timeout", func(t *testing.T) { - try(t, func(s *st) { s.KillTimeout = helper.TimeToPtr(3 * time.Second) }) + try(t, func(s *st) { s.KillTimeout = pointer.Of(3 * time.Second) }) }) t.Run("mod log config", func(t *testing.T) { @@ -626,7 +671,7 @@ func TestSidecarTask_Equals(t *testing.T) { }) t.Run("mod shutdown delay", func(t *testing.T) { - try(t, func(s *st) { s.ShutdownDelay = helper.TimeToPtr(20 * time.Second) }) + try(t, func(s *st) { s.ShutdownDelay = pointer.Of(20 * time.Second) }) }) t.Run("mod kill signal", func(t *testing.T) { @@ -667,8 +712,8 @@ func TestConsulUpstream_upstreamEquals(t *testing.T) { }) t.Run("different mesh_gateway", func(t *testing.T) { - a := []ConsulUpstream{{DestinationName: "foo", MeshGateway: &ConsulMeshGateway{Mode: "local"}}} - b := []ConsulUpstream{{DestinationName: "foo", MeshGateway: &ConsulMeshGateway{Mode: "remote"}}} + a := []ConsulUpstream{{DestinationName: "foo", MeshGateway: ConsulMeshGateway{Mode: "local"}}} + b := []ConsulUpstream{{DestinationName: "foo", MeshGateway: ConsulMeshGateway{Mode: "remote"}}} require.False(t, upstreamsEquals(a, b)) }) @@ -779,7 +824,7 @@ func TestConsulSidecarService_Copy(t *testing.T) { var ( consulIngressGateway1 = &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "listener1": {Address: "10.0.0.1", Port: 2001}, @@ -816,7 +861,7 @@ var ( consulTerminatingGateway1 = &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyDNSDiscoveryType: "STRICT_DNS", EnvoyGatewayBindAddresses: nil, }, @@ -835,7 +880,7 @@ var ( consulMeshGateway1 = &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), }, Mesh: &ConsulMeshConfigEntry{ // nothing @@ -940,7 +985,7 @@ func TestConsulGateway_Equals_ingress(t *testing.T) { // proxy stanza equality checks t.Run("mod gateway timeout", func(t *testing.T) { - try(t, func(g *cg) { g.Proxy.ConnectTimeout = helper.TimeToPtr(9 * time.Second) }) + try(t, func(g *cg) { g.Proxy.ConnectTimeout = pointer.Of(9 * time.Second) }) }) t.Run("mod gateway envoy_gateway_bind_tagged_addresses", func(t *testing.T) { @@ -1222,7 +1267,7 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { t.Run("invalid bind address", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { Address: "10.0.0.1", @@ -1234,7 +1279,7 @@ func 
TestConsulGatewayProxy_Validate(t *testing.T) { t.Run("invalid dns discovery type", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyDNSDiscoveryType: "RANDOM_DNS", }).Validate() require.EqualError(t, err, "Consul Gateway Proxy Envoy DNS Discovery type must be STRICT_DNS or LOGICAL_DNS") @@ -1242,14 +1287,14 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { t.Run("ok with nothing set", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), }).Validate() require.NoError(t, err) }) t.Run("ok with everything set", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { Address: "10.0.0.1", @@ -1592,11 +1637,11 @@ func TestConsulMeshGateway_Copy(t *testing.T) { func TestConsulMeshGateway_Equals(t *testing.T) { ci.Parallel(t) - c := &ConsulMeshGateway{Mode: "local"} - require.False(t, c.Equals(nil)) + c := ConsulMeshGateway{Mode: "local"} + require.False(t, c.Equals(ConsulMeshGateway{})) require.True(t, c.Equals(c)) - o := &ConsulMeshGateway{Mode: "remote"} + o := ConsulMeshGateway{Mode: "remote"} require.False(t, c.Equals(o)) } @@ -1629,24 +1674,24 @@ func TestService_Validate(t *testing.T) { name string }{ { + name: "base service", input: &Service{ Name: "testservice", }, expErr: false, - name: "base service", }, { + name: "Native Connect without task name", input: &Service{ Name: "testservice", Connect: &ConsulConnect{ Native: true, }, }, - expErr: true, - expErrStr: "Connect Native and requires setting the task", - name: "Native Connect without task name", + expErr: false, // gets set automatically }, { + name: "Native Connect with task name", input: &Service{ Name: "testservice", TaskName: "testtask", @@ -1655,9 +1700,9 @@ func TestService_Validate(t *testing.T) { }, }, expErr: false, - name: "Native Connect with task name", }, { + name: "Native Connect with Sidecar", input: &Service{ Name: "testservice", TaskName: "testtask", @@ -1668,9 +1713,9 @@ func TestService_Validate(t *testing.T) { }, expErr: true, expErrStr: "Consul Connect must be exclusively native", - name: "Native Connect with Sidecar", }, { + name: "provider nomad with checks", input: &Service{ Name: "testservice", Provider: "nomad", @@ -1692,9 +1737,9 @@ func TestService_Validate(t *testing.T) { }, }, expErr: false, - name: "provider nomad with checks", }, { + name: "provider nomad with invalid check type", input: &Service{ Name: "testservice", Provider: "nomad", @@ -1706,9 +1751,9 @@ func TestService_Validate(t *testing.T) { }, }, expErr: true, - name: "provider nomad with invalid check type", }, { + name: "provider nomad with connect", input: &Service{ Name: "testservice", Provider: "nomad", @@ -1718,15 +1763,14 @@ func TestService_Validate(t *testing.T) { }, expErr: true, expErrStr: "Service with provider nomad cannot include Connect blocks", - name: "provider nomad with connect", }, { + name: "provider nomad valid", input: &Service{ Name: "testservice", Provider: "nomad", }, expErr: false, - name: "provider nomad valid", }, } @@ -1735,10 +1779,10 @@ func TestService_Validate(t *testing.T) { tc.input.Canonicalize("testjob", "testgroup", "testtask", "testnamespace") err := tc.input.Validate() if tc.expErr { - assert.Error(t, err) - assert.Contains(t, 
err.Error(), tc.expErrStr) + require.Error(t, err) + require.Contains(t, err.Error(), tc.expErrStr) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 24f91ab7bb4..1d88f7c4333 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -37,6 +37,7 @@ import ( "github.com/hashicorp/nomad/helper/args" "github.com/hashicorp/nomad/helper/constraints/semver" "github.com/hashicorp/nomad/helper/escapingfs" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/lib/cpuset" "github.com/hashicorp/nomad/lib/kheap" @@ -109,12 +110,11 @@ const ( ServiceRegistrationUpsertRequestType MessageType = 47 ServiceRegistrationDeleteByIDRequestType MessageType = 48 ServiceRegistrationDeleteByNodeIDRequestType MessageType = 49 - SecureVariableUpsertRequestType MessageType = 50 - SecureVariableDeleteRequestType MessageType = 51 - RootKeyMetaUpsertRequestType MessageType = 52 - RootKeyMetaDeleteRequestType MessageType = 53 - ACLRolesUpsertRequestType MessageType = 54 - ACLRolesDeleteByIDRequestType MessageType = 55 + SVApplyStateRequestType MessageType = 50 + RootKeyMetaUpsertRequestType MessageType = 51 + RootKeyMetaDeleteRequestType MessageType = 52 + ACLRolesUpsertRequestType MessageType = 53 + ACLRolesDeleteByIDRequestType MessageType = 54 // Namespace types were moved from enterprise and therefore start at 64 NamespaceUpsertRequestType MessageType = 64 @@ -1030,6 +1030,7 @@ type AllocsGetRequest struct { type AllocRestartRequest struct { AllocID string TaskName string + AllTasks bool QueryOptions } @@ -2061,9 +2062,8 @@ func (n *Node) Copy() *Node { if n == nil { return nil } - nn := new(Node) - *nn = *n - nn.Attributes = helper.CopyMapStringString(nn.Attributes) + nn := *n + nn.Attributes = helper.CopyMap(nn.Attributes) nn.NodeResources = nn.NodeResources.Copy() nn.ReservedResources = nn.ReservedResources.Copy() nn.Resources = nn.Resources.Copy() @@ -2071,87 +2071,14 @@ func (n *Node) Copy() *Node { nn.Links = helper.CopyMapStringString(nn.Links) nn.Meta = helper.CopyMapStringString(nn.Meta) nn.DrainStrategy = nn.DrainStrategy.Copy() - nn.Events = copyNodeEvents(n.Events) - nn.Drivers = copyNodeDrivers(n.Drivers) - nn.CSIControllerPlugins = copyNodeCSI(nn.CSIControllerPlugins) - nn.CSINodePlugins = copyNodeCSI(nn.CSINodePlugins) - nn.HostVolumes = copyNodeHostVolumes(n.HostVolumes) - nn.HostNetworks = copyNodeHostNetworks(n.HostNetworks) + nn.Events = helper.CopySlice(n.Events) + nn.Drivers = helper.DeepCopyMap(n.Drivers) + nn.CSIControllerPlugins = helper.DeepCopyMap(nn.CSIControllerPlugins) + nn.CSINodePlugins = helper.DeepCopyMap(nn.CSINodePlugins) + nn.HostVolumes = helper.DeepCopyMap(n.HostVolumes) + nn.HostNetworks = helper.DeepCopyMap(n.HostNetworks) nn.LastDrain = nn.LastDrain.Copy() - return nn -} - -// copyNodeEvents is a helper to copy a list of NodeEvent's -func copyNodeEvents(events []*NodeEvent) []*NodeEvent { - l := len(events) - if l == 0 { - return nil - } - - c := make([]*NodeEvent, l) - for i, event := range events { - c[i] = event.Copy() - } - return c -} - -// copyNodeCSI is a helper to copy a map of CSIInfo -func copyNodeCSI(plugins map[string]*CSIInfo) map[string]*CSIInfo { - l := len(plugins) - if l == 0 { - return nil - } - - c := make(map[string]*CSIInfo, l) - for plugin, info := range plugins { - c[plugin] = info.Copy() - } - - return c -} - -// copyNodeDrivers is a helper to copy a map of DriverInfo -func 
copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo { - l := len(drivers) - if l == 0 { - return nil - } - - c := make(map[string]*DriverInfo, l) - for driver, info := range drivers { - c[driver] = info.Copy() - } - return c -} - -// copyNodeHostVolumes is a helper to copy a map of string to Volume -func copyNodeHostVolumes(volumes map[string]*ClientHostVolumeConfig) map[string]*ClientHostVolumeConfig { - l := len(volumes) - if l == 0 { - return nil - } - - c := make(map[string]*ClientHostVolumeConfig, l) - for volume, v := range volumes { - c[volume] = v.Copy() - } - - return c -} - -// copyNodeHostVolumes is a helper to copy a map of string to HostNetwork -func copyNodeHostNetworks(networks map[string]*ClientHostNetworkConfig) map[string]*ClientHostNetworkConfig { - l := len(networks) - if l == 0 { - return nil - } - - c := make(map[string]*ClientHostNetworkConfig, l) - for network, v := range networks { - c[network] = v.Copy() - } - - return c + return &nn } // TerminalStatus returns if the current status is terminal and @@ -6677,10 +6604,19 @@ func (tg *TaskGroup) validateServices() error { for _, service := range task.Services { - // Ensure no task-level checks specify a task + // Ensure no task-level service can only specify the task it belongs to. + if service.TaskName != "" && service.TaskName != task.Name { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Service %s is invalid: may only specify task the service belongs to, got %q", service.Name, service.TaskName), + ) + } + + // Ensure no task-level checks can only specify the task they belong to. for _, check := range service.Checks { - if check.TaskName != "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %s is invalid: only task group service checks can be assigned tasks", check.Name)) + if check.TaskName != "" && check.TaskName != task.Name { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Check %s is invalid: may only specify task the check belongs to, got %q", check.Name, check.TaskName), + ) } } @@ -7107,6 +7043,25 @@ func (t *Task) UsesConnectSidecar() bool { return t.Kind.IsConnectProxy() || t.Kind.IsAnyConnectGateway() } +func (t *Task) IsPrestart() bool { + return t != nil && t.Lifecycle != nil && + t.Lifecycle.Hook == TaskLifecycleHookPrestart +} + +func (t *Task) IsMain() bool { + return t != nil && (t.Lifecycle == nil || t.Lifecycle.Hook == "") +} + +func (t *Task) IsPoststart() bool { + return t != nil && t.Lifecycle != nil && + t.Lifecycle.Hook == TaskLifecycleHookPoststart +} + +func (t *Task) IsPoststop() bool { + return t != nil && t.Lifecycle != nil && + t.Lifecycle.Hook == TaskLifecycleHookPoststop +} + func (t *Task) Copy() *Task { if t == nil { return nil @@ -7679,14 +7634,32 @@ const ( // TemplateChangeModeRestart marks that the task should be restarted if the // template is re-rendered TemplateChangeModeRestart = "restart" + + // TemplateChangeModeScript marks that the task should trigger a script if + // the template is re-rendered + TemplateChangeModeScript = "script" ) var ( // TemplateChangeModeInvalidError is the error for when an invalid change // mode is given - TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart") + TemplateChangeModeInvalidError = errors.New("Invalid change mode. 
Must be one of the following: noop, signal, script, restart") ) +// ChangeScript holds the configuration for the script that is executed if +// change mode is set to script +type ChangeScript struct { + // Command is the full path to the script + Command string + // Args is a slice of arguments passed to the script + Args []string + // Timeout is the amount of seconds we wait for the script to finish + Timeout time.Duration + // FailOnError indicates whether a task should fail in case script execution + // fails or log script failure and don't interrupt the task + FailOnError bool +} + // Template represents a template configuration to be rendered for a given task type Template struct { // SourcePath is the path to the template to be rendered @@ -7706,6 +7679,10 @@ type Template struct { // requires it. ChangeSignal string + // ChangeScript is the configuration of the script. It's required if + // ChangeMode is set to script. + ChangeScript *ChangeScript + // Splay is used to avoid coordinated restarts of processes by applying a // random wait between 0 and the given splay value before signalling the // application of a change @@ -7713,6 +7690,9 @@ type Template struct { // Perms is the permission the file should be written out with. Perms string + // User and group that should own the file. + Uid *int + Gid *int // LeftDelim and RightDelim are optional configurations to control what // delimiter is utilized when parsing the template. @@ -7801,6 +7781,10 @@ func (t *Template) Validate() error { if t.Envvars { _ = multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates")) } + case TemplateChangeModeScript: + if t.ChangeScript.Command == "" { + _ = multierror.Append(&mErr, fmt.Errorf("must specify script path value when change mode is script")) + } default: _ = multierror.Append(&mErr, TemplateChangeModeInvalidError) } @@ -7923,7 +7907,7 @@ type AllocState struct { // they are assigned to is down, their state is migrated to the replacement // allocation. // -// Minimal set of fields from plugins/drivers/task_handle.go:TaskHandle +// Minimal set of fields from plugins/drivers/task_handle.go:TaskHandle type TaskHandle struct { // Version of driver state. Used by the driver to gracefully handle // plugin upgrades. @@ -8062,7 +8046,7 @@ const ( // restarted because it has exceeded its restart policy. TaskNotRestarting = "Not Restarting" - // TaskRestartSignal indicates that the task has been signalled to be + // TaskRestartSignal indicates that the task has been signaled to be // restarted TaskRestartSignal = "Restart Signaled" @@ -8106,6 +8090,10 @@ const ( // TaskHookFailed indicates that one of the hooks for a task failed. TaskHookFailed = "Task hook failed" + // TaskHookMessage indicates that one of the hooks for a task emitted a + // message. + TaskHookMessage = "Task hook message" + // TaskRestoreFailed indicates Nomad was unable to reattach to a // restored task. 
TaskRestoreFailed = "Failed Restoring Task" @@ -8339,6 +8327,9 @@ func (e *TaskEvent) PopulateEventDisplayMessage() { } func (e *TaskEvent) GoString() string { + if e == nil { + return "" + } return fmt.Sprintf("%v - %v", e.Time, e.Type) } @@ -10814,7 +10805,7 @@ func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus { *c = *a if a.Healthy != nil { - c.Healthy = helper.BoolToPtr(*a.Healthy) + c.Healthy = pointer.Of(*a.Healthy) } return c @@ -11792,11 +11783,21 @@ type ACLPolicy struct { Description string // Human readable Rules string // HCL or JSON format RulesJSON *acl.Policy // Generated from Rules on read + JobACL *JobACL Hash []byte + CreateIndex uint64 ModifyIndex uint64 } +// JobACL represents an ACL policy's attachment to a job, group, or task. +type JobACL struct { + Namespace string // namespace of the job + JobID string // ID of the job + Group string // ID of the group + Task string // ID of the task +} + // SetHash is used to compute and set the hash of the ACL policy func (a *ACLPolicy) SetHash() []byte { // Initialize a 256bit Blake2 hash (32 bytes) @@ -11810,6 +11811,13 @@ func (a *ACLPolicy) SetHash() []byte { _, _ = hash.Write([]byte(a.Description)) _, _ = hash.Write([]byte(a.Rules)) + if a.JobACL != nil { + _, _ = hash.Write([]byte(a.JobACL.Namespace)) + _, _ = hash.Write([]byte(a.JobACL.JobID)) + _, _ = hash.Write([]byte(a.JobACL.Group)) + _, _ = hash.Write([]byte(a.JobACL.Task)) + } + // Finalize the hash hashVal := hash.Sum(nil) @@ -11842,6 +11850,21 @@ func (a *ACLPolicy) Validate() error { err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength) mErr.Errors = append(mErr.Errors, err) } + if a.JobACL != nil { + if a.JobACL.JobID != "" && a.JobACL.Namespace == "" { + err := fmt.Errorf("namespace must be set to set job ID") + mErr.Errors = append(mErr.Errors, err) + } + if a.JobACL.Group != "" && a.JobACL.JobID == "" { + err := fmt.Errorf("job ID must be set to set group") + mErr.Errors = append(mErr.Errors, err) + } + if a.JobACL.Task != "" && a.JobACL.Group == "" { + err := fmt.Errorf("group must be set to set task") + mErr.Errors = append(mErr.Errors, err) + } + } + return mErr.ErrorOrNil() } diff --git a/nomad/structs/structs_periodic_test.go b/nomad/structs/structs_periodic_test.go index f828bced45b..48bf536efa2 100644 --- a/nomad/structs/structs_periodic_test.go +++ b/nomad/structs/structs_periodic_test.go @@ -225,7 +225,7 @@ func TestPeriodicConfig_DSTChange_Transitions(t *testing.T) { func TestPeriodConfig_DSTSprintForward_Property(t *testing.T) { ci.Parallel(t) - + locName := "America/Los_Angeles" loc, err := time.LoadLocation(locName) require.NoError(t, err) diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index d05dcd73e88..e14ba8be356 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -11,9 +11,8 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" - "github.com/kr/pretty" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2580,6 +2579,64 @@ func TestTask_Validate_Template(t *testing.T) { } } +func TestTemplate_Copy(t *testing.T) { + ci.Parallel(t) + + t1 := &Template{ + SourcePath: "/local/file.txt", + DestPath: "/local/dest.txt", + EmbeddedTmpl: "tpl", + ChangeMode: TemplateChangeModeSignal, + ChangeSignal: "SIGHUP", + Splay: 10 * time.Second, + Perms: "777", + 
Uid: pointer.Of(1000), + Gid: pointer.Of(2000), + LeftDelim: "[[", + RightDelim: "]]", + Envvars: true, + VaultGrace: time.Minute, + Wait: &WaitConfig{ + Min: pointer.Of(time.Second), + Max: pointer.Of(time.Minute), + }, + } + t2 := t1.Copy() + + t1.SourcePath = "/local/file2.txt" + t1.DestPath = "/local/dest2.txt" + t1.EmbeddedTmpl = "tpl2" + t1.ChangeMode = TemplateChangeModeRestart + t1.ChangeSignal = "" + t1.Splay = 5 * time.Second + t1.Perms = "700" + t1.Uid = pointer.Of(5000) + t1.Gid = pointer.Of(6000) + t1.LeftDelim = "((" + t1.RightDelim = "))" + t1.Envvars = false + t1.VaultGrace = 2 * time.Minute + t1.Wait.Min = pointer.Of(2 * time.Second) + t1.Wait.Max = pointer.Of(2 * time.Minute) + + require.NotEqual(t, t1.SourcePath, t2.SourcePath) + require.NotEqual(t, t1.DestPath, t2.DestPath) + require.NotEqual(t, t1.EmbeddedTmpl, t2.EmbeddedTmpl) + require.NotEqual(t, t1.ChangeMode, t2.ChangeMode) + require.NotEqual(t, t1.ChangeSignal, t2.ChangeSignal) + require.NotEqual(t, t1.Splay, t2.Splay) + require.NotEqual(t, t1.Perms, t2.Perms) + require.NotEqual(t, t1.Uid, t2.Uid) + require.NotEqual(t, t1.Gid, t2.Gid) + require.NotEqual(t, t1.LeftDelim, t2.LeftDelim) + require.NotEqual(t, t1.RightDelim, t2.RightDelim) + require.NotEqual(t, t1.Envvars, t2.Envvars) + require.NotEqual(t, t1.VaultGrace, t2.VaultGrace) + require.NotEqual(t, t1.Wait.Min, t2.Wait.Min) + require.NotEqual(t, t1.Wait.Max, t2.Wait.Max) + +} + func TestTemplate_Validate(t *testing.T) { ci.Parallel(t) @@ -2670,8 +2727,8 @@ func TestTemplate_Validate(t *testing.T) { DestPath: "local/foo", ChangeMode: "noop", Wait: &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(10 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, Fail: true, @@ -2685,8 +2742,8 @@ func TestTemplate_Validate(t *testing.T) { DestPath: "local/foo", ChangeMode: "noop", Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, Fail: false, @@ -2697,8 +2754,8 @@ func TestTemplate_Validate(t *testing.T) { DestPath: "local/foo", ChangeMode: "noop", Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, Fail: false, @@ -2735,12 +2792,12 @@ func TestTaskWaitConfig_Equals(t *testing.T) { { name: "all-fields", config: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, expected: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { @@ -2751,19 +2808,19 @@ func TestTaskWaitConfig_Equals(t *testing.T) { { name: "min-only", config: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, expected: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, }, { name: "max-only", config: &WaitConfig{ - Max: helper.TimeToPtr(10 * time.Second), + Max: pointer.Of(10 * time.Second), }, expected: &WaitConfig{ - Max: helper.TimeToPtr(10 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, } @@ -3431,6 +3488,7 @@ func TestService_Canonicalize(t *testing.T) { Name: "redis-db", Provider: "consul", Namespace: "default", + TaskName: "redis", }, 
name: "interpolate task in name", }, @@ -3446,6 +3504,7 @@ func TestService_Canonicalize(t *testing.T) { Name: "db", Provider: "consul", Namespace: "default", + TaskName: "redis", }, name: "no interpolation in name", }, @@ -3461,6 +3520,7 @@ func TestService_Canonicalize(t *testing.T) { Name: "example-cache-redis-db", Provider: "consul", Namespace: "default", + TaskName: "redis", }, name: "interpolate job, taskgroup and task in name", }, @@ -3476,6 +3536,7 @@ func TestService_Canonicalize(t *testing.T) { Name: "example-cache-redis-db", Provider: "consul", Namespace: "default", + TaskName: "redis", }, name: "interpolate base in name", }, @@ -3492,6 +3553,7 @@ func TestService_Canonicalize(t *testing.T) { Name: "db", Provider: "nomad", Namespace: "platform", + TaskName: "redis", }, name: "nomad provider", }, @@ -4321,7 +4383,6 @@ func TestTaskArtifact_Validate_Checksum(t *testing.T) { err := tc.Input.Validate() if (err != nil) != tc.Err { t.Fatalf("case %d: %v", i, err) - continue } } } @@ -5226,11 +5287,11 @@ func TestAllocation_DisconnectTimeout(t *testing.T) { }, { desc: "has max_client_disconnect", - maxDisconnect: helper.TimeToPtr(30 * time.Second), + maxDisconnect: pointer.Of(30 * time.Second), }, { desc: "zero max_client_disconnect", - maxDisconnect: helper.TimeToPtr(0 * time.Second), + maxDisconnect: pointer.Of(0 * time.Second), }, } for _, tc := range testCases { @@ -6622,7 +6683,6 @@ func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { out, err := r.ParseReservedHostPorts() if (err != nil) != tc.Err { t.Fatalf("test case %d: %v", i, err) - continue } require.Equal(out, tc.Parsed) diff --git a/nomad/vault.go b/nomad/vault.go index 93afd7d919e..0410b53597e 100644 --- a/nomad/vault.go +++ b/nomad/vault.go @@ -111,6 +111,10 @@ type VaultClient interface { // SetConfig updates the config used by the Vault client SetConfig(config *config.VaultConfig) error + // GetConfig returns a copy of the config used by the Vault client, for + // testing + GetConfig() *config.VaultConfig + // CreateToken takes an allocation and task and returns an appropriate Vault // Secret CreateToken(ctx context.Context, a *structs.Allocation, task string) (*vapi.Secret, error) @@ -350,6 +354,13 @@ func (v *vaultClient) flush() { v.tomb = &tomb.Tomb{} } +// GetConfig returns a copy of this vault client's configuration, for testing. +func (v *vaultClient) GetConfig() *config.VaultConfig { + v.setConfigLock.Lock() + defer v.setConfigLock.Unlock() + return v.config.Copy() +} + // SetConfig is used to update the Vault config being used. 
A temporary outage // may occur after calling as it re-establishes a connection to Vault func (v *vaultClient) SetConfig(config *config.VaultConfig) error { @@ -363,7 +374,7 @@ func (v *vaultClient) SetConfig(config *config.VaultConfig) error { defer v.l.Unlock() // If reloading the same config, no-op - if v.config.IsEqual(config) { + if v.config.Equals(config) { return nil } @@ -605,9 +616,10 @@ func (v *vaultClient) renewalLoop() { // // It should increase the amount of backoff each time, with the following rules: // -// * If token expired already despite earlier renewal attempts, -// back off for 1 minute + jitter -// * If we have an existing authentication that is going to expire, +// - If token expired already despite earlier renewal attempts, +// back off for 1 minute + jitter +// - If we have an existing authentication that is going to expire, +// // never back off more than half of the amount of time remaining // until expiration (with 5s floor) // * Never back off more than 30 seconds multiplied by a random @@ -1241,13 +1253,13 @@ func (v *vaultClient) parallelRevoke(ctx context.Context, accessors []*structs.V // and purge at any given time. // // Limiting the revocation batch size is beneficial for few reasons: -// * A single revocation failure of any entry in batch result into retrying the whole batch; -// the larger the batch is the higher likelihood of such failure -// * Smaller batch sizes result into more co-operativeness: provides hooks for -// reconsidering token TTL and leadership steps down. -// * Batches limit the size of the Raft message purging tokens. Due to bugs -// pre-0.11.3, expired tokens were not properly purged, so users upgrading from -// older versions may have huge numbers (millions) of expired tokens to purge. +// - A single revocation failure of any entry in batch result into retrying the whole batch; +// the larger the batch is the higher likelihood of such failure +// - Smaller batch sizes result into more co-operativeness: provides hooks for +// reconsidering token TTL and leadership steps down. +// - Batches limit the size of the Raft message purging tokens. Due to bugs +// pre-0.11.3, expired tokens were not properly purged, so users upgrading from +// older versions may have huge numbers (millions) of expired tokens to purge. 
const maxVaultRevokeBatchSize = 1000 // revokeDaemon should be called in a goroutine and is used to periodically diff --git a/nomad/vault_test.go b/nomad/vault_test.go index 7e5834b546b..e7f262a2af3 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -18,7 +18,7 @@ import ( "golang.org/x/time/rate" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -1302,7 +1302,7 @@ func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) { func TestVaultClient_CreateToken_Prestart(t *testing.T) { ci.Parallel(t) vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1334,7 +1334,7 @@ func TestVaultClient_CreateToken_Prestart(t *testing.T) { func TestVaultClient_MarkForRevocation(t *testing.T) { vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1362,7 +1362,7 @@ func TestVaultClient_MarkForRevocation(t *testing.T) { func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { ci.Parallel(t) vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1408,7 +1408,7 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { func TestVaultClient_RevokeTokens_Failures_TTL(t *testing.T) { ci.Parallel(t) vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1673,7 +1673,7 @@ func TestVaultClient_RevokeDaemon_Bounded(t *testing.T) { // Disable client until we can change settings for testing conf := v.Config.Copy() - conf.Enabled = helper.BoolToPtr(false) + conf.Enabled = pointer.Of(false) const ( batchSize = 100 @@ -1702,7 +1702,7 @@ func TestVaultClient_RevokeDaemon_Bounded(t *testing.T) { client.maxRevokeBatchSize = batchSize client.revocationIntv = 3 * time.Millisecond conf = v.Config.Copy() - conf.Enabled = helper.BoolToPtr(true) + conf.Enabled = pointer.Of(true) require.NoError(t, client.SetConfig(conf)) client.SetActive(true) diff --git a/nomad/vault_testing.go b/nomad/vault_testing.go index 857cd52150f..b81c0f197d1 100644 --- a/nomad/vault_testing.go +++ b/nomad/vault_testing.go @@ -142,6 +142,7 @@ func (v *TestVaultClient) MarkForRevocation(accessors []*structs.VaultAccessor) func (v *TestVaultClient) Stop() {} func (v *TestVaultClient) SetActive(enabled bool) {} +func (v *TestVaultClient) GetConfig() *config.VaultConfig { return nil } func (v *TestVaultClient) SetConfig(config *config.VaultConfig) error { return nil } func (v *TestVaultClient) Running() bool { return true } func (v *TestVaultClient) Stats() map[string]string { return map[string]string{} } diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 1a4dcf0572b..934a1269000 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -35,29 +35,29 @@ type NodeGetInfoResponse struct { // "zone", "rack", etc. // // According to CSI, there are a few requirements for the keys within this map: -// - Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// - The key name segment is REQUIRED. The prefix is OPTIONAL. 
-// - The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". -// - The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). -// - The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// - If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// - Each value (topological segment) MUST contain 1 or more strings. -// - Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. type Topology struct { Segments map[string]string } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index cff2bd49f3f..99f13e1ed9a 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -130,7 +130,7 @@ func (c *Client) PluginProbe(ctx context.Context) (bool, error) { // PluginGetInfo is used to return semantic data about the plugin. // Response: -// - string: name, the name of the plugin in domain notation format. +// - string: name, the name of the plugin in domain notation format. 
func (c *Client) PluginGetInfo(ctx context.Context) (string, string, error) { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/device/cmd/example/device.go b/plugins/device/cmd/example/device.go index 475f115b014..d6dc1fbbf7f 100644 --- a/plugins/device/cmd/example/device.go +++ b/plugins/device/cmd/example/device.go @@ -10,7 +10,7 @@ import ( "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/shared/hclspec" @@ -343,23 +343,23 @@ func (d *FsDevice) collectStats() (*device.DeviceGroupStats, error) { s := &device.DeviceStats{ Summary: &structs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(f.Size()), + IntNumeratorVal: pointer.Of(f.Size()), Unit: "bytes", Desc: "Filesize in bytes", }, Stats: &structs.StatObject{ Attributes: map[string]*structs.StatValue{ "size": { - IntNumeratorVal: helper.Int64ToPtr(f.Size()), + IntNumeratorVal: pointer.Of(f.Size()), Unit: "bytes", Desc: "Filesize in bytes", }, "modify_time": { - StringVal: helper.StringToPtr(f.ModTime().String()), + StringVal: pointer.Of(f.ModTime().String()), Desc: "Last modified", }, "mode": { - StringVal: helper.StringToPtr(f.Mode().String()), + StringVal: pointer.Of(f.Mode().String()), Desc: "File mode", }, }, diff --git a/plugins/device/plugin_test.go b/plugins/device/plugin_test.go index 52629489fb5..4961faf878f 100644 --- a/plugins/device/plugin_test.go +++ b/plugins/device/plugin_test.go @@ -9,7 +9,7 @@ import ( pb "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/shared/hclspec" @@ -195,7 +195,7 @@ func TestDevicePlugin_Fingerprint(t *testing.T) { Name: "foo", Attributes: map[string]*psstructs.Attribute{ "memory": { - Int: helper.Int64ToPtr(4), + Int: pointer.Of(int64(4)), Unit: "GiB", }, }, @@ -476,8 +476,8 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(10), - IntDenominatorVal: helper.Int64ToPtr(20), + IntNumeratorVal: pointer.Of(int64(10)), + IntDenominatorVal: pointer.Of(int64(20)), Unit: "MB", Desc: "Unit test", }, @@ -493,8 +493,8 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - FloatNumeratorVal: helper.Float64ToPtr(10.0), - FloatDenominatorVal: helper.Float64ToPtr(20.0), + FloatNumeratorVal: pointer.Of(float64(10.0)), + FloatDenominatorVal: pointer.Of(float64(20.0)), Unit: "MB", Desc: "Unit test", }, @@ -508,7 +508,7 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - StringVal: helper.StringToPtr("foo"), + StringVal: pointer.Of("foo"), Unit: "MB", Desc: "Unit test", }, @@ -522,7 +522,7 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - BoolVal: helper.BoolToPtr(true), + BoolVal: pointer.Of(true), Unit: "MB", Desc: "Unit test", }, diff --git a/plugins/drivers/proto/driver.pb.go b/plugins/drivers/proto/driver.pb.go index 66407ef55f2..fa72cb62f61 100644 --- a/plugins/drivers/proto/driver.pb.go +++ 
b/plugins/drivers/proto/driver.pb.go @@ -458,10 +458,10 @@ type FingerprintResponse struct { Attributes map[string]*proto1.Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Health is used to determine the state of the health the driver is in. // Health can be one of the following states: - // * UNDETECTED: driver dependencies are not met and the driver can not start - // * UNHEALTHY: driver dependencies are met but the driver is unable to - // perform operations due to some other problem - // * HEALTHY: driver is able to perform all operations + // - UNDETECTED: driver dependencies are not met and the driver can not start + // - UNHEALTHY: driver dependencies are met but the driver is unable to + // perform operations due to some other problem + // - HEALTHY: driver is able to perform all operations Health FingerprintResponse_HealthState `protobuf:"varint,2,opt,name=health,proto3,enum=hashicorp.nomad.plugins.drivers.proto.FingerprintResponse_HealthState" json:"health,omitempty"` // HealthDescription is a human readable message describing the current // state of driver health @@ -641,9 +641,9 @@ type StartTaskResponse struct { // Result is set depending on the type of error that occurred while starting // a task: // - // * SUCCESS: No error occurred, handle is set - // * RETRY: An error occurred, but is recoverable and the RPC should be retried - // * FATAL: A fatal error occurred and is not likely to succeed if retried + // - SUCCESS: No error occurred, handle is set + // - RETRY: An error occurred, but is recoverable and the RPC should be retried + // - FATAL: A fatal error occurred and is not likely to succeed if retried // // If Result is not successful, the DriverErrorMsg will be set. Result StartTaskResponse_Result `protobuf:"varint,1,opt,name=result,proto3,enum=hashicorp.nomad.plugins.drivers.proto.StartTaskResponse_Result" json:"result,omitempty"` @@ -2842,9 +2842,9 @@ type Device struct { HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` // CgroupPermissions defines the Cgroup permissions of the device. // One or more of the following options can be set: - // * r - allows the task to read from the specified device. - // * w - allows the task to write to the specified device. - // * m - allows the task to create device files that do not yet exist. + // - r - allows the task to read from the specified device. + // - w - allows the task to write to the specified device. + // - m - allows the task to create device files that do not yet exist. // // Example: "rw" CgroupPermissions string `protobuf:"bytes,3,opt,name=cgroup_permissions,json=cgroupPermissions,proto3" json:"cgroup_permissions,omitempty"` diff --git a/plugins/shared/hclspec/hcl_spec.pb.go b/plugins/shared/hclspec/hcl_spec.pb.go index 0bf62c49a9d..af1b37ffb38 100644 --- a/plugins/shared/hclspec/hcl_spec.pb.go +++ b/plugins/shared/hclspec/hcl_spec.pb.go @@ -74,6 +74,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Spec defines the available specification types. type Spec struct { // Types that are valid to be assigned to Block: + // // *Spec_Object // *Spec_Array // *Spec_Attr @@ -273,18 +274,18 @@ func (*Spec) XXX_OneofWrappers() []interface{} { } // Attr spec type reads the value of an attribute in the current body -//and returns that value as its result. 
It also creates validation constraints -//for the given attribute name and its value. +// and returns that value as its result. It also creates validation constraints +// for the given attribute name and its value. // -//```hcl -//Attr { +// ```hcl +// Attr { // name = "document_root" // type = string // required = true -//} -//``` +// } +// ``` // -//`Attr` spec blocks accept the following arguments: +// `Attr` spec blocks accept the following arguments: // // `name` (required) - The attribute name to expect within the HCL input file. // This may be omitted when a default name selector is created by a parent @@ -299,7 +300,7 @@ func (*Spec) XXX_OneofWrappers() []interface{} { // `required` (optional) - If set to `true`, `hcldec` will produce an error // if a value is not provided for the source attribute. // -//`Attr` is a leaf spec type, so no nested spec blocks are permitted. +// `Attr` is a leaf spec type, so no nested spec blocks are permitted. type Attr struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` @@ -356,11 +357,11 @@ func (m *Attr) GetRequired() bool { } // Block spec type applies one nested spec block to the contents of a -//block within the current body and returns the result of that spec. It also -//creates validation constraints for the given block type name. +// block within the current body and returns the result of that spec. It also +// creates validation constraints for the given block type name. // -//```hcl -//Block { +// ```hcl +// Block { // name = "logging" // // Object { @@ -371,10 +372,10 @@ func (m *Attr) GetRequired() bool { // type = string // } // } -//} -//``` +// } +// ``` // -//`Block` spec blocks accept the following arguments: +// `Block` spec blocks accept the following arguments: // // `name` (required) - The block type name to expect within the HCL // input file. This may be omitted when a default name selector is created @@ -384,12 +385,11 @@ func (m *Attr) GetRequired() bool { // `required` (optional) - If set to `true`, `hcldec` will produce an error // if a block of the specified type is not present in the current body. // -//`Block` creates a validation constraint that there must be zero or one blocks -//of the given type name, or exactly one if `required` is set. -// -//`Block` expects a single nested spec block, which is applied to the body of -//the block of the given type when it is present. +// `Block` creates a validation constraint that there must be zero or one blocks +// of the given type name, or exactly one if `required` is set. // +// `Block` expects a single nested spec block, which is applied to the body of +// the block of the given type when it is present. type Block struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` @@ -445,7 +445,6 @@ func (m *Block) GetNested() *Spec { return nil } -// // The BlockAttrs spec type is similar to an Attr spec block of a map type, // but it produces a map from the attributes of a block rather than from an // attribute's expression. @@ -533,11 +532,11 @@ func (m *BlockAttrs) GetRequired() bool { } // BlockList spec type is similar to `Block`, but it accepts zero or -//more blocks of a specified type rather than requiring zero or one. The -//result is a JSON array with one entry per block of the given type. 
+// more blocks of a specified type rather than requiring zero or one. The +// result is a JSON array with one entry per block of the given type. // -//```hcl -//BlockList { +// ```hcl +// BlockList { // name = "log_file" // // Object { @@ -549,10 +548,10 @@ func (m *BlockAttrs) GetRequired() bool { // required = true // } // } -//} -//``` +// } +// ``` // -//`BlockList` spec blocks accept the following arguments: +// `BlockList` spec blocks accept the following arguments: // // `name` (required) - The block type name to expect within the HCL // input file. This may be omitted when a default name selector is created @@ -566,12 +565,11 @@ func (m *BlockAttrs) GetRequired() bool { // produce an error if more than the given number of blocks are present. This // attribute must be greater than or equal to `min_items` if both are set. // -//`Block` creates a validation constraint on the number of blocks of the given -//type that must be present. -// -//`Block` expects a single nested spec block, which is applied to the body of -//each matching block to produce the resulting list items. +// `Block` creates a validation constraint on the number of blocks of the given +// type that must be present. // +// `Block` expects a single nested spec block, which is applied to the body of +// each matching block to produce the resulting list items. type BlockList struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` @@ -636,10 +634,10 @@ func (m *BlockList) GetNested() *Spec { } // BlockSet spec type behaves the same as BlockList except that -//the result is in no specific order and any duplicate items are removed. +// the result is in no specific order and any duplicate items are removed. // -//```hcl -//BlockSet { +// ```hcl +// BlockSet { // name = "log_file" // // Object { @@ -651,11 +649,10 @@ func (m *BlockList) GetNested() *Spec { // required = true // } // } -//} -//``` -// -//The contents of `BlockSet` are the same as for `BlockList`. +// } +// ``` // +// The contents of `BlockSet` are the same as for `BlockList`. type BlockSet struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` @@ -720,12 +717,12 @@ func (m *BlockSet) GetNested() *Spec { } // BlockMap spec type is similar to `Block`, but it accepts zero or -//more blocks of a specified type rather than requiring zero or one. The -//result is a JSON object, or possibly multiple nested JSON objects, whose -//properties are derived from the labels set on each matching block. +// more blocks of a specified type rather than requiring zero or one. The +// result is a JSON object, or possibly multiple nested JSON objects, whose +// properties are derived from the labels set on each matching block. // -//```hcl -//BlockMap { +// ```hcl +// BlockMap { // name = "log_file" // labels = ["filename"] // @@ -735,10 +732,10 @@ func (m *BlockSet) GetNested() *Spec { // required = true // } // } -//} -//``` +// } +// ``` // -//`BlockMap` spec blocks accept the following arguments: +// `BlockMap` spec blocks accept the following arguments: // // `name` (required) - The block type name to expect within the HCL // input file. 
This may be omitted when a default name selector is created @@ -751,12 +748,11 @@ func (m *BlockSet) GetNested() *Spec { // Block header labels are the quoted strings that appear after the block type // name but before the opening `{`. // -//`Block` creates a validation constraint on the number of labels that blocks -//of the given type must have. -// -//`Block` expects a single nested spec block, which is applied to the body of -//each matching block to produce the resulting map items. +// `Block` creates a validation constraint on the number of labels that blocks +// of the given type must have. // +// `Block` expects a single nested spec block, which is applied to the body of +// each matching block to produce the resulting map items. type BlockMap struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` @@ -813,23 +809,23 @@ func (m *BlockMap) GetNested() *Spec { } // Literal spec type returns a given literal value, and creates no -//validation constraints. It is most commonly used with the `Default` spec -//type to create a fallback value, but can also be used e.g. to fill out -//required properties in an `Object` spec that do not correspond to any -//construct in the input configuration. +// validation constraints. It is most commonly used with the `Default` spec +// type to create a fallback value, but can also be used e.g. to fill out +// required properties in an `Object` spec that do not correspond to any +// construct in the input configuration. // -//```hcl -//Literal { +// ```hcl +// Literal { // value = "hello world" -//} -//``` +// } +// ``` // -//`Literal` spec blocks accept the following argument: +// `Literal` spec blocks accept the following argument: // // `value` (required) - The value to return. This attribute may be an expression // that uses [functions](#spec-definition-functions). // -//`Literal` is a leaf spec type, so no nested spec blocks are permitted. +// `Literal` is a leaf spec type, so no nested spec blocks are permitted. type Literal struct { Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -870,12 +866,12 @@ func (m *Literal) GetValue() string { } // Default spec type evaluates a sequence of nested specs in turn and -//returns the result of the first one that produces a non-null value. -//It creates no validation constraints of its own, but passes on the validation -//constraints from its first nested block. +// returns the result of the first one that produces a non-null value. +// It creates no validation constraints of its own, but passes on the validation +// constraints from its first nested block. // -//```hcl -//Default { +// ```hcl +// Default { // Attr { // name = "private" // type = bool @@ -883,17 +879,16 @@ func (m *Literal) GetValue() string { // Literal { // value = false // } -//} -//``` -// -//A `Default` spec block must have at least one nested spec block, and should -//generally have at least two since otherwise the `Default` wrapper is a no-op. +// } +// ``` // -//The second and any subsequent spec blocks are _fallback_ specs. These exhibit -//their usual behavior but are not able to impose validation constraints on the -//current body since they are not evaluated unless all prior specs produce -//`null` as their result. 
+// A `Default` spec block must have at least one nested spec block, and should +// generally have at least two since otherwise the `Default` wrapper is a no-op. // +// The second and any subsequent spec blocks are _fallback_ specs. These exhibit +// their usual behavior but are not able to impose validation constraints on the +// current body since they are not evaluated unless all prior specs produce +// `null` as their result. type Default struct { Primary *Spec `protobuf:"bytes,1,opt,name=primary,proto3" json:"primary,omitempty"` Default *Spec `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` @@ -942,11 +937,11 @@ func (m *Default) GetDefault() *Spec { } // Object spec type is the most commonly used at the root of a spec file. -//Its result is a JSON object whose properties are set based on any nested -//spec blocks: +// Its result is a JSON object whose properties are set based on any nested +// spec blocks: // -//```hcl -//Object { +// ```hcl +// Object { // Attr "name" { // type = "string" // } @@ -958,18 +953,18 @@ func (m *Default) GetDefault() *Spec { // # ... // } // } -//} -//``` +// } +// ``` // -//Nested spec blocks inside `Object` must always have an extra block label -//`"name"`, `"address"` and `"street"` in the above example) that specifies -//the name of the property that should be created in the JSON object result. -//This label also acts as a default name selector for the nested spec, allowing -//the `Attr` blocks in the above example to omit the usually-required `name` -//argument in cases where the HCL input name and JSON output name are the same. +// Nested spec blocks inside `Object` must always have an extra block label +// `"name"`, `"address"` and `"street"` in the above example) that specifies +// the name of the property that should be created in the JSON object result. +// This label also acts as a default name selector for the nested spec, allowing +// the `Attr` blocks in the above example to omit the usually-required `name` +// argument in cases where the HCL input name and JSON output name are the same. // -//An `Object` spec block creates no validation constraints, but it passes on -//any validation constraints created by the nested specs. +// An `Object` spec block creates no validation constraints, but it passes on +// any validation constraints created by the nested specs. type Object struct { Attributes map[string]*Spec `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1010,10 +1005,10 @@ func (m *Object) GetAttributes() map[string]*Spec { } // Array spec type produces a JSON array whose elements are set based on -//any nested spec blocks: +// any nested spec blocks: // -//```hcl -//Array { +// ```hcl +// Array { // Attr { // name = "first_element" // type = "string" @@ -1022,11 +1017,11 @@ func (m *Object) GetAttributes() map[string]*Spec { // name = "second_element" // type = "string" // } -//} -//``` +// } +// ``` // -//An `Array` spec block creates no validation constraints, but it passes on -//any validation constraints created by the nested specs. +// An `Array` spec block creates no validation constraints, but it passes on +// any validation constraints created by the nested specs. 
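The comment churn in this generated file, and in the scheduler sources further down, matches what the Go formatter enforces for doc comments from Go 1.19 on: a space after `//`, dash bullets for list items, and trailing empty `//` lines dropped. A minimal before/after illustration of the two styles (a sketch written for context, not code taken from this diff):

```go
// Package doc only illustrates the two doc-comment styles; it is an
// illustrative sketch, not code from the Nomad repository.
package doc

// Style accepted by earlier gofmt releases (no space after the marker,
// asterisk bullets):
//
//Foo does a thing.
//  * first item
//  * second item
//
// Style produced by gofmt from Go 1.19 on (space after `//`, dash bullets):
//
// Foo does a thing.
//   - first item
//   - second item
func Foo() {}
```

This is consistent with the toolchain bumps near the end of the diff, where the build scripts move `go_version` up to 1.19.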
type Array struct { Values []*Spec `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` diff --git a/plugins/shared/structs/attribute.go b/plugins/shared/structs/attribute.go index 9f09257aba6..06fb6a5bdd3 100644 --- a/plugins/shared/structs/attribute.go +++ b/plugins/shared/structs/attribute.go @@ -7,7 +7,7 @@ import ( "strings" "unicode" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) const ( @@ -58,7 +58,7 @@ func (u *Unit) Comparable(o *Unit) bool { func ParseAttribute(input string) *Attribute { ll := len(input) if ll == 0 { - return &Attribute{String: helper.StringToPtr(input)} + return &Attribute{String: pointer.Of(input)} } // Check if the string is a number ending with potential units @@ -82,22 +82,22 @@ func ParseAttribute(input string) *Attribute { // Try to parse as an int i, err := strconv.ParseInt(numeric, 10, 64) if err == nil { - return &Attribute{Int: helper.Int64ToPtr(i), Unit: unit} + return &Attribute{Int: pointer.Of(i), Unit: unit} } // Try to parse as a float f, err := strconv.ParseFloat(numeric, 64) if err == nil { - return &Attribute{Float: helper.Float64ToPtr(f), Unit: unit} + return &Attribute{Float: pointer.Of(f), Unit: unit} } // Try to parse as a bool b, err := strconv.ParseBool(input) if err == nil { - return &Attribute{Bool: helper.BoolToPtr(b)} + return &Attribute{Bool: pointer.Of(b)} } - return &Attribute{String: helper.StringToPtr(input)} + return &Attribute{String: pointer.Of(input)} } // Attribute is used to describe the value of an attribute, optionally @@ -122,14 +122,14 @@ type Attribute struct { // NewStringAttribute returns a new string attribute. func NewStringAttribute(s string) *Attribute { return &Attribute{ - String: helper.StringToPtr(s), + String: pointer.Of(s), } } // NewBoolAttribute returns a new boolean attribute. func NewBoolAttribute(b bool) *Attribute { return &Attribute{ - Bool: helper.BoolToPtr(b), + Bool: pointer.Of(b), } } @@ -137,7 +137,7 @@ func NewBoolAttribute(b bool) *Attribute { // to be valid. func NewIntAttribute(i int64, unit string) *Attribute { return &Attribute{ - Int: helper.Int64ToPtr(i), + Int: pointer.Of(i), Unit: unit, } } @@ -146,7 +146,7 @@ func NewIntAttribute(i int64, unit string) *Attribute { // be valid. 
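The repeated one-line changes in this file, and throughout the scheduler tests below, swap the typed constructors from the `helper` package (`BoolToPtr`, `Int64ToPtr`, `Float64ToPtr`, `StringToPtr`, `TimeToPtr`) for a single generic helper. The new package itself is not shown in this diff; a minimal sketch of what `pointer.Of` presumably looks like, assuming Go 1.18 generics:

```go
// Package pointer is sketched here for context only; the real definition
// lives in helper/pointer and may differ in detail.
package pointer

// Of returns a pointer to a copy of the value it is given, replacing the
// old per-type helpers such as BoolToPtr and Int64ToPtr.
func Of[T any](v T) *T {
	return &v
}
```

One visible consequence at the call sites: untyped constants no longer pick up a type from the helper's signature, so `helper.Int64ToPtr(123)` becomes `pointer.Of(int64(123))` and `helper.Float64ToPtr(123.2)` becomes `pointer.Of(float64(123.2))`, as in the test tables that follow.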
func NewFloatAttribute(f float64, unit string) *Attribute { return &Attribute{ - Float: helper.Float64ToPtr(f), + Float: pointer.Of(f), Unit: unit, } } @@ -202,16 +202,16 @@ func (a *Attribute) Copy() *Attribute { } if a.Float != nil { - ca.Float = helper.Float64ToPtr(*a.Float) + ca.Float = pointer.Of(*a.Float) } if a.Int != nil { - ca.Int = helper.Int64ToPtr(*a.Int) + ca.Int = pointer.Of(*a.Int) } if a.Bool != nil { - ca.Bool = helper.BoolToPtr(*a.Bool) + ca.Bool = pointer.Of(*a.Bool) } if a.String != nil { - ca.String = helper.StringToPtr(*a.String) + ca.String = pointer.Of(*a.String) } return ca diff --git a/plugins/shared/structs/attribute_test.go b/plugins/shared/structs/attribute_test.go index b30506764bb..70e1cf47317 100644 --- a/plugins/shared/structs/attribute_test.go +++ b/plugins/shared/structs/attribute_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -15,56 +15,56 @@ func TestAttribute_Validate(t *testing.T) { }{ { Input: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, }, { Input: &Attribute{ - String: helper.StringToPtr("foo"), + String: pointer.Of("foo"), }, }, { Input: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), }, }, { Input: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), }, }, { Input: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), Unit: "MB", }, Fail: true, }, { Input: &Attribute{ - String: helper.StringToPtr("foo"), + String: pointer.Of("foo"), Unit: "MB", }, Fail: true, }, { Input: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), Unit: "lolNO", }, Fail: true, }, { Input: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), Unit: "lolNO", }, Fail: true, }, { Input: &Attribute{ - Int: helper.Int64ToPtr(123), - Float: helper.Float64ToPtr(123.2), + Int: pointer.Of(int64(123)), + Float: pointer.Of(float64(123.2)), Unit: "mW", }, Fail: true, @@ -91,46 +91,46 @@ func TestAttribute_Compare_Bool(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, Expected: 0, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Bool: helper.BoolToPtr(false), + Bool: pointer.Of(false), }, Expected: 1, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - String: helper.StringToPtr("foo"), + String: pointer.Of("foo"), }, NotComparable: true, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), }, NotComparable: true, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), }, NotComparable: true, }, @@ -142,55 +142,55 @@ func TestAttribute_Compare_String(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - String: helper.StringToPtr("a"), + String: pointer.Of("a"), }, B: &Attribute{ - String: helper.StringToPtr("b"), + String: pointer.Of("b"), }, Expected: -1, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, 
Expected: 0, }, { A: &Attribute{ - String: helper.StringToPtr("b"), + String: pointer.Of("b"), }, B: &Attribute{ - String: helper.StringToPtr("a"), + String: pointer.Of("a"), }, Expected: 1, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, NotComparable: true, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), }, NotComparable: true, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), }, NotComparable: true, }, @@ -202,46 +202,46 @@ func TestAttribute_Compare_Float(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, B: &Attribute{ - Float: helper.Float64ToPtr(100001.5), + Float: pointer.Of(float64(100001.5)), }, Expected: -1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(100001.5), + Float: pointer.Of(float64(100001.5)), }, B: &Attribute{ - Float: helper.Float64ToPtr(100001.5), + Float: pointer.Of(float64(100001.5)), }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(999999999.5), + Float: pointer.Of(float64(999999999.5)), }, B: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, Expected: 1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, NotComparable: true, }, { A: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, B: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, NotComparable: true, }, @@ -253,46 +253,46 @@ func TestAttribute_Compare_Int(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, NotComparable: true, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, NotComparable: true, }, @@ -304,77 +304,77 @@ func TestAttribute_Compare_Int_With_Units(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), Unit: "MB", }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), Unit: "MB", }, B: &Attribute{ - Int: 
helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), Unit: "GB", }, B: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), Unit: "GiB", }, B: &Attribute{ - Int: helper.Int64ToPtr(1024), + Int: pointer.Of(int64(1024)), Unit: "MiB", }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), Unit: "GiB", }, B: &Attribute{ - Int: helper.Int64ToPtr(1025), + Int: pointer.Of(int64(1025)), Unit: "MiB", }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(1000), + Int: pointer.Of(int64(1000)), Unit: "mW", }, B: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), Unit: "W", }, Expected: 0, @@ -387,88 +387,88 @@ func TestAttribute_Compare_Float_With_Units(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Float: helper.Float64ToPtr(3.0), + Float: pointer.Of(float64(3.0)), Unit: "MB", }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, Expected: -1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(100.0), + Float: pointer.Of(float64(100.0)), Unit: "MB", }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(3.0), + Float: pointer.Of(float64(3.0)), Unit: "GB", }, B: &Attribute{ - Float: helper.Float64ToPtr(3.0), + Float: pointer.Of(float64(3.0)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1.0), + Float: pointer.Of(float64(1.0)), Unit: "GiB", }, B: &Attribute{ - Float: helper.Float64ToPtr(1024.0), + Float: pointer.Of(float64(1024.0)), Unit: "MiB", }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1.0), + Float: pointer.Of(float64(1.0)), Unit: "GiB", }, B: &Attribute{ - Float: helper.Float64ToPtr(1025.0), + Float: pointer.Of(float64(1025.0)), Unit: "MiB", }, Expected: -1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1000.0), + Float: pointer.Of(float64(1000.0)), Unit: "mW", }, B: &Attribute{ - Float: helper.Float64ToPtr(1.0), + Float: pointer.Of(float64(1.0)), Unit: "W", }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1.5), + Float: pointer.Of(float64(1.5)), Unit: "GiB", }, B: &Attribute{ - Float: helper.Float64ToPtr(1400.0), + Float: pointer.Of(float64(1400.0)), Unit: "MiB", }, Expected: 1, @@ -481,46 +481,46 @@ func TestAttribute_Compare_IntToFloat(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.1), + Float: pointer.Of(float64(10.1)), }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, B: &Attribute{ - Float: 
helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, B: &Attribute{ - Float: helper.Float64ToPtr(100.00001), + Float: pointer.Of(float64(100.00001)), }, Expected: -1, }, @@ -549,108 +549,108 @@ func TestAttribute_ParseAndValidate(t *testing.T) { { Input: "true", Expected: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, }, { Input: "false", Expected: &Attribute{ - Bool: helper.BoolToPtr(false), + Bool: pointer.Of(false), }, }, { Input: "1", Expected: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), }, }, { Input: "100", Expected: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, }, { Input: "-100", Expected: &Attribute{ - Int: helper.Int64ToPtr(-100), + Int: pointer.Of(int64(-100)), }, }, { Input: "-1.0", Expected: &Attribute{ - Float: helper.Float64ToPtr(-1.0), + Float: pointer.Of(float64(-1.0)), }, }, { Input: "-100.25", Expected: &Attribute{ - Float: helper.Float64ToPtr(-100.25), + Float: pointer.Of(float64(-100.25)), }, }, { Input: "1.01", Expected: &Attribute{ - Float: helper.Float64ToPtr(1.01), + Float: pointer.Of(float64(1.01)), }, }, { Input: "100.25", Expected: &Attribute{ - Float: helper.Float64ToPtr(100.25), + Float: pointer.Of(float64(100.25)), }, }, { Input: "foobar", Expected: &Attribute{ - String: helper.StringToPtr("foobar"), + String: pointer.Of("foobar"), }, }, { Input: "foo123bar", Expected: &Attribute{ - String: helper.StringToPtr("foo123bar"), + String: pointer.Of("foo123bar"), }, }, { Input: "100MB", Expected: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), Unit: "MB", }, }, { Input: "-100MHz", Expected: &Attribute{ - Int: helper.Int64ToPtr(-100), + Int: pointer.Of(int64(-100)), Unit: "MHz", }, }, { Input: "-1.0MB/s", Expected: &Attribute{ - Float: helper.Float64ToPtr(-1.0), + Float: pointer.Of(float64(-1.0)), Unit: "MB/s", }, }, { Input: "-100.25GiB/s", Expected: &Attribute{ - Float: helper.Float64ToPtr(-100.25), + Float: pointer.Of(float64(-100.25)), Unit: "GiB/s", }, }, { Input: "1.01TB", Expected: &Attribute{ - Float: helper.Float64ToPtr(1.01), + Float: pointer.Of(float64(1.01)), Unit: "TB", }, }, { Input: "100.25mW", Expected: &Attribute{ - Float: helper.Float64ToPtr(100.25), + Float: pointer.Of(float64(100.25)), Unit: "mW", }, }, diff --git a/plugins/shared/structs/proto/attribute.pb.go b/plugins/shared/structs/proto/attribute.pb.go index ab9038aecac..cdc95735cd9 100644 --- a/plugins/shared/structs/proto/attribute.pb.go +++ b/plugins/shared/structs/proto/attribute.pb.go @@ -24,6 +24,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // specifying units type Attribute struct { // Types that are valid to be assigned to Value: + // // *Attribute_FloatVal // *Attribute_IntVal // *Attribute_StringVal diff --git a/plugins/shared/structs/util.go b/plugins/shared/structs/util.go index 2a4b9b0e775..0e3500a43a3 100644 --- a/plugins/shared/structs/util.go +++ b/plugins/shared/structs/util.go @@ -2,7 +2,7 @@ package structs import ( "github.com/golang/protobuf/ptypes/wrappers" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/shared/structs/proto" ) @@ -13,13 +13,13 @@ func ConvertProtoAttribute(in *proto.Attribute) *Attribute { switch in.Value.(type) { case *proto.Attribute_BoolVal: - out.Bool = helper.BoolToPtr(in.GetBoolVal()) + out.Bool = pointer.Of(in.GetBoolVal()) 
case *proto.Attribute_FloatVal: - out.Float = helper.Float64ToPtr(in.GetFloatVal()) + out.Float = pointer.Of(in.GetFloatVal()) case *proto.Attribute_IntVal: - out.Int = helper.Int64ToPtr(in.GetIntVal()) + out.Int = pointer.Of(in.GetIntVal()) case *proto.Attribute_StringVal: - out.String = helper.StringToPtr(in.GetStringVal()) + out.String = pointer.Of(in.GetStringVal()) default: } diff --git a/scheduler/annotate.go b/scheduler/annotate.go index fb5aef27c04..4ca41fad9eb 100644 --- a/scheduler/annotate.go +++ b/scheduler/annotate.go @@ -30,11 +30,12 @@ const ( // // Currently the things that are annotated are: // * Task group changes will be annotated with: -// * Count up and count down changes -// * Update counts (creates, destroys, migrates, etc) +// - Count up and count down changes +// - Update counts (creates, destroys, migrates, etc) +// // * Task changes will be annotated with: -// * forces create/destroy update -// * forces in-place update +// - forces create/destroy update +// - forces in-place update func Annotate(diff *structs.JobDiff, annotations *structs.PlanAnnotations) error { tgDiffs := diff.TaskGroups if len(tgDiffs) == 0 { diff --git a/scheduler/benchmarks/benchmarks_test.go b/scheduler/benchmarks/benchmarks_test.go index f46042819b0..c694141f00f 100644 --- a/scheduler/benchmarks/benchmarks_test.go +++ b/scheduler/benchmarks/benchmarks_test.go @@ -16,9 +16,9 @@ import ( // benchmark for the Nomad scheduler. The starting state for your // implementation will depend on the following environment variables: // -// - NOMAD_BENCHMARK_DATADIR: path to data directory -// - NOMAD_BENCHMARK_SNAPSHOT: path to raft snapshot -// - neither: empty starting state +// - NOMAD_BENCHMARK_DATADIR: path to data directory +// - NOMAD_BENCHMARK_SNAPSHOT: path to raft snapshot +// - neither: empty starting state // // You can run a profile for this benchmark with the usual -cpuprofile // -memprofile flags. diff --git a/scheduler/context_test.go b/scheduler/context_test.go index 0bd0c543223..d5a503b51b1 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -157,7 +157,6 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { // Preempted allocs are removed from the allocs propsed for a node. 
// // See https://github.com/hashicorp/nomad/issues/6787 -// func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { ci.Parallel(t) state, ctx := testContext(t) diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 16a0ada00a7..2a5d804a210 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -34,8 +34,7 @@ func TestStaticIterator_Reset(t *testing.T) { out := collectFeasible(static) if len(out) != len(nodes) { - t.Fatalf("out: %#v", out) - t.Fatalf("missing nodes %d %#v", i, static) + t.Fatalf("out: %#v missing nodes %d %#v", out, i, static) } ids := make(map[string]struct{}) diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 7b47768a4ee..1711ca0c1fb 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -10,6 +10,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -64,10 +65,11 @@ func TestServiceSched_JobRegister(t *testing.T) { // Ensure the eval has no spawned blocked eval if len(h.CreateEvals) != 0 { - t.Fatalf("bad: %#v", h.CreateEvals) + t.Errorf("bad: %#v", h.CreateEvals) if h.Evals[0].BlockedEval != "" { t.Fatalf("bad: %#v", h.Evals[0]) } + t.FailNow() } // Ensure the plan allocated @@ -1517,10 +1519,11 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { // Ensure the eval has no spawned blocked eval if len(h.Evals) != 1 { - t.Fatalf("bad: %#v", h.Evals) + t.Errorf("bad: %#v", h.Evals) if h.Evals[0].BlockedEval != "" { t.Fatalf("bad: %#v", h.Evals[0]) } + t.FailNow() } // Ensure the plan allocated @@ -2336,7 +2339,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { alloc.JobID = job.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) alloc.DeploymentID = d.ID - alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} alloc.AllocatedResources.Tasks[taskName].Devices = []*structs.AllocatedDeviceResource{&adr} alloc.AllocatedResources.Shared = asr allocs = append(allocs, alloc) @@ -2983,7 +2986,7 @@ func TestServiceSched_NodeDown(t *testing.T) { alloc.ClientStatus = tc.client // Mark for migration if necessary - alloc.DesiredTransition.Migrate = helper.BoolToPtr(tc.migrate) + alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate) allocs := []*structs.Allocation{alloc} require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -3276,7 +3279,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -3363,7 +3366,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { for i := 0; i < 6; i++ { newAlloc := allocs[i].Copy() newAlloc.ClientStatus = structs.AllocDesiredStatusStop - newAlloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + newAlloc.DesiredTransition.Migrate = pointer.Of(true) stop = append(stop, newAlloc) } require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop)) @@ -3468,7 +3471,7 @@ func 
TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -3527,7 +3530,7 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) alloc.TaskStates = map[string]*structs.TaskState{ "web": { TaskHandle: &structs.TaskHandle{ @@ -4176,7 +4179,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { allocs[1].TaskStates = map[string]*structs.TaskState{"web": {State: "start", StartedAt: time.Now().Add(-12 * time.Hour), FinishedAt: time.Now().Add(-10 * time.Hour)}} - allocs[1].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[1].DesiredTransition.Reschedule = pointer.Of(true) require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -5198,7 +5201,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { alloc.NodeID = node.ID alloc.Job.TaskGroups[0].Count = 1 alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), alloc.Job)) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) @@ -5834,7 +5837,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.DesiredStatus = structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation @@ -6520,7 +6523,7 @@ func TestPropagateTaskState(t *testing.T) { prevAlloc: &structs.Allocation{ ClientStatus: structs.AllocClientStatusRunning, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, TaskStates: map[string]*structs.TaskState{ taskName: { @@ -6548,7 +6551,7 @@ func TestPropagateTaskState(t *testing.T) { prevAlloc: &structs.Allocation{ ClientStatus: structs.AllocClientStatusRunning, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, TaskStates: map[string]*structs.TaskState{ taskName: {}, diff --git a/scheduler/propertyset.go b/scheduler/propertyset.go index f6d48fe2860..e2325744e61 100644 --- a/scheduler/propertyset.go +++ b/scheduler/propertyset.go @@ -248,7 +248,7 @@ func (p *propertySet) UsedCount(option *structs.Node, tg string) (string, string // existing and proposed allocations. 
It also takes into account any stopped // allocations func (p *propertySet) GetCombinedUseMap() map[string]uint64 { - combinedUse := make(map[string]uint64, helper.IntMax(len(p.existingValues), len(p.proposedValues))) + combinedUse := make(map[string]uint64, helper.Max(len(p.existingValues), len(p.proposedValues))) for _, usedValues := range []map[string]uint64{p.existingValues, p.proposedValues} { for propertyValue, usedCount := range usedValues { combinedUse[propertyValue] += usedCount diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index cfdc0996db1..7e645674c81 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -766,7 +766,7 @@ func (a *allocReconciler) computeReplacements(deploymentPlaceReady bool, desired a.markStop(failed, "", allocRescheduled) desiredChanges.Stop += uint64(len(failed)) - min := helper.IntMin(len(place), underProvisionedBy) + min := helper.Min(len(place), underProvisionedBy) underProvisionedBy -= min return underProvisionedBy } @@ -778,7 +778,7 @@ func (a *allocReconciler) computeReplacements(deploymentPlaceReady bool, desired // If allocs have been lost, determine the number of replacements that are needed // and add placements to the result for the lost allocs. if len(lost) != 0 { - allowed := helper.IntMin(len(lost), len(place)) + allowed := helper.Min(len(lost), len(place)) desiredChanges.Place += uint64(allowed) a.result.place = append(a.result.place, place[:allowed]...) } @@ -819,7 +819,7 @@ func (a *allocReconciler) computeDestructiveUpdates(destructive allocSet, underP desiredChanges *structs.DesiredUpdates, tg *structs.TaskGroup) { // Do all destructive updates - min := helper.IntMin(len(destructive), underProvisionedBy) + min := helper.Min(len(destructive), underProvisionedBy) desiredChanges.DestructiveUpdate += uint64(min) desiredChanges.Ignore += uint64(len(destructive) - min) for _, alloc := range destructive.nameOrder()[:min] { @@ -903,7 +903,7 @@ func (a *allocReconciler) isDeploymentComplete(groupName string, destructive, in // Final check to see if the deployment is complete is to ensure everything is healthy if dstate, ok := a.deployment.TaskGroups[groupName]; ok { - if dstate.HealthyAllocs < helper.IntMax(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs + if dstate.HealthyAllocs < helper.Max(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs (dstate.DesiredCanaries > 0 && !dstate.Promoted) { // Make sure we are promoted if we have canaries complete = false } diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index 3cb29cb901a..cb986ded62b 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -334,7 +334,7 @@ func buildDisconnectedNodes(allocs []*structs.Allocation, count int) map[string] func buildResumableAllocations(count int, clientStatus, desiredStatus string, nodeScore float64) (*structs.Job, []*structs.Allocation) { job := mock.Job() - job.TaskGroups[0].MaxClientDisconnect = helper.TimeToPtr(5 * time.Minute) + job.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Minute) job.TaskGroups[0].Count = count return job, buildAllocations(job, count, clientStatus, desiredStatus, nodeScore) @@ -1059,7 +1059,7 @@ func 
TestReconciler_DrainNode(t *testing.T) { for i := 0; i < 2; i++ { n := mock.DrainNode() n.ID = allocs[i].NodeID - allocs[i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n } @@ -1114,7 +1114,7 @@ func TestReconciler_DrainNode_ScaleUp(t *testing.T) { for i := 0; i < 2; i++ { n := mock.DrainNode() n.ID = allocs[i].NodeID - allocs[i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n } @@ -1170,7 +1170,7 @@ func TestReconciler_DrainNode_ScaleDown(t *testing.T) { for i := 0; i < 3; i++ { n := mock.DrainNode() n.ID = allocs[i].NodeID - allocs[i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n } @@ -2269,7 +2269,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2360,7 +2360,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2368,7 +2368,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { // Mark the canaries as failed allocs[5].ClientStatus = structs.AllocClientStatusFailed - allocs[5].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[5].DesiredTransition.Reschedule = pointer.Of(true) // Mark one of them as already rescheduled once allocs[5].RescheduleTracker = &structs.RescheduleTracker{Events: []*structs.RescheduleEvent{ @@ -2382,7 +2382,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} allocs[6].ClientStatus = structs.AllocClientStatusFailed - allocs[6].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[6].DesiredTransition.Reschedule = pointer.Of(true) // Create 4 unhealthy canary allocations that have already been replaced for i := 0; i < 4; i++ { @@ -2395,7 +2395,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2490,7 +2490,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2498,7 +2498,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { // Mark the canaries as failed allocs[5].ClientStatus = structs.AllocClientStatusFailed - allocs[5].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[5].DesiredTransition.Reschedule = pointer.Of(true) // Mark one of them as already rescheduled once allocs[5].RescheduleTracker = &structs.RescheduleTracker{Events: []*structs.RescheduleEvent{ @@ -2512,7 +2512,7 
@@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} allocs[6].ClientStatus = structs.AllocClientStatusFailed - allocs[6].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[6].DesiredTransition.Reschedule = pointer.Of(true) // Create 4 unhealthy canary allocations that have already been replaced for i := 0; i < 4; i++ { @@ -2525,7 +2525,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -3255,7 +3255,7 @@ func TestReconciler_DrainNode_Canary(t *testing.T) { tainted := make(map[string]*structs.Node, 1) n := mock.DrainNode() n.ID = allocs[11].NodeID - allocs[11].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[11].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive) @@ -3816,7 +3816,7 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { s.PlacedCanaries = append(s.PlacedCanaries, canary.ID) canary.DeploymentID = d.ID canary.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, canary) handled[canary.ID] = allocUpdateFnIgnore @@ -3893,7 +3893,7 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { s.PlacedCanaries = append(s.PlacedCanaries, canary.ID) canary.DeploymentID = d.ID canary.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, canary) handled[canary.ID] = allocUpdateFnIgnore @@ -3994,7 +3994,7 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { new.DeploymentID = d.ID if i < c.healthy { new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } allocs = append(allocs, new) @@ -4065,7 +4065,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) handled[new.ID] = allocUpdateFnIgnore @@ -4080,7 +4080,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { n.Status = structs.NodeStatusDown } else { n.DrainStrategy = mock.DrainNode().DrainStrategy - allocs[2+i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[2+i].DesiredTransition.Migrate = pointer.Of(true) } tainted[n.ID] = n } @@ -4153,7 +4153,7 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) handled[new.ID] = allocUpdateFnIgnore @@ -4168,7 +4168,7 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { n.Status = structs.NodeStatusDown } else { n.DrainStrategy = mock.DrainNode().DrainStrategy - allocs[6+i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[6+i].DesiredTransition.Migrate = pointer.Of(true) } tainted[n.ID] = n } @@ -4228,7 +4228,7 @@ func 
TestReconciler_CompleteDeployment(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, alloc) } @@ -4281,11 +4281,11 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { alloc.DeploymentStatus = &structs.AllocDeploymentStatus{} if i < 10 { alloc.ClientStatus = structs.AllocClientStatusRunning - alloc.DeploymentStatus.Healthy = helper.BoolToPtr(true) + alloc.DeploymentStatus.Healthy = pointer.Of(true) } else { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.ClientStatus = structs.AllocClientStatusFailed - alloc.DeploymentStatus.Healthy = helper.BoolToPtr(false) + alloc.DeploymentStatus.Healthy = pointer.Of(false) } allocs = append(allocs, alloc) @@ -4367,7 +4367,7 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { new.TaskGroup = job.TaskGroups[group].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) handled[new.ID] = allocUpdateFnIgnore @@ -4452,7 +4452,7 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) } @@ -4512,7 +4512,7 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, alloc) } @@ -4801,7 +4801,7 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { // Mark half of them as reschedulable for i := 0; i < 5; i++ { - allocs[i].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Reschedule = pointer.Of(true) } reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, @@ -4870,7 +4870,7 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } new.ClientStatus = structs.AllocClientStatusRunning allocs = append(allocs, new) @@ -4885,7 +4885,7 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = uuid.Generate() new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } new.DesiredStatus = structs.AllocDesiredStatusStop new.ClientStatus = structs.AllocClientStatusFailed @@ -5022,7 +5022,7 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { }} // Mark DesiredTransition ForceReschedule - allocs[0].DesiredTransition = structs.DesiredTransition{ForceReschedule: helper.BoolToPtr(true)} + allocs[0].DesiredTransition = structs.DesiredTransition{ForceReschedule: pointer.Of(true)} reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "", 50, true) @@ -5488,7 +5488,7 @@ func TestReconciler_Disconnected_Client(t *testing.T) { serverDesiredStatus: structs.AllocDesiredStatusRun, 
shouldStopOnDisconnectedNode: true, nodeStatusDisconnected: true, - maxDisconnect: helper.TimeToPtr(2 * time.Second), + maxDisconnect: pointer.Of(2 * time.Second), expected: &resultExpectation{ stop: 2, desiredTGUpdates: map[string]*structs.DesiredUpdates{ @@ -5926,7 +5926,7 @@ func TestReconciler_Client_Disconnect_Canaries(t *testing.T) { Canary: true, } if alloc.ClientStatus == structs.AllocClientStatusRunning { - alloc.DeploymentStatus.Healthy = helper.BoolToPtr(true) + alloc.DeploymentStatus.Healthy = pointer.Of(true) } tc.deploymentState.PlacedCanaries = append(tc.deploymentState.PlacedCanaries, alloc.ID) handled[alloc.ID] = allocUpdateFnIgnore diff --git a/scheduler/reconcile_util_test.go b/scheduler/reconcile_util_test.go index ab386739333..21f19814e42 100644 --- a/scheduler/reconcile_util_test.go +++ b/scheduler/reconcile_util_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -61,7 +61,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { } testJob := mock.Job() - testJob.TaskGroups[0].MaxClientDisconnect = helper.TimeToPtr(5 * time.Second) + testJob.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) now := time.Now() testJobNoMaxDisconnect := mock.Job() @@ -143,7 +143,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating1": { ID: "migrating1", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "draining", }, @@ -151,7 +151,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating2": { ID: "migrating2", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "nil", }, @@ -190,7 +190,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating1": { ID: "migrating1", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "draining", }, @@ -198,7 +198,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating2": { ID: "migrating2", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "nil", }, diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index dcc3d6ea315..fac54369949 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -538,7 +538,7 @@ func TestSysBatch_NodeDown(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = 
pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -652,7 +652,7 @@ func TestSysBatch_NodeDrain(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -1308,7 +1308,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) alloc.TaskGroup = "pinger" alloc2 := mock.SysBatchAlloc() diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index ce3b5cc5163..bb26d7d695c 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -9,7 +9,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -1016,7 +1016,7 @@ func TestSystemSched_NodeDown(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -1130,7 +1130,7 @@ func TestSystemSched_NodeDrain(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -1731,7 +1731,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) alloc.TaskGroup = "web" alloc2 := mock.Alloc() @@ -2895,7 +2895,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { require.FailNow(t, "invalid jobType") } - job.TaskGroups[0].MaxClientDisconnect = helper.TimeToPtr(5 * time.Second) + job.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) if !tc.required { job.Stop = true @@ -2914,7 +2914,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.ClientStatus = tc.clientStatus alloc.DesiredStatus = tc.desiredStatus - alloc.DesiredTransition.Migrate = helper.BoolToPtr(tc.migrate) + alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate) alloc.AllocStates = tc.allocState alloc.TaskStates = tc.taskState diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index adba6ffa52f..ea581b9a151 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" 
"github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -877,7 +877,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { alloc.JobID = job1.ID alloc.NodeID = nodes[i].ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: time.Now(), Canary: false, ModifyIndex: h.NextIndex(), diff --git a/scheduler/util_test.go b/scheduler/util_test.go index ff7984fb514..4133e9edc18 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -163,7 +163,7 @@ func TestDiffSystemAllocsForNode(t *testing.T) { Name: "my-job.web[2]", Job: oldJob, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, }, // Mark the 4th lost @@ -340,7 +340,7 @@ func TestDiffSystemAllocs(t *testing.T) { Name: "my-job.web[0]", Job: oldJob, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, }, // Mark as lost on a dead node @@ -793,8 +793,8 @@ func TestTasksUpdated(t *testing.T) { j22.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, } @@ -802,14 +802,14 @@ func TestTasksUpdated(t *testing.T) { j23.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, } require.False(t, tasksUpdated(j22, j23, name)) // Compare changed Template wait configs - j23.TaskGroups[0].Tasks[0].Templates[0].Wait.Max = helper.TimeToPtr(10 * time.Second) + j23.TaskGroups[0].Tasks[0].Templates[0].Wait.Max = pointer.Of(10 * time.Second) require.True(t, tasksUpdated(j22, j23, name)) // Add a volume diff --git a/scripts/release/mac-remote-build b/scripts/release/mac-remote-build index 5d7689d1a28..3b91ca3aef6 100755 --- a/scripts/release/mac-remote-build +++ b/scripts/release/mac-remote-build @@ -56,7 +56,7 @@ REPO_PATH="${TMP_WORKSPACE}/gopath/src/github.com/hashicorp/nomad" mkdir -p "${TMP_WORKSPACE}/tmp" install_go() { - local go_version="1.18.3" + local go_version="1.19" local download= download="https://storage.googleapis.com/golang/go${go_version}.darwin-amd64.tar.gz" diff --git a/scripts/vagrant-linux-priv-go.sh b/scripts/vagrant-linux-priv-go.sh index b953976a3ea..10eb60eebe5 100755 --- a/scripts/vagrant-linux-priv-go.sh +++ b/scripts/vagrant-linux-priv-go.sh @@ -3,7 +3,7 @@ set -o errexit function install_go() { - local go_version="1.18.3" + local go_version="1.19" local download="https://storage.googleapis.com/golang/go${go_version}.linux-amd64.tar.gz" if go version 2>&1 | grep -q "${go_version}"; then diff --git a/tools/go.mod b/tools/go.mod index dd1d3a7f9a3..31a78edc47b 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/nomad/tools -go 1.17 +go 1.18 require ( github.com/aws/aws-sdk-go v1.37.26 diff --git a/ui/.template-lintrc.js b/ui/.template-lintrc.js index 
28057918d84..d6284a06200 100644 --- a/ui/.template-lintrc.js +++ b/ui/.template-lintrc.js @@ -7,6 +7,12 @@ module.exports = { 'no-action': 'off', 'no-invalid-interactive': 'off', 'no-inline-styles': 'off', - 'no-curly-component-invocation': { allow: ['format-volume-name'] }, + 'no-curly-component-invocation': { + allow: ['format-volume-name', 'keyboard-commands'], + }, + 'no-implicit-this': { allow: ['keyboard-commands'] }, }, + ignore: [ + 'app/components/breadcrumbs/*', // using {{(modifier)}} syntax + ], }; diff --git a/ui/app/abilities/job.js b/ui/app/abilities/job.js index 892837658c7..3dca4dff7a9 100644 --- a/ui/app/abilities/job.js +++ b/ui/app/abilities/job.js @@ -31,6 +31,7 @@ export default class Job extends AbstractAbility { // For each policy record, extract all policies of all namespaces const allNamespacePolicies = policies .toArray() + .filter((policy) => get(policy, 'rulesJSON.Namespaces')) .map((policy) => get(policy, 'rulesJSON.Namespaces')) .flat() .map((namespace = {}) => { diff --git a/ui/app/abilities/variable.js b/ui/app/abilities/variable.js index caa0ae5ecbf..6d392edb9e3 100644 --- a/ui/app/abilities/variable.js +++ b/ui/app/abilities/variable.js @@ -1,3 +1,4 @@ +// @ts-check import { computed, get } from '@ember/object'; import { or } from '@ember/object/computed'; import AbstractAbility from './abstract'; @@ -20,7 +21,7 @@ export default class Variable extends AbstractAbility { @or( 'bypassAuthorization', 'selfTokenIsManagement', - 'policiesSupportVariableView' + 'policiesSupportVariableList' ) canList; @@ -38,19 +39,94 @@ export default class Variable extends AbstractAbility { ) canDestroy; - @computed('rulesForNamespace.@each.capabilities') - get policiesSupportVariableView() { - return this.rulesForNamespace.some((rules) => { - return get(rules, 'SecureVariables'); - }); + @or( + 'bypassAuthorization', + 'selfTokenIsManagement', + 'policiesSupportVariableRead' + ) + canRead; + + @computed('token.selfTokenPolicies') + get policiesSupportVariableList() { + return this.policyNamespacesIncludeSecureVariablesCapabilities( + this.token.selfTokenPolicies, + ['list', 'read', 'write', 'destroy'] + ); } @computed('path', 'allPaths') - get policiesSupportVariableWriting() { + get policiesSupportVariableRead() { const matchingPath = this._nearestMatchingPath(this.path); return this.allPaths .find((path) => path.name === matchingPath) - ?.capabilities?.includes('write'); + ?.capabilities?.includes('read'); + } + + /** + * + * Map to your policy's namespaces, + * and each of their SecureVariables blocks' paths, + * and each of their capabilities. + * Then, check to see if any of the permissions you're looking for + * are contained within at least one of them. 
+ * + * @param {Object} policies + * @param {string[]} capabilities + * @param {string} [path] + * @returns {boolean} + */ + policyNamespacesIncludeSecureVariablesCapabilities( + policies = [], + capabilities = [], + path + ) { + const namespacesWithSecureVariableCapabilities = policies + .toArray() + .filter((policy) => get(policy, 'rulesJSON.Namespaces')) + .map((policy) => get(policy, 'rulesJSON.Namespaces')) + .flat() + .map((namespace = {}) => { + return namespace.SecureVariables?.Paths; + }) + .flat() + .compact() + .filter((secVarsBlock = {}) => { + if (!path || path === WILDCARD_GLOB) { + return true; + } else { + return secVarsBlock.PathSpec === path; + } + }) + .map((secVarsBlock = {}) => { + return secVarsBlock.Capabilities; + }) + .flat() + .compact(); + + // Check for requested permissions + return namespacesWithSecureVariableCapabilities.some((abilityList) => { + return capabilities.includes(abilityList); + }); + } + + @computed('allPaths', 'namespace', 'path', 'token.selfTokenPolicies') + get policiesSupportVariableWriting() { + if (this.namespace === WILDCARD_GLOB && this.path === WILDCARD_GLOB) { + // If you're checking if you can write from root, and you don't specify a namespace, + // Then if you can write in ANY path in ANY namespace, you can get to /new. + return this.policyNamespacesIncludeSecureVariablesCapabilities( + this.token.selfTokenPolicies, + ['write'], + this._nearestMatchingPath(this.path) + ); + } else { + // Checking a specific path in a specific namespace. + // TODO: This doesn't cover the case when you're checking for the * namespace at a specific path. + // Right now we require you to specify your namespace to enable the button. + const matchingPath = this._nearestMatchingPath(this.path); + return this.allPaths + .find((path) => path.name === matchingPath) + ?.capabilities?.includes('write'); + } } @computed('path', 'allPaths') @@ -66,9 +142,13 @@ export default class Variable extends AbstractAbility { return (get(this, 'token.selfTokenPolicies') || []) .toArray() .reduce((paths, policy) => { - const matchingNamespace = this.namespace ??
'default'; + const namespaces = get(policy, 'rulesJSON.Namespaces'); + const matchingNamespace = this._nearestMatchingNamespace( + namespaces, + this.namespace + ); - const variables = (get(policy, 'rulesJSON.Namespaces') || []).find( + const variables = (namespaces || []).find( (namespace) => namespace.Name === matchingNamespace )?.SecureVariables; @@ -85,6 +165,12 @@ export default class Variable extends AbstractAbility { }, []); } + _nearestMatchingNamespace(policyNamespaces, namespace) { + if (!namespace || !policyNamespaces) return 'default'; + + return this._findMatchingNamespace(policyNamespaces, namespace); + } + _formatMatchingPathRegEx(path, wildCardPlacement = 'end') { const replacer = () => '\\/'; if (wildCardPlacement === 'end') { @@ -110,7 +196,6 @@ export default class Variable extends AbstractAbility { _nearestMatchingPath(path) { const pathNames = this.allPaths.map((path) => path.name); - if (pathNames.includes(path)) { return path; } diff --git a/ui/app/adapters/allocation.js b/ui/app/adapters/allocation.js index 20cb6d4f5a5..63477d80c22 100644 --- a/ui/app/adapters/allocation.js +++ b/ui/app/adapters/allocation.js @@ -14,6 +14,12 @@ export default class AllocationAdapter extends Watchable { }); } + restartAll(allocation) { + const prefix = `${this.host || '/'}${this.urlPrefix()}`; + const url = `${prefix}/client/allocation/${allocation.id}/restart`; + return this.ajax(url, 'PUT', { data: { AllTasks: true } }); + } + ls(model, path) { return this.token .authorizedRequest( diff --git a/ui/app/adapters/variable.js b/ui/app/adapters/variable.js index bccceaf45b2..5d57f8c445f 100644 --- a/ui/app/adapters/variable.js +++ b/ui/app/adapters/variable.js @@ -1,6 +1,7 @@ import ApplicationAdapter from './application'; import { pluralize } from 'ember-inflector'; import classic from 'ember-classic-decorator'; +import { ConflictError } from '@ember-data/adapter/error'; @classic export default class VariableAdapter extends ApplicationAdapter { @@ -8,13 +9,11 @@ export default class VariableAdapter extends ApplicationAdapter { // PUT instead of POST on create; // /v1/var instead of /v1/vars on create (urlForFindRecord) - createRecord(_store, _type, snapshot) { + createRecord(_store, type, snapshot) { let data = this.serialize(snapshot); - return this.ajax( - this.urlForFindRecord(snapshot.id, snapshot.modelName), - 'PUT', - { data } - ); + let baseUrl = this.buildURL(type.modelName, data.ID); + const checkAndSetValue = snapshot?.attr('modifyIndex') || 0; + return this.ajax(`${baseUrl}?cas=${checkAndSetValue}`, 'PUT', { data }); } urlForFindAll(modelName) { @@ -27,21 +26,51 @@ export default class VariableAdapter extends ApplicationAdapter { return pluralize(baseUrl); } - urlForFindRecord(id, modelName, snapshot) { - const namespace = snapshot?.attr('namespace') || 'default'; - - let baseUrl = this.buildURL(modelName, id, snapshot); + urlForFindRecord(identifier, modelName, snapshot) { + const { namespace, id } = _extractIDAndNamespace(identifier, snapshot); + let baseUrl = this.buildURL(modelName, id); return `${baseUrl}?namespace=${namespace}`; } - urlForUpdateRecord(id, modelName) { - return this.buildURL(modelName, id); + urlForUpdateRecord(identifier, modelName, snapshot) { + const { id } = _extractIDAndNamespace(identifier, snapshot); + let baseUrl = this.buildURL(modelName, id); + if (snapshot?.adapterOptions?.overwrite) { + return `${baseUrl}`; + } else { + const checkAndSetValue = snapshot?.attr('modifyIndex') || 0; + return `${baseUrl}?cas=${checkAndSetValue}`; + } } - 
urlForDeleteRecord(id, modelName, snapshot) { - const namespace = snapshot?.attr('namespace') || 'default'; - + urlForDeleteRecord(identifier, modelName, snapshot) { + const { namespace, id } = _extractIDAndNamespace(identifier, snapshot); const baseUrl = this.buildURL(modelName, id); return `${baseUrl}?namespace=${namespace}`; } + + handleResponse(status, _, payload) { + if (status === 409) { + return new ConflictError([ + { detail: _normalizeConflictErrorObject(payload), status: 409 }, + ]); + } + return super.handleResponse(...arguments); + } +} + +function _extractIDAndNamespace(identifier, snapshot) { + const namespace = snapshot?.attr('namespace') || 'default'; + const id = snapshot?.attr('path') || identifier; + return { + namespace, + id, + }; +} + +function _normalizeConflictErrorObject(conflictingVariable) { + return { + modifyTime: Math.floor(conflictingVariable.ModifyTime / 1000000), + items: conflictingVariable.Items, + }; } diff --git a/ui/app/components/allocation-subnav.js b/ui/app/components/allocation-subnav.js index 594440223ad..09e91502a15 100644 --- a/ui/app/components/allocation-subnav.js +++ b/ui/app/components/allocation-subnav.js @@ -8,6 +8,7 @@ import classic from 'ember-classic-decorator'; @tagName('') export default class AllocationSubnav extends Component { @service router; + @service keyboard; @equal('router.currentRouteName', 'allocations.allocation.fs') fsIsActive; diff --git a/ui/app/components/app-breadcrumbs.js b/ui/app/components/app-breadcrumbs.js new file mode 100644 index 00000000000..868c81c2438 --- /dev/null +++ b/ui/app/components/app-breadcrumbs.js @@ -0,0 +1,7 @@ +import Component from '@glimmer/component'; + +export default class AppBreadcrumbsComponent extends Component { + isOneCrumbUp(iter = 0, totalNum = 0) { + return iter === totalNum - 2; + } +} diff --git a/ui/app/components/breadcrumbs/default.hbs b/ui/app/components/breadcrumbs/default.hbs index 3117a3b54e2..73e86b6f6e5 100644 --- a/ui/app/components/breadcrumbs/default.hbs +++ b/ui/app/components/breadcrumbs/default.hbs @@ -1,9 +1,17 @@ {{! template-lint-disable no-unknown-arguments-for-builtin-components }} -
  • +
  • + data-test-breadcrumb={{@crumb.args.firstObject}}> {{#if @crumb.title}}
    diff --git a/ui/app/components/breadcrumbs/default.js b/ui/app/components/breadcrumbs/default.js new file mode 100644 index 00000000000..c8c7464bf5c --- /dev/null +++ b/ui/app/components/breadcrumbs/default.js @@ -0,0 +1,18 @@ +import { action } from '@ember/object'; +import Component from '@glimmer/component'; +import KeyboardShortcutModifier from 'nomad-ui/modifiers/keyboard-shortcut'; +import { inject as service } from '@ember/service'; + +export default class BreadcrumbsTemplate extends Component { + @service router; + + @action + traverseUpALevel(args) { + const [path, ...rest] = args; + this.router.transitionTo(path, ...rest); + } + + get maybeKeyboardShortcut() { + return this.args.isOneCrumbUp() ? KeyboardShortcutModifier : null; + } +} diff --git a/ui/app/components/breadcrumbs/job.hbs b/ui/app/components/breadcrumbs/job.hbs index d3956485f06..cab6d56fa83 100644 --- a/ui/app/components/breadcrumbs/job.hbs +++ b/ui/app/components/breadcrumbs/job.hbs @@ -8,7 +8,7 @@
  • {{/if}} {{#if trigger.data.isSuccess}} - {{#if trigger.data.result}} + {{#if (and trigger.data.result this.hasParent)}}
  • {{/if}} -
  • +
  • this.closeSidebar(), + }, + ]; } diff --git a/ui/app/components/gutter-menu.js b/ui/app/components/gutter-menu.js index e73f58da351..c84a4d0e466 100644 --- a/ui/app/components/gutter-menu.js +++ b/ui/app/components/gutter-menu.js @@ -7,6 +7,7 @@ import classic from 'ember-classic-decorator'; export default class GutterMenu extends Component { @service system; @service router; + @service keyboard; @computed('system.namespaces.@each.name') get sortedNamespaces() { @@ -37,6 +38,11 @@ export default class GutterMenu extends Component { onHamburgerClick() {} + // Seemingly redundant, but serves to ensure the action is passed to the keyboard service correctly + transitionTo(destination) { + return this.router.transitionTo(destination); + } + gotoJobsForNamespace(namespace) { if (!namespace || !namespace.get('id')) return; diff --git a/ui/app/components/job-subnav.js b/ui/app/components/job-subnav.js index 4d80322d86f..3560dab8395 100644 --- a/ui/app/components/job-subnav.js +++ b/ui/app/components/job-subnav.js @@ -3,6 +3,7 @@ import Component from '@glimmer/component'; export default class JobSubnav extends Component { @service can; + @service keyboard; get shouldRenderClientsTab() { const { job } = this.args; diff --git a/ui/app/components/keyboard-shortcuts-modal.hbs b/ui/app/components/keyboard-shortcuts-modal.hbs new file mode 100644 index 00000000000..47c233d04ba --- /dev/null +++ b/ui/app/components/keyboard-shortcuts-modal.hbs @@ -0,0 +1,70 @@ +{{#if this.keyboard.shortcutsVisible}} + {{keyboard-commands (array this.escapeCommand)}} +
    +
    + +

    Keyboard Shortcuts

    +

    Click a key pattern to re-bind it to a shortcut of your choosing.

    +
    +
      + {{#each this.commands as |command|}} +
    • + {{command.label}} + + {{#if command.recording}} + Recording; ESC to cancel. + {{else}} + {{#if command.custom}} + + {{/if}} + {{/if}} + + + +
    • + {{/each}} +
    +
    + Keyboard shortcuts {{#if this.keyboard.enabled}}enabled{{else}}disabled{{/if}} + +
    +
    +{{/if}} + +{{#if (and this.keyboard.enabled this.keyboard.displayHints)}} + {{#each this.hints as |hint|}} + + {{/each}} +{{/if}} diff --git a/ui/app/components/keyboard-shortcuts-modal.js b/ui/app/components/keyboard-shortcuts-modal.js new file mode 100644 index 00000000000..117a8a66872 --- /dev/null +++ b/ui/app/components/keyboard-shortcuts-modal.js @@ -0,0 +1,70 @@ +import Component from '@glimmer/component'; +import { inject as service } from '@ember/service'; +import { computed } from '@ember/object'; +import { action } from '@ember/object'; +import Tether from 'tether'; + +export default class KeyboardShortcutsModalComponent extends Component { + @service keyboard; + @service config; + + escapeCommand = { + label: 'Hide Keyboard Shortcuts', + pattern: ['Escape'], + action: () => { + this.keyboard.shortcutsVisible = false; + }, + }; + + /** + * commands: filter keyCommands to those that have an action and a label, + * to distinguish between those that are just visual hints of existing commands + */ + @computed('keyboard.keyCommands.[]') + get commands() { + return this.keyboard.keyCommands.reduce((memo, c) => { + if (c.label && c.action && !memo.find((m) => m.label === c.label)) { + memo.push(c); + } + return memo; + }, []); + } + + /** + * hints: filter keyCommands to those that have an element property, + * and then compute a position on screen to place the hint. + */ + @computed('keyboard.{keyCommands.length,displayHints}') + get hints() { + if (this.keyboard.displayHints) { + return this.keyboard.keyCommands.filter((c) => c.element); + } else { + return []; + } + } + + @action + tetherToElement(element, hint, self) { + if (!this.config.isTest) { + let binder = new Tether({ + element: self, + target: element, + attachment: 'top left', + targetAttachment: 'top left', + targetModifier: 'visible', + }); + hint.binder = binder; + } + } + + @action + untetherFromElement(hint) { + if (!this.config.isTest) { + hint.binder.destroy(); + } + } + + @action toggleListener() { + this.keyboard.enabled = !this.keyboard.enabled; + } +} diff --git a/ui/app/components/lifecycle-chart-row.js b/ui/app/components/lifecycle-chart-row.js index 25203683b32..8da916203ec 100644 --- a/ui/app/components/lifecycle-chart-row.js +++ b/ui/app/components/lifecycle-chart-row.js @@ -15,9 +15,9 @@ export default class LifecycleChartRow extends Component { return undefined; } - @computed('taskState.finishedAt') + @computed('taskState.state') get finishedClass() { - if (this.taskState && this.taskState.finishedAt) { + if (this.taskState && this.taskState.state === 'dead') { return 'is-finished'; } diff --git a/ui/app/components/plugin-subnav.js b/ui/app/components/plugin-subnav.js new file mode 100644 index 00000000000..1333547a77e --- /dev/null +++ b/ui/app/components/plugin-subnav.js @@ -0,0 +1,6 @@ +import Component from '@glimmer/component'; +import { inject as service } from '@ember/service'; + +export default class PluginSubnavComponent extends Component { + @service keyboard; +} diff --git a/ui/app/components/safe-link-to.js b/ui/app/components/safe-link-to.js new file mode 100644 index 00000000000..d4dfc4a7451 --- /dev/null +++ b/ui/app/components/safe-link-to.js @@ -0,0 +1,9 @@ +import { LinkComponent } from '@ember/legacy-built-in-components'; +import classic from 'ember-classic-decorator'; + +// Necessary for programmatic routing away pages with s that contain @query properties. 
+// (There's an issue with query param calculations in the new component that uses the router service) +// https://github.com/emberjs/ember.js/issues/20051 + +@classic +export default class SafeLinkToComponent extends LinkComponent {} diff --git a/ui/app/components/secure-variable-form.hbs b/ui/app/components/secure-variable-form.hbs index 510e530e332..e50e9fe738e 100644 --- a/ui/app/components/secure-variable-form.hbs +++ b/ui/app/components/secure-variable-form.hbs @@ -9,6 +9,27 @@
    {{/if}} + {{#if this.hasConflict}} +
    +

    Heads up! Your Secure Variable has a conflict.

    +

    This might be because someone else tried saving in the time since you've had it open.

    + {{#if this.conflictingVariable.modifyTime}} + + {{moment-from-now this.conflictingVariable.modifyTime}} + + {{/if}} + {{#if this.conflictingVariable.items}} +
    {{stringify-object this.conflictingVariable.items whitespace=2}}
    + {{else}} +

    Your ACL token limits your ability to see further details about the conflicting variable.

    + {{/if}} +
    + + +
    +
    + {{/if}} +
    @@ -184,6 +198,10 @@ + + {{#if this.error}}
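The conflict banner added to `secure-variable-form.hbs` above pairs with the adapter changes earlier in this diff: writes go out with a `?cas=<modify index>` query parameter, and a 409 response carries the conflicting variable, whose `ModifyTime` (nanoseconds) and `Items` the UI then renders. A rough sketch of that round trip against the HTTP API follows; the `/v1/var/<path>` endpoint shape and request body are assumptions inferred from the adapter code here, not an authoritative API reference.

```go
// Sketch only: a check-and-set write like the one the variable adapter issues,
// with the 409 handling that feeds the conflict UI. Endpoint and payload shape
// are assumptions based on this diff, not a definitive client implementation.
package casdemo

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// conflictVariable mirrors the fields the UI reads off a 409 response.
type conflictVariable struct {
	ModifyTime int64             // nanoseconds; the UI divides by 1e6 for display
	Items      map[string]string // may be absent if the token cannot read the variable
}

// casWrite PUTs a variable at path, refusing to clobber a newer copy.
func casWrite(addr, path string, modifyIndex uint64, items map[string]string) error {
	body, err := json.Marshal(map[string]interface{}{"Items": items})
	if err != nil {
		return err
	}
	url := fmt.Sprintf("%s/v1/var/%s?cas=%d", addr, path, modifyIndex)
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusConflict {
		// Someone else wrote the variable since we read it; surface the
		// conflict instead of silently overwriting, as the form does.
		var conflict conflictVariable
		if err := json.NewDecoder(resp.Body).Decode(&conflict); err != nil {
			return err
		}
		return fmt.Errorf("write conflict: variable modified at %d ms",
			conflict.ModifyTime/1_000_000)
	}
	return nil
}
```

The adapter sends `cas=0` when no `modifyIndex` is known (a brand-new variable), which presumably only succeeds if the variable does not already exist, so accidental overwrites on create are caught the same way.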
    diff --git a/ui/app/templates/clients/client/index.hbs b/ui/app/templates/clients/client/index.hbs index c347b6d2e2d..20a8f3506b5 100644 --- a/ui/app/templates/clients/client/index.hbs +++ b/ui/app/templates/clients/client/index.hbs @@ -546,6 +546,10 @@ diff --git a/ui/app/templates/components/allocation-subnav.hbs b/ui/app/templates/components/allocation-subnav.hbs index 335d1964174..374e4f5c9b1 100644 --- a/ui/app/templates/components/allocation-subnav.hbs +++ b/ui/app/templates/components/allocation-subnav.hbs @@ -1,4 +1,4 @@ -
    +
    • Overview
    • Files
    • diff --git a/ui/app/templates/components/app-breadcrumbs.hbs b/ui/app/templates/components/app-breadcrumbs.hbs index 7c1e105a92c..a56bf2316ee 100644 --- a/ui/app/templates/components/app-breadcrumbs.hbs +++ b/ui/app/templates/components/app-breadcrumbs.hbs @@ -1,7 +1,7 @@ - {{#each breadcrumbs as |crumb|}} + {{#each breadcrumbs as |crumb iter|}} {{#let crumb.args.crumb as |c|}} - {{component (concat "breadcrumbs/" (or c.type "default")) crumb=c}} + {{component (concat "breadcrumbs/" (or c.type "default")) crumb=c isOneCrumbUp=(action this.isOneCrumbUp iter breadcrumbs.length)}} {{/let}} {{/each}} \ No newline at end of file diff --git a/ui/app/templates/components/client-subnav.hbs b/ui/app/templates/components/client-subnav.hbs index 98978a90e59..c8a769eafb1 100644 --- a/ui/app/templates/components/client-subnav.hbs +++ b/ui/app/templates/components/client-subnav.hbs @@ -1,4 +1,4 @@ -
      +
      • Overview
      • Monitor
      • diff --git a/ui/app/templates/components/global-header.hbs b/ui/app/templates/components/global-header.hbs index eb23131f3bf..384d2c67e71 100644 --- a/ui/app/templates/components/global-header.hbs +++ b/ui/app/templates/components/global-header.hbs @@ -60,7 +60,7 @@ > Documentation - + ACL Tokens
      diff --git a/ui/app/templates/components/gutter-menu.hbs b/ui/app/templates/components/gutter-menu.hbs index 69fc6f2a8e2..561dea7dc0e 100644 --- a/ui/app/templates/components/gutter-menu.hbs +++ b/ui/app/templates/components/gutter-menu.hbs @@ -1,6 +1,7 @@
      @@ -33,7 +34,7 @@
      {{/if}}