From 949a6f60c729f4a9942cfca9598bea0257331ab6 Mon Sep 17 00:00:00 2001
From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com>
Date: Mon, 30 Jan 2023 15:48:43 +0100
Subject: [PATCH] renamed stanza to block for consistency with other projects
 (#15941)

---
 .release/ci.hcl | 2 +-
 acl/policy.go | 6 +-
 api/consul.go | 8 +-
 api/operator.go | 2 +-
 api/tasks.go | 2 +-
 api/tasks_test.go | 4 +-
 client/allocrunner/consul_grpc_sock_hook.go | 2 +-
 client/allocrunner/network_manager_linux.go | 2 +-
 .../taskrunner/connect_native_hook.go | 2 +-
 .../taskrunner/connect_native_hook_test.go | 10 +-
 .../taskrunner/task_runner_hooks.go | 18 ++--
 client/allocrunner/taskrunner/vault_hook.go | 30 +++---
 client/serviceregistration/nsd/nsd.go | 4 +-
 client/serviceregistration/workload.go | 4 +-
 command/agent/command.go | 4 +-
 command/agent/command_test.go | 2 +-
 command/agent/consul/connect.go | 6 +-
 command/agent/consul/service_client.go | 6 +-
 command/agent/job_endpoint.go | 2 +-
 command/agent/job_endpoint_test.go | 6 +-
 command/agent/retry_join.go | 15 ++-
 command/agent/retry_join_test.go | 8 +-
 command/alloc_status.go | 4 +-
 command/assets/connect.nomad | 90 +++++++++---------
 command/assets/example.nomad | 94 +++++++++----------
 command/deployment.go | 2 +-
 command/job_plan.go | 4 +-
 command/job_run.go | 4 +-
 command/job_validate.go | 4 +-
 command/var.go | 2 +-
 demo/csi/ceph-csi-plugin/README.md | 2 +-
 demo/csi/cinder-csi-plugin/README.md | 4 +-
 demo/csi/kadalu-csi/controller.nomad | 2 +-
 drivers/docker/driver.go | 2 +-
 e2e/consul/on_update.go | 2 +-
 e2e/consultemplate/consultemplate.go | 2 +-
 jobspec/parse.go | 2 +-
 jobspec/parse_job.go | 2 +-
 jobspec/parse_network.go | 2 +-
 jobspec/parse_service.go | 10 +-
 jobspec/parse_task.go | 4 +-
 nomad/deploymentwatcher/deployment_watcher.go | 2 +-
 nomad/fsm.go | 4 +-
 nomad/job_endpoint_hook_connect.go | 2 +-
 nomad/job_endpoint_hooks.go | 2 +-
 nomad/job_endpoint_test.go | 4 +-
 nomad/structs/config/artifact.go | 2 +-
 nomad/structs/csi.go | 4 +-
 nomad/structs/network.go | 4 +-
 nomad/structs/operator.go | 2 +-
 nomad/structs/services.go | 28 +++---
 nomad/structs/services_test.go | 6 +-
 nomad/structs/structs.go | 28 +++---
 nomad/structs/structs_test.go | 16 ++--
 plugins/device/cmd/example/README.md | 2 +-
 plugins/drivers/driver.go | 2 +-
 plugins/drivers/proto/driver.pb.go | 2 +-
 plugins/drivers/proto/driver.proto | 2 +-
 scheduler/generic_sched.go | 2 +-
 scheduler/preemption.go | 2 +-
 scheduler/propertyset.go | 4 +-
 scheduler/reconcile_test.go | 20 ++--
 scheduler/spread.go | 4 +-
 scheduler/stack.go | 4 +-
 scheduler/util.go | 2 +-
 website/content/api-docs/json-jobs.mdx | 30 +++---
 .../content/api-docs/operator/scheduler.mdx | 2 +-
 website/content/api-docs/search.mdx | 2 +-
 website/content/docs/commands/job/plan.mdx | 6 +-
 website/content/docs/commands/job/run.mdx | 12 +--
 .../content/docs/commands/job/validate.mdx | 6 +-
 website/content/docs/commands/node/drain.mdx | 2 +-
 website/content/docs/concepts/filesystem.mdx | 8 +-
 website/content/docs/configuration/acl.mdx | 8 +-
 website/content/docs/configuration/audit.mdx | 24 ++---
 .../content/docs/configuration/autopilot.mdx | 8 +-
 website/content/docs/configuration/client.mdx | 36 +++----
 website/content/docs/configuration/consul.mdx | 10 +-
 website/content/docs/configuration/index.mdx | 8 +-
 website/content/docs/configuration/plugin.mdx | 8 +-
 website/content/docs/configuration/search.mdx | 8 +-
 .../content/docs/configuration/sentinel.mdx | 8 +-
 website/content/docs/configuration/server.mdx | 20 ++--
 .../docs/configuration/server_join.mdx | 8 +-
 .../content/docs/configuration/telemetry.mdx | 12 +--
 website/content/docs/configuration/tls.mdx | 14 +--
 website/content/docs/configuration/ui.mdx | 10 +-
 website/content/docs/configuration/vault.mdx | 12 +--
 website/content/docs/drivers/docker.mdx | 32 +++----
 website/content/docs/drivers/exec.mdx | 2 +-
 website/content/docs/drivers/index.mdx | 4 +-
 website/content/docs/drivers/raw_exec.mdx | 6 +-
 .../docs/integrations/consul-connect.mdx | 8 +-
 .../docs/integrations/consul-integration.mdx | 4 +-
 .../docs/integrations/vault-integration.mdx | 4 +-
 .../docs/job-specification/affinity.mdx | 16 ++--
 .../docs/job-specification/artifact.mdx | 12 +--
 .../docs/job-specification/change_script.mdx | 12 +--
 .../content/docs/job-specification/check.mdx | 22 ++---
 .../docs/job-specification/check_restart.mdx | 28 +++---
 .../docs/job-specification/connect.mdx | 14 +--
 .../docs/job-specification/constraint.mdx | 10 +-
 .../docs/job-specification/csi_plugin.mdx | 16 ++--
 .../content/docs/job-specification/device.mdx | 18 ++--
 .../job-specification/dispatch_payload.mdx | 14 +--
 .../content/docs/job-specification/env.mdx | 18 ++--
 .../docs/job-specification/ephemeral_disk.mdx | 12 +--
 .../content/docs/job-specification/expose.mdx | 14 +--
 .../docs/job-specification/gateway.mdx | 12 +--
 .../content/docs/job-specification/group.mdx | 20 ++--
 .../content/docs/job-specification/index.mdx | 8 +-
 .../content/docs/job-specification/job.mdx | 16 ++--
 .../docs/job-specification/lifecycle.mdx | 10 +-
 .../content/docs/job-specification/logs.mdx | 16 ++--
 .../content/docs/job-specification/meta.mdx | 16 ++--
 .../docs/job-specification/migrate.mdx | 12 +--
 .../docs/job-specification/multiregion.mdx | 36 +++----
 .../docs/job-specification/network.mdx | 22 ++---
 .../docs/job-specification/parameterized.mdx | 12 +--
 .../docs/job-specification/periodic.mdx | 12 +--
 .../content/docs/job-specification/proxy.mdx | 10 +-
 .../docs/job-specification/reschedule.mdx | 14 +--
 .../docs/job-specification/resources.mdx | 18 ++--
 .../docs/job-specification/restart.mdx | 10 +-
 .../docs/job-specification/scaling.mdx | 8 +-
 .../docs/job-specification/service.mdx | 44 ++++-----
 .../job-specification/sidecar_service.mdx | 12 +--
 .../docs/job-specification/sidecar_task.mdx | 10 +-
 .../content/docs/job-specification/spread.mdx | 32 +++----
 .../content/docs/job-specification/task.mdx | 12 +--
 .../docs/job-specification/template.mdx | 14 +--
 .../content/docs/job-specification/update.mdx | 24 ++---
 .../docs/job-specification/upstreams.mdx | 10 +-
 .../content/docs/job-specification/vault.mdx | 16 ++--
 .../content/docs/job-specification/volume.mdx | 16 ++--
 .../docs/job-specification/volume_mount.mdx | 8 +-
 .../docs/operations/monitoring-nomad.mdx | 4 +-
 .../docs/other-specifications/acl-policy.mdx | 2 +-
 website/content/docs/schedulers.mdx | 8 +-
 website/content/docs/upgrade/index.mdx | 2 +-
 .../content/docs/upgrade/upgrade-specific.mdx | 62 ++++++------
 .../content/plugins/devices/community/usb.mdx | 4 +-
 .../plugins/drivers/community/containerd.mdx | 16 ++--
 .../content/plugins/drivers/community/iis.mdx | 2 +-
 .../content/plugins/drivers/community/lxc.mdx | 2 +-
 .../content/plugins/drivers/community/rkt.mdx | 4 +-
 website/content/plugins/drivers/index.mdx | 4 +-
 website/content/plugins/drivers/podman.mdx | 10 +-
 .../content/plugins/drivers/remote/ecs.mdx | 2 +-
 .../content/plugins/drivers/remote/index.mdx | 6 +-
 .../content/tools/autoscaling/agent/apm.mdx | 2 +-
 .../agent/dynamic_application_sizing.mdx | 2 +-
 .../content/tools/autoscaling/agent/http.mdx | 2 +-
 .../content/tools/autoscaling/agent/nomad.mdx | 2 +-
 .../tools/autoscaling/agent/policy.mdx | 2 +-
 .../tools/autoscaling/agent/policy_eval.mdx | 2 +-
 .../tools/autoscaling/agent/strategy.mdx | 2 +-
 .../tools/autoscaling/agent/target.mdx | 2 +-
 .../tools/autoscaling/agent/telemetry.mdx | 2 +-
 website/content/tools/autoscaling/index.mdx | 6 +-
 .../tools/autoscaling/plugins/apm/nomad.mdx | 2 +-
 .../plugins/target/app-sizing-nomad.mdx | 20 ++--
 .../autoscaling/plugins/target/nomad.mdx | 12 +--
 website/content/tools/autoscaling/policy.mdx | 10 +-
 164 files changed, 853 insertions(+), 854 deletions(-)

diff --git a/.release/ci.hcl b/.release/ci.hcl
index 8883245039b..3b0986c9e44 100644
--- a/.release/ci.hcl
+++ b/.release/ci.hcl
@@ -170,7 +170,7 @@ event "fossa-scan" {
 }
 
 ## These are promotion and post-publish events
-## they should be added to the end of the file after the verify event stanza.
+## they should be added to the end of the file after the verify event block.
 event "trigger-staging" {
   // This event is dispatched by the bob trigger-promotion command
   // and is required - do not delete.
diff --git a/acl/policy.go b/acl/policy.go
index b0658cb760b..39d886bbe37 100644
--- a/acl/policy.go
+++ b/acl/policy.go
@@ -8,7 +8,7 @@ import (
 )
 
 const (
-	// The following levels are the only valid values for the `policy = "read"` stanza.
+	// The following levels are the only valid values for the `policy = "read"` block.
 	// When policies are merged together, the most privilege is granted, except for deny
 	// which always takes precedence and supersedes.
 	PolicyDeny = "deny"
@@ -20,7 +20,7 @@ const (
 
 const (
 	// The following are the fine-grained capabilities that can be granted within a namespace.
-	// The Policy stanza is a short hand for granting several of these. When capabilities are
+	// The Policy block is shorthand for granting several of these. When capabilities are
 	// combined we take the union of all capabilities. If the deny capability is present, it
 	// takes precedence and overwrites all other capabilities.
@@ -54,7 +54,7 @@ var (
 
 const (
 	// The following are the fine-grained capabilities that can be granted for a volume set.
-	// The Policy stanza is a short hand for granting several of these. When capabilities are
+	// The Policy block is shorthand for granting several of these. When capabilities are
 	// combined we take the union of all capabilities. If the deny capability is present, it
 	// takes precedence and overwrites all other capabilities.
diff --git a/api/consul.go b/api/consul.go
index 0452d2ff18f..aa2b9d8ed33 100644
--- a/api/consul.go
+++ b/api/consul.go
@@ -40,7 +40,7 @@ func (c *Consul) MergeNamespace(namespace *string) {
 	}
 }
 
-// ConsulConnect represents a Consul Connect jobspec stanza.
+// ConsulConnect represents a Consul Connect jobspec block.
 type ConsulConnect struct {
 	Native  bool           `hcl:"native,optional"`
 	Gateway *ConsulGateway `hcl:"gateway,block"`
@@ -59,7 +59,7 @@ func (cc *ConsulConnect) Canonicalize() {
 }
 
 // ConsulSidecarService represents a Consul Connect SidecarService jobspec
-// stanza.
+// block.
 type ConsulSidecarService struct {
 	Tags []string `hcl:"tags,optional"`
 	Port string   `hcl:"port,optional"`
@@ -133,7 +133,7 @@ func (st *SidecarTask) Canonicalize() {
 	}
 }
 
-// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza.
+// ConsulProxy represents a Consul Connect sidecar proxy jobspec block.
 type ConsulProxy struct {
 	LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"`
 	LocalServicePort    int    `mapstructure:"local_service_port" hcl:"local_service_port,optional"`
@@ -197,7 +197,7 @@ func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway {
 	}
 }
 
-// ConsulUpstream represents a Consul Connect upstream jobspec stanza.
+// ConsulUpstream represents a Consul Connect upstream jobspec block.
 type ConsulUpstream struct {
 	DestinationName      string `mapstructure:"destination_name" hcl:"destination_name,optional"`
 	DestinationNamespace string `mapstructure:"destination_namespace" hcl:"destination_namespace,optional"`
diff --git a/api/operator.go b/api/operator.go
index 8a3f74def46..9c444960f3d 100644
--- a/api/operator.go
+++ b/api/operator.go
@@ -162,7 +162,7 @@ type SchedulerSetConfigurationResponse struct {
 }
 
 // SchedulerAlgorithm is an enum string that encapsulates the valid options for a
-// SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the
+// SchedulerConfiguration block's SchedulerAlgorithm. These modes will allow the
 // scheduler to be user-selectable.
 type SchedulerAlgorithm string
diff --git a/api/tasks.go b/api/tasks.go
index a02e6901b9d..d997d37d96f 100644
--- a/api/tasks.go
+++ b/api/tasks.go
@@ -1052,7 +1052,7 @@ type TaskEvent struct {
 }
 
 // CSIPluginType is an enum string that encapsulates the valid options for a
-// CSIPlugin stanza's Type. These modes will allow the plugin to be used in
+// CSIPlugin block's Type. These modes will allow the plugin to be used in
 // different ways by the client.
 type CSIPluginType string
diff --git a/api/tasks_test.go b/api/tasks_test.go
index 2ee8d5db3b5..8ae4b10b85c 100644
--- a/api/tasks_test.go
+++ b/api/tasks_test.go
@@ -565,7 +565,7 @@ func TestTaskGroup_Merge_Update(t *testing.T) {
 	}
 	job.Canonicalize()
 
-	// Merge and canonicalize part of an update stanza
+	// Merge and canonicalize part of an update block
 	tg := &TaskGroup{
 		Name: pointerOf("foo"),
 		Update: &UpdateStrategy{
@@ -743,7 +743,7 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
 	}
 }
 
-// TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
+// TestSpread_Canonicalize asserts that the spread block is canonicalized correctly
 func TestSpread_Canonicalize(t *testing.T) {
 	testutil.Parallel(t)
diff --git a/client/allocrunner/consul_grpc_sock_hook.go b/client/allocrunner/consul_grpc_sock_hook.go
index ba68c2ad13d..2e6c60f7cd5 100644
--- a/client/allocrunner/consul_grpc_sock_hook.go
+++ b/client/allocrunner/consul_grpc_sock_hook.go
@@ -39,7 +39,7 @@ var (
 // consulGRPCSocketHook creates Unix sockets to allow communication from inside a
 // netns to Consul gRPC endpoint.
 //
-// Noop for allocations without a group Connect stanza using bridge networking.
+// Noop for allocations without a group Connect block using bridge networking.
 type consulGRPCSocketHook struct {
 	logger hclog.Logger
diff --git a/client/allocrunner/network_manager_linux.go b/client/allocrunner/network_manager_linux.go
index b435b1c8b82..3186f7c74c8 100644
--- a/client/allocrunner/network_manager_linux.go
+++ b/client/allocrunner/network_manager_linux.go
@@ -171,7 +171,7 @@ func netModeToIsolationMode(netMode string) drivers.NetIsolationMode {
 func newNetworkConfigurator(log hclog.Logger, alloc *structs.Allocation, config *clientconfig.Config) (NetworkConfigurator, error) {
 	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
 
-	// Check if network stanza is given
+	// Check if network block is given
 	if len(tg.Networks) == 0 {
 		return &hostNetworkConfigurator{}, nil
 	}
diff --git a/client/allocrunner/taskrunner/connect_native_hook.go b/client/allocrunner/taskrunner/connect_native_hook.go
index 9e51b335886..d9a51f5b810 100644
--- a/client/allocrunner/taskrunner/connect_native_hook.go
+++ b/client/allocrunner/taskrunner/connect_native_hook.go
@@ -265,7 +265,7 @@ func (h *connectNativeHook) hostEnv(env map[string]string) map[string]string {
 func (h *connectNativeHook) maybeSetSITokenEnv(dir, task string, env map[string]string) error {
 	if _, exists := env["CONSUL_HTTP_TOKEN"]; exists {
 		// Consul token was already set - typically by using the Vault integration
-		// and a template stanza to set the environment. Ignore the SI token as
+		// and a template block to set the environment. Ignore the SI token as
 		// the configured token takes precedence.
 		return nil
 	}
diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go
index 99fd37ad23f..0b6e81dfd4e 100644
--- a/client/allocrunner/taskrunner/connect_native_hook_test.go
+++ b/client/allocrunner/taskrunner/connect_native_hook_test.go
@@ -25,7 +25,7 @@ import (
 
 func getTestConsul(t *testing.T) *consultest.TestServer {
 	testConsul, err := consultest.NewTestServerConfigT(t, func(c *consultest.TestServerConfig) {
-		c.Peering = nil // fix for older versions of Consul (<1.13.0) that don't support peering
+		c.Peering = nil  // fix for older versions of Consul (<1.13.0) that don't support peering
 		if !testing.Verbose() { // disable consul logging if -v not set
 			c.Stdout = ioutil.Discard
 			c.Stderr = ioutil.Discard
@@ -114,7 +114,7 @@ func TestConnectNativeHook_tlsEnv(t *testing.T) {
 		},
 	}
 
-	// existing config from task env stanza
+	// existing config from task env block
 	taskEnv := map[string]string{
 		"CONSUL_CACERT":      "fakeCA.pem",
 		"CONSUL_CLIENT_CERT": "fakeCert.pem",
@@ -490,7 +490,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
 	request := &interfaces.TaskPrestartRequest{
 		Task:    tg.Tasks[0],
 		TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name),
-		TaskEnv: taskenv.NewEmptyTaskEnv(), // nothing set in env stanza
+		TaskEnv: taskenv.NewEmptyTaskEnv(), // nothing set in env block
 	}
 	require.NoError(t, request.TaskDir.Build(false, nil))
 
@@ -620,7 +620,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
 	request := &interfaces.TaskPrestartRequest{
 		Task:    tg.Tasks[0],
 		TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name),
-		TaskEnv: taskEnv, // env stanza is configured w/ non-default tls configs
+		TaskEnv: taskEnv, // env block is configured w/ non-default tls configs
 	}
 	require.NoError(t, request.TaskDir.Build(false, nil))
 
@@ -634,7 +634,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
 	require.True(t, response.Done)
 
 	// Assert environment variable for CONSUL_HTTP_SSL is set, because it was
-	// the only one not overridden by task env stanza config
overridden by task env stanza config + // the only one not overridden by task env block config require.NotEmpty(t, response.Env) require.Equal(t, map[string]string{ "CONSUL_HTTP_SSL": "true", diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go index 98ff3f011a2..7c1b73dbd9b 100644 --- a/client/allocrunner/taskrunner/task_runner_hooks.go +++ b/client/allocrunner/taskrunner/task_runner_hooks.go @@ -70,7 +70,7 @@ func (tr *TaskRunner) initHooks() { newDeviceHook(tr.devicemanager, hookLogger), } - // If the task has a CSI stanza, add the hook. + // If the task has a CSI block, add the hook. if task.CSIPluginConfig != nil { tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook( &csiPluginSupervisorHookConfig{ @@ -86,14 +86,14 @@ func (tr *TaskRunner) initHooks() { // If Vault is enabled, add the hook if task.Vault != nil { tr.runnerHooks = append(tr.runnerHooks, newVaultHook(&vaultHookConfig{ - vaultStanza: task.Vault, - client: tr.vaultClient, - events: tr, - lifecycle: tr, - updater: tr, - logger: hookLogger, - alloc: tr.Alloc(), - task: tr.taskName, + vaultBlock: task.Vault, + client: tr.vaultClient, + events: tr, + lifecycle: tr, + updater: tr, + logger: hookLogger, + alloc: tr.Alloc(), + task: tr.taskName, })) } diff --git a/client/allocrunner/taskrunner/vault_hook.go b/client/allocrunner/taskrunner/vault_hook.go index 8aa33a429dc..ce60a818dac 100644 --- a/client/allocrunner/taskrunner/vault_hook.go +++ b/client/allocrunner/taskrunner/vault_hook.go @@ -45,19 +45,19 @@ func (tr *TaskRunner) updatedVaultToken(token string) { } type vaultHookConfig struct { - vaultStanza *structs.Vault - client vaultclient.VaultClient - events ti.EventEmitter - lifecycle ti.TaskLifecycle - updater vaultTokenUpdateHandler - logger log.Logger - alloc *structs.Allocation - task string + vaultBlock *structs.Vault + client vaultclient.VaultClient + events ti.EventEmitter + lifecycle ti.TaskLifecycle + updater vaultTokenUpdateHandler + logger log.Logger + alloc *structs.Allocation + task string } type vaultHook struct { - // vaultStanza is the vault stanza for the task - vaultStanza *structs.Vault + // vaultBlock is the vault block for the task + vaultBlock *structs.Vault // eventEmitter is used to emit events to the task eventEmitter ti.EventEmitter @@ -97,7 +97,7 @@ type vaultHook struct { func newVaultHook(config *vaultHookConfig) *vaultHook { ctx, cancel := context.WithCancel(context.Background()) h := &vaultHook{ - vaultStanza: config.vaultStanza, + vaultBlock: config.vaultBlock, client: config.client, eventEmitter: config.events, lifecycle: config.lifecycle, @@ -239,9 +239,9 @@ OUTER: h.future.Set(token) if updatedToken { - switch h.vaultStanza.ChangeMode { + switch h.vaultBlock.ChangeMode { case structs.VaultChangeModeSignal: - s, err := signals.Parse(h.vaultStanza.ChangeSignal) + s, err := signals.Parse(h.vaultBlock.ChangeSignal) if err != nil { h.logger.Error("failed to parse signal", "error", err) h.lifecycle.Kill(h.ctx, @@ -252,7 +252,7 @@ OUTER: } event := structs.NewTaskEvent(structs.TaskSignaling).SetTaskSignal(s).SetDisplayMessage("Vault: new Vault token acquired") - if err := h.lifecycle.Signal(event, h.vaultStanza.ChangeSignal); err != nil { + if err := h.lifecycle.Signal(event, h.vaultBlock.ChangeSignal); err != nil { h.logger.Error("failed to send signal", "error", err) h.lifecycle.Kill(h.ctx, structs.NewTaskEvent(structs.TaskKilling). 
@@ -268,7 +268,7 @@ OUTER:
 			case structs.VaultChangeModeNoop:
 				fallthrough
 			default:
-				h.logger.Error("invalid Vault change mode", "mode", h.vaultStanza.ChangeMode)
+				h.logger.Error("invalid Vault change mode", "mode", h.vaultBlock.ChangeMode)
 			}
 
 			// We have handled it
diff --git a/client/serviceregistration/nsd/nsd.go b/client/serviceregistration/nsd/nsd.go
index 7ebc5c14971..807f4d492b2 100644
--- a/client/serviceregistration/nsd/nsd.go
+++ b/client/serviceregistration/nsd/nsd.go
@@ -18,7 +18,7 @@ type ServiceRegistrationHandler struct {
 	cfg *ServiceRegistrationHandlerCfg
 
 	// checkWatcher watches checks of services in the Nomad service provider,
-	// and restarts associated tasks in accordance with their check_restart stanza.
+	// and restarts associated tasks in accordance with their check_restart block.
 	checkWatcher serviceregistration.CheckWatcher
 
 	// registrationEnabled tracks whether this handler is enabled for
@@ -57,7 +57,7 @@ type ServiceRegistrationHandlerCfg struct {
 	RPCFn func(method string, args, resp interface{}) error
 
 	// CheckWatcher watches checks of services in the Nomad service provider,
-	// and restarts associated tasks in accordance with their check_restart stanza.
+	// and restarts associated tasks in accordance with their check_restart block.
 	CheckWatcher serviceregistration.CheckWatcher
 }
diff --git a/client/serviceregistration/workload.go b/client/serviceregistration/workload.go
index 7123b7e4e2b..f752cd22c91 100644
--- a/client/serviceregistration/workload.go
+++ b/client/serviceregistration/workload.go
@@ -20,13 +20,13 @@ type WorkloadServices struct {
 	ProviderNamespace string
 
 	// Restarter allows restarting the task or task group depending on the
-	// check_restart stanzas.
+	// check_restart blocks.
 	Restarter WorkloadRestarter
 
 	// Services and checks to register for the task.
 	Services []*structs.Service
 
-	// Networks from the task's resources stanza.
+	// Networks from the task's resources block.
 	// TODO: remove and use Ports
 	Networks structs.Networks
diff --git a/command/agent/command.go b/command/agent/command.go
index c16bbd242c0..f46c44d9248 100644
--- a/command/agent/command.go
+++ b/command/agent/command.go
@@ -408,7 +408,7 @@ func (c *Command) IsValidConfig(config, cmdConfig *Config) bool {
 	}
 
 	if err := config.Client.Artifact.Validate(); err != nil {
-		c.Ui.Error(fmt.Sprintf("client.artifact stanza invalid: %v", err))
+		c.Ui.Error(fmt.Sprintf("client.artifact block invalid: %v", err))
 		return false
 	}
 
@@ -1191,7 +1191,7 @@ func (c *Command) startupJoin(config *Config) error {
 		new = len(config.Server.ServerJoin.StartJoin)
 	}
 	if old != 0 && new != 0 {
-		return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join stanza")
+		return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join block")
 	}
 
 	// Nothing to do
diff --git a/command/agent/command_test.go b/command/agent/command_test.go
index 9ce1a0a5fa2..1e313518b95 100644
--- a/command/agent/command_test.go
+++ b/command/agent/command_test.go
@@ -398,7 +398,7 @@ func TestIsValidConfig(t *testing.T) {
 					},
 				},
 			},
-			err: "client.artifact stanza invalid: http_read_timeout must be > 0",
+			err: "client.artifact block invalid: http_read_timeout must be > 0",
 		},
 	}
diff --git a/command/agent/consul/connect.go b/command/agent/consul/connect.go
index e2fa8570d3f..3751de2c3f5 100644
--- a/command/agent/consul/connect.go
+++ b/command/agent/consul/connect.go
@@ -19,7 +19,7 @@ import (
 func newConnect(serviceID string, info structs.AllocInfo, serviceName string, nc *structs.ConsulConnect, networks structs.Networks, ports structs.AllocatedPorts) (*api.AgentServiceConnect, error) {
 	switch {
 	case nc == nil:
-		// no connect stanza means there is no connect service to register
+		// no connect block means there is no connect service to register
 		return nil, nil
 
 	case nc.IsGateway():
@@ -57,7 +57,7 @@ func newConnectGateway(connect *structs.ConsulConnect) *api.AgentServiceConnectP
 	var envoyConfig map[string]interface{}
 
-	// Populate the envoy configuration from the gateway.proxy stanza, if
+	// Populate the envoy configuration from the gateway.proxy block, if
 	// such configuration is provided.
 	if proxy := connect.Gateway.Proxy; proxy != nil {
 		envoyConfig = make(map[string]interface{})
@@ -94,7 +94,7 @@ func newConnectGateway(connect *structs.ConsulConnect) *api.AgentServiceConnectP
 func connectSidecarRegistration(serviceID string, info structs.AllocInfo, css *structs.ConsulSidecarService, networks structs.Networks, ports structs.AllocatedPorts) (*api.AgentServiceRegistration, error) {
 	if css == nil {
-		// no sidecar stanza means there is no sidecar service to register
+		// no sidecar block means there is no sidecar service to register
 		return nil, nil
 	}
diff --git a/command/agent/consul/service_client.go b/command/agent/consul/service_client.go
index fd42d75e83b..1b98bec4910 100644
--- a/command/agent/consul/service_client.go
+++ b/command/agent/consul/service_client.go
@@ -87,7 +87,7 @@ const (
 
 // Additional Consul ACLs required
 // - Consul Template: key:read
-//   Used in tasks with template stanza that use Consul keys.
+//   Used in tasks with a template block that use Consul keys.
 
 // CatalogAPI is the consul/api.Catalog API used by Nomad.
//
@@ -1120,8 +1120,8 @@ func (c *ServiceClient) serviceRegs(
 		Port:            port,
 		Meta:            meta,
 		TaggedAddresses: taggedAddresses,
-		Connect:         connect, // will be nil if no Connect stanza
-		Proxy:           gateway, // will be nil if no Connect Gateway stanza
+		Connect:         connect, // will be nil if no Connect block
+		Proxy:           gateway, // will be nil if no Connect Gateway block
 		Checks:          make([]*api.AgentServiceCheck, 0, len(service.Checks)),
 	}
 	ops.regServices = append(ops.regServices, serviceReg)
diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go
index c23f46a322a..9ece74fea43 100644
--- a/command/agent/job_endpoint.go
+++ b/command/agent/job_endpoint.go
@@ -404,7 +404,7 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
 	if args.Job.Type != nil && *args.Job.Type == api.JobTypeSystem {
 		for _, tg := range args.Job.TaskGroups {
 			if tg.Scaling != nil {
-				return nil, CodedError(400, "Task groups with job type system do not support scaling stanzas")
+				return nil, CodedError(400, "Task groups with job type system do not support scaling blocks")
 			}
 		}
 	}
diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go
index 163fa5147b6..33ce182c6dc 100644
--- a/command/agent/job_endpoint_test.go
+++ b/command/agent/job_endpoint_test.go
@@ -661,7 +661,7 @@ func TestHTTP_jobUpdate_systemScaling(t *testing.T) {
 		// Make the request
 		obj, err := s.Server.JobSpecificRequest(respW, req)
 		assert.Nil(t, obj)
-		assert.Equal(t, CodedError(400, "Task groups with job type system do not support scaling stanzas"), err)
+		assert.Equal(t, CodedError(400, "Task groups with job type system do not support scaling blocks"), err)
 	})
 }
 
@@ -3517,7 +3517,7 @@ func TestJobs_Matching_Resources(t *testing.T) {
 }
 
 // TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate
-// stanza fails to validate but does not panic (see #5477).
+// block fails to validate but does not panic (see #5477).
 func TestHTTP_JobValidate_SystemMigrate(t *testing.T) {
 	ci.Parallel(t)
 	httpTest(t, nil, func(s *TestAgent) {
@@ -3534,7 +3534,7 @@ func TestHTTP_JobValidate_SystemMigrate(t *testing.T) {
 			// System job...
 			Type: pointer.Of("system"),
 
-			// ...with an empty migrate stanza
+			// ...with an empty migrate block
 			Migrate: &api.MigrateStrategy{},
 		}
diff --git a/command/agent/retry_join.go b/command/agent/retry_join.go
index 5250cd01982..b48da2a8f06 100644
--- a/command/agent/retry_join.go
+++ b/command/agent/retry_join.go
@@ -2,11 +2,10 @@ package agent
 
 import (
 	"fmt"
+	golog "log"
 	"strings"
 	"time"
 
-	golog "log"
-
 	log "github.com/hashicorp/go-hclog"
 )
 
@@ -56,25 +55,25 @@ type retryJoiner struct {
 }
 
 // Validate ensures that the configuration passes validity checks for the
-// retry_join stanza. If the configuration is not valid, returns an error that
+// retry_join block. If the configuration is not valid, returns an error that
 // will be displayed to the operator, otherwise nil.
 func (r *retryJoiner) Validate(config *Config) error {
 
 	// If retry_join is defined for the server, ensure that deprecated
-	// fields and the server_join stanza are not both set
+	// fields and the server_join block are not both set
 	if config.Server != nil && config.Server.ServerJoin != nil && len(config.Server.ServerJoin.RetryJoin) != 0 {
 		if len(config.Server.RetryJoin) != 0 {
-			return fmt.Errorf("server_join and retry_join cannot both be defined; prefer setting the server_join stanza")
+			return fmt.Errorf("server_join and retry_join cannot both be defined; prefer setting the server_join block")
 		}
 		if len(config.Server.StartJoin) != 0 {
-			return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join stanza")
+			return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join block")
 		}
 		if config.Server.RetryMaxAttempts != 0 {
-			return fmt.Errorf("server_join and retry_max cannot both be defined; prefer setting the server_join stanza")
+			return fmt.Errorf("server_join and retry_max cannot both be defined; prefer setting the server_join block")
 		}
 
 		if config.Server.RetryInterval != 0 {
-			return fmt.Errorf("server_join and retry_interval cannot both be defined; prefer setting the server_join stanza")
+			return fmt.Errorf("server_join and retry_interval cannot both be defined; prefer setting the server_join block")
 		}
 
 		if len(config.Server.ServerJoin.StartJoin) != 0 {
diff --git a/command/agent/retry_join_test.go b/command/agent/retry_join_test.go
index a2c27aa3bc9..f349021c435 100644
--- a/command/agent/retry_join_test.go
+++ b/command/agent/retry_join_test.go
@@ -222,7 +222,7 @@ func TestRetryJoin_Validate(t *testing.T) {
 				},
 			},
 			isValid: false,
-			reason:  "server_join cannot be defined if retry_join is defined on the server stanza",
+			reason:  "server_join cannot be defined if retry_join is defined on the server block",
 		},
 		{
 			config: &Config{
@@ -240,7 +240,7 @@ func TestRetryJoin_Validate(t *testing.T) {
 				},
 			},
 			isValid: false,
-			reason:  "server_join cannot be defined if start_join is defined on the server stanza",
+			reason:  "server_join cannot be defined if start_join is defined on the server block",
 		},
 		{
 			config: &Config{
@@ -258,7 +258,7 @@ func TestRetryJoin_Validate(t *testing.T) {
 				},
 			},
 			isValid: false,
-			reason:  "server_join cannot be defined if retry_max_attempts is defined on the server stanza",
+			reason:  "server_join cannot be defined if retry_max_attempts is defined on the server block",
 		},
 		{
 			config: &Config{
@@ -276,7 +276,7 @@ func TestRetryJoin_Validate(t *testing.T) {
 				},
 			},
 			isValid: false,
-			reason:  "server_join cannot be defined if retry_interval is defined on the server stanza",
+			reason:  "server_join cannot be defined if retry_interval is defined on the server block",
 		},
 		{
 			config: &Config{
diff --git a/command/alloc_status.go b/command/alloc_status.go
index 39d7bb1cbe3..335f4f4b931 100644
--- a/command/alloc_status.go
+++ b/command/alloc_status.go
@@ -884,11 +884,11 @@ FOUND:
 	if len(hostVolumesOutput) > 1 {
 		c.Ui.Output("Host Volumes:")
 		c.Ui.Output(formatList(hostVolumesOutput))
-		c.Ui.Output("") // line padding to next stanza
+		c.Ui.Output("") // line padding to next block
 	}
 	if len(csiVolumesOutput) > 1 {
 		c.Ui.Output("CSI Volumes:")
 		c.Ui.Output(formatList(csiVolumesOutput))
-		c.Ui.Output("") // line padding to next stanza
+		c.Ui.Output("") // line padding to next block
 	}
 }
diff --git a/command/assets/connect.nomad b/command/assets/connect.nomad
index 739a75cab3d..8ac0c8dd797 100644
--- a/command/assets/connect.nomad
+++ b/command/assets/connect.nomad
@@ -1,12 +1,12 @@
 # There can only be a single job definition per file. This job is named
 # "countdash" so it will create a job with the ID and Name "countdash".
 
-# The "job" stanza is the top-most configuration option in the job
+# The "job" block is the top-most configuration option in the job
 # specification. A job is a declarative specification of tasks that Nomad
 # should run. Jobs have a globally unique name, one or many task groups, which
 # are themselves collections of one or many tasks.
 #
-# For more information and examples on the "job" stanza, please see
+# For more information and examples on the "job" block, please see
 # the online documentation at:
 #
 #     https://www.nomadproject.io/docs/job-specification/job.html
@@ -31,11 +31,11 @@ job "countdash" {
   #
   type = "service"
 
-  # The "constraint" stanza defines additional constraints for placing this job,
-  # in addition to any resource or driver constraints. This stanza may be placed
+  # The "constraint" block defines additional constraints for placing this job,
+  # in addition to any resource or driver constraints. This block may be placed
   # at the "job", "group", or "task" level, and supports variable interpolation.
   #
-  # For more information and examples on the "constraint" stanza, please see
+  # For more information and examples on the "constraint" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/constraint.html
@@ -45,14 +45,14 @@ job "countdash" {
   #   value     = "linux"
   # }
 
-  # The "update" stanza specifies the update strategy of task groups. The update
+  # The "update" block specifies the update strategy of task groups. The update
   # strategy is used to control things like rolling upgrades, canaries, and
   # blue/green deployments. If omitted, no update strategy is enforced. The
-  # "update" stanza may be placed at the job or task group. When placed at the
+  # "update" block may be placed at the job or task group. When placed at the
   # job, it applies to all groups within the job. When placed at both the job and
-  # group level, the stanzas are merged with the group's taking precedence.
+  # group level, the blocks are merged with the group's taking precedence.
   #
-  # For more information and examples on the "update" stanza, please see
+  # For more information and examples on the "update" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/update.html
@@ -99,10 +99,10 @@ job "countdash" {
     # version is deployed and upon promotion the old version is stopped.
     canary = 0
   }
 
-  # The migrate stanza specifies the group's strategy for migrating off of
+  # The migrate block specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
   #
-  # For more information on the "migrate" stanza, please see
+  # For more information on the "migrate" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/migrate.html
@@ -127,11 +127,11 @@ job "countdash" {
     # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
   }
 
-  # The "group" stanza defines a series of tasks that should be co-located on
+  # The "group" block defines a series of tasks that should be co-located on
   # the same Nomad client. Any task within a group will be placed on the same
   # client.
   #
-  # For more information and examples on the "group" stanza, please see
+  # For more information and examples on the "group" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/group.html
@@ -142,10 +142,10 @@ job "countdash" {
     # to 1.
     count = 1
 
-    # The "restart" stanza configures a group's behavior on task failure. If
+    # The "restart" block configures a group's behavior on task failure. If
     # left unspecified, a default restart policy is used based on the job type.
     #
-    # For more information and examples on the "restart" stanza, please see
+    # For more information and examples on the "restart" block, please see
     # the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/restart.html
@@ -166,12 +166,12 @@ job "countdash" {
       mode = "fail"
     }
 
-    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
-    # instead of a hard disk requirement. Clients using this stanza should
-    # not specify disk requirements in the resources stanza of the task. All
+    # The "ephemeral_disk" block instructs Nomad to utilize an ephemeral disk
+    # instead of a hard disk requirement. Clients using this block should
+    # not specify disk requirements in the resources block of the task. All
     # tasks in this group will share the same ephemeral disk.
     #
-    # For more information and examples on the "ephemeral_disk" stanza, please
+    # For more information and examples on the "ephemeral_disk" block, please
     # see the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
@@ -192,10 +192,10 @@ job "countdash" {
      size = 300
     }
 
-    # The "affinity" stanza enables operators to express placement preferences
+    # The "affinity" block enables operators to express placement preferences
     # based on node attributes or metadata.
     #
-    # For more information and examples on the "affinity" stanza, please
+    # For more information and examples on the "affinity" block, please
     # see the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/affinity.html
@@ -214,11 +214,11 @@ job "countdash" {
    # }
 
-    # The "spread" stanza allows operators to increase the failure tolerance of
+    # The "spread" block allows operators to increase the failure tolerance of
     # their applications by specifying a node attribute that allocations
     # should be spread over.
     #
-    # For more information and examples on the "spread" stanza, please
+    # For more information and examples on the "spread" block, please
     # see the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/spread.html
@@ -239,7 +239,7 @@ job "countdash" {
    #   }
    # }
 
-    # The "network" stanza for a group creates a network namespace shared
+    # The "network" block for a group creates a network namespace shared
     # by all tasks within the group.
     network {
       # "mode" is the CNI plugin used to configure the network namespace.
@@ -255,23 +255,23 @@ job "countdash" {
      #   to = "8080"
      # }
 
-      # The "dns" stanza allows operators to override the DNS configuration
+      # The "dns" block allows operators to override the DNS configuration
       # inherited by the host client.
       # dns {
       #   servers = ["1.1.1.1"]
       # }
     }
 
-    # The "service" stanza enables Consul Connect.
+    # The "service" block enables Consul Connect.
     service {
       name = "count-api"
 
-      # The port in the service stanza is the port the service listens on.
+      # The port in the service block is the port the service listens on.
      # The Envoy proxy will automatically route traffic to that port
      # inside the network namespace. If the application binds to localhost
      # on this port, the task needs no additional network configuration.
      port = "9001"
 
-      # The "check" stanza specifies a health check associated with the service.
+      # The "check" block specifies a health check associated with the service.
       # This can be specified multiple times to define multiple checks for the
       # service. Note that checks run inside the task indicated by the "task"
       # field.
@@ -285,7 +285,7 @@ job "countdash" {
      # }
 
       connect {
-        # The "sidecar_service" stanza configures the Envoy sidecar admission
+        # The "sidecar_service" block configures the Envoy sidecar admission
         # controller. For each task group with a sidecar_service, Nomad will
         # inject an Envoy task into the task group. A group network will be
         # required and a dynamic port will be registered for remote services
@@ -295,10 +295,10 @@ job "countdash" {
        sidecar_service {}
       }
     }
 
-    # The "task" stanza creates an individual unit of work, such as a Docker
+    # The "task" block creates an individual unit of work, such as a Docker
     # container, web application, or batch processing.
     #
-    # For more information and examples on the "task" stanza, please see
+    # For more information and examples on the "task" block, please see
     # the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/task.html
@@ -308,7 +308,7 @@ job "countdash" {
      # run the task.
      driver = "docker"
 
-      # The "config" stanza specifies the driver configuration, which is passed
+      # The "config" block specifies the driver configuration, which is passed
       # directly to the driver to start the task. The details of configurations
       # are specific to each driver, so please see specific driver
       # documentation for more information.
@@ -321,13 +321,13 @@ job "countdash" {
        auth_soft_fail = true
       }
 
-      # The "artifact" stanza instructs Nomad to download an artifact from a
+      # The "artifact" block instructs Nomad to download an artifact from a
       # remote source prior to starting the task. This provides a convenient
       # mechanism for downloading configuration files or data needed to run the
-      # task. It is possible to specify the "artifact" stanza multiple times to
+      # task. It is possible to specify the "artifact" block multiple times to
       # download multiple artifacts.
       #
-      # For more information and examples on the "artifact" stanza, please see
+      # For more information and examples on the "artifact" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/artifact.html
@@ -340,12 +340,12 @@ job "countdash" {
      # }
 
-      # The "logs" stanza instructs the Nomad client on how many log files and
+      # The "logs" block instructs the Nomad client on how many log files and
       # the maximum size of those logs files to retain. Logging is enabled by
-      # default, but the "logs" stanza allows for finer-grained control over
+      # default, but the "logs" block allows for finer-grained control over
       # the log rotation and storage configuration.
       #
-      # For more information and examples on the "logs" stanza, please see
+      # For more information and examples on the "logs" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/logs.html
@@ -355,12 +355,12 @@ job "countdash" {
      #   max_file_size = 15
      # }
 
-      # The "resources" stanza describes the requirements a task needs to
+      # The "resources" block describes the requirements a task needs to
       # execute. Resource requirements include memory, network, cpu, and more.
       # This ensures the task will execute on a machine that contains enough
       # resource capacity.
       #
-      # For more information and examples on the "resources" stanza, please see
+      # For more information and examples on the "resources" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/resources.html
@@ -372,13 +372,13 @@ job "countdash" {
     }
 
     # The Envoy sidecar admission controller will inject an Envoy task into
-    # any task group for each service with a sidecar_service stanza it contains.
+    # any task group for each service with a sidecar_service block it contains.
     # A group network will be required and a dynamic port will be registered for
     # remote services to connect to Envoy with the name `connect-proxy-<service>`.
     # By default, Envoy will be run via its official upstream Docker image.
     #
     # There are two ways to modify the default behavior:
-    #   * Tasks can define a `sidecar_task` stanza in the `connect` stanza
+    #   * Tasks can define a `sidecar_task` block in the `connect` block
     #     that merges into the default sidecar configuration.
     #   * Add the `kind = "connect-proxy:<service>"` field to another task.
     #     That task will replace the default Envoy proxy task entirely.
@@ -406,7 +406,7 @@ job "countdash" {
    #   }
    # }
   }
 
-  # This job has a second "group" stanza to define tasks that might be placed
+  # This job has a second "group" block to define tasks that might be placed
   # on a separate Nomad client from the group above.
   #
   group "dashboard" {
@@ -429,7 +429,7 @@ job "countdash" {
      connect {
        sidecar_service {
          proxy {
-            # The upstreams stanza defines the remote service to access
+            # The upstreams block defines the remote service to access
             # (count-api) and what port to expose that service on inside
             # the network namespace. This allows this task to reach the
             # upstream at localhost:8080.
@@ -440,7 +440,7 @@ job "countdash" {
            }
          }
 
-          # The `sidecar_task` stanza modifies the default configuration
+          # The `sidecar_task` block modifies the default configuration
           # of the Envoy proxy task.
           # sidecar_task {
           #   resources {
diff --git a/command/assets/example.nomad b/command/assets/example.nomad
index 2d72fa6c857..82b1bc1000d 100644
--- a/command/assets/example.nomad
+++ b/command/assets/example.nomad
@@ -1,12 +1,12 @@
 # There can only be a single job definition per file. This job is named
 # "example" so it will create a job with the ID and Name "example".
 
-# The "job" stanza is the top-most configuration option in the job
+# The "job" block is the top-most configuration option in the job
 # specification. A job is a declarative specification of tasks that Nomad
 # should run. Jobs have a globally unique name, one or many task groups, which
 # are themselves collections of one or many tasks.
 #
-# For more information and examples on the "job" stanza, please see
+# For more information and examples on the "job" block, please see
 # the online documentation at:
 #
 #     https://www.nomadproject.io/docs/job-specification/job
@@ -31,11 +31,11 @@ job "example" {
   #
   type = "service"
 
-  # The "constraint" stanza defines additional constraints for placing this job,
-  # in addition to any resource or driver constraints. This stanza may be placed
+  # The "constraint" block defines additional constraints for placing this job,
+  # in addition to any resource or driver constraints. This block may be placed
   # at the "job", "group", or "task" level, and supports variable interpolation.
   #
-  # For more information and examples on the "constraint" stanza, please see
+  # For more information and examples on the "constraint" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/constraint
@@ -45,14 +45,14 @@ job "example" {
   #   value     = "linux"
   # }
 
-  # The "update" stanza specifies the update strategy of task groups. The update
+  # The "update" block specifies the update strategy of task groups. The update
   # strategy is used to control things like rolling upgrades, canaries, and
   # blue/green deployments. If omitted, no update strategy is enforced. The
-  # "update" stanza may be placed at the job or task group. When placed at the
+  # "update" block may be placed at the job or task group. When placed at the
   # job, it applies to all groups within the job. When placed at both the job and
-  # group level, the stanzas are merged with the group's taking precedence.
+  # group level, the blocks are merged with the group's taking precedence.
   #
-  # For more information and examples on the "update" stanza, please see
+  # For more information and examples on the "update" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/update
@@ -99,10 +99,10 @@ job "example" {
     # version is deployed and upon promotion the old version is stopped.
     canary = 0
   }
 
-  # The migrate stanza specifies the group's strategy for migrating off of
+  # The migrate block specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
   #
-  # For more information on the "migrate" stanza, please see
+  # For more information on the "migrate" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/migrate
@@ -127,11 +127,11 @@ job "example" {
     # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
   }
 
-  # The "group" stanza defines a series of tasks that should be co-located on
+  # The "group" block defines a series of tasks that should be co-located on
   # the same Nomad client. Any task within a group will be placed on the same
   # client.
   #
-  # For more information and examples on the "group" stanza, please see
+  # For more information and examples on the "group" block, please see
   # the online documentation at:
   #
   #     https://www.nomadproject.io/docs/job-specification/group
@@ -142,10 +142,10 @@ job "example" {
     # to 1.
     count = 1
 
-    # The "network" stanza specifies the network configuration for the allocation
+    # The "network" block specifies the network configuration for the allocation
     # including requesting port bindings.
     #
-    # For more information and examples on the "network" stanza, please see
+    # For more information and examples on the "network" block, please see
     # the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/network
@@ -156,12 +156,12 @@ job "example" {
      }
     }
 
-    # The "service" stanza instructs Nomad to register this task as a service
+    # The "service" block instructs Nomad to register this task as a service
     # in the service discovery engine, which is currently Nomad or Consul. This
     # will make the service discoverable after Nomad has placed it on a host and
     # port.
     #
-    # For more information and examples on the "service" stanza, please see
+    # For more information and examples on the "service" block, please see
     # the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/service
@@ -172,10 +172,10 @@ job "example" {
      port     = "db"
      provider = "nomad"
 
-      # The "check" stanza instructs Nomad to create a Consul health check for
+      # The "check" block instructs Nomad to create a Consul health check for
       # this service. A sample check is provided here for your convenience;
-      # uncomment it to enable it. The "check" stanza is documented in the
-      # "service" stanza documentation.
+      # uncomment it to enable it. The "check" block is documented in the
+      # "service" block documentation.
      # check {
      #   name     = "alive"
@@ -186,10 +186,10 @@ job "example" {
 
     }
 
-    # The "restart" stanza configures a group's behavior on task failure. If
+    # The "restart" block configures a group's behavior on task failure. If
     # left unspecified, a default restart policy is used based on the job type.
     #
-    # For more information and examples on the "restart" stanza, please see
+    # For more information and examples on the "restart" block, please see
     # the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/restart
@@ -210,12 +210,12 @@ job "example" {
      mode = "fail"
     }
 
-    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
-    # instead of a hard disk requirement. Clients using this stanza should
-    # not specify disk requirements in the resources stanza of the task. All
+    # The "ephemeral_disk" block instructs Nomad to utilize an ephemeral disk
+    # instead of a hard disk requirement. Clients using this block should
+    # not specify disk requirements in the resources block of the task. All
     # tasks in this group will share the same ephemeral disk.
     #
-    # For more information and examples on the "ephemeral_disk" stanza, please
+    # For more information and examples on the "ephemeral_disk" block, please
     # see the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk
@@ -236,10 +236,10 @@ job "example" {
      size = 300
     }
 
-    # The "affinity" stanza enables operators to express placement preferences
+    # The "affinity" block enables operators to express placement preferences
     # based on node attributes or metadata.
     #
-    # For more information and examples on the "affinity" stanza, please
+    # For more information and examples on the "affinity" block, please
     # see the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/affinity
@@ -260,11 +260,11 @@ job "example" {
    # }
 
-    # The "spread" stanza allows operators to increase the failure tolerance of
+    # The "spread" block allows operators to increase the failure tolerance of
     # their applications by specifying a node attribute that allocations
     # should be spread over.
     #
-    # For more information and examples on the "spread" stanza, please
+    # For more information and examples on the "spread" block, please
     # see the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/spread
@@ -285,10 +285,10 @@ job "example" {
    # }
    # }
 
-    # The "task" stanza creates an individual unit of work, such as a Docker
+    # The "task" block creates an individual unit of work, such as a Docker
     # container, web application, or batch processing.
     #
-    # For more information and examples on the "task" stanza, please see
+    # For more information and examples on the "task" block, please see
     # the online documentation at:
     #
     #     https://www.nomadproject.io/docs/job-specification/task
@@ -298,7 +298,7 @@ job "example" {
      # run the task.
      driver = "docker"
 
-      # The "config" stanza specifies the driver configuration, which is passed
+      # The "config" block specifies the driver configuration, which is passed
       # directly to the driver to start the task. The details of configurations
       # are specific to each driver, so please see specific driver
       # documentation for more information.
@@ -312,13 +312,13 @@ job "example" {
        auth_soft_fail = true
       }
 
-      # The "artifact" stanza instructs Nomad to download an artifact from a
+      # The "artifact" block instructs Nomad to download an artifact from a
       # remote source prior to starting the task. This provides a convenient
       # mechanism for downloading configuration files or data needed to run the
-      # task. It is possible to specify the "artifact" stanza multiple times to
+      # task. It is possible to specify the "artifact" block multiple times to
       # download multiple artifacts.
       #
-      # For more information and examples on the "artifact" stanza, please see
+      # For more information and examples on the "artifact" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/artifact
@@ -331,12 +331,12 @@ job "example" {
      # }
 
-      # The "logs" stanza instructs the Nomad client on how many log files and
+      # The "logs" block instructs the Nomad client on how many log files and
       # the maximum size of those logs files to retain. Logging is enabled by
-      # default, but the "logs" stanza allows for finer-grained control over
+      # default, but the "logs" block allows for finer-grained control over
       # the log rotation and storage configuration.
       #
-      # For more information and examples on the "logs" stanza, please see
+      # For more information and examples on the "logs" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/logs
@@ -346,12 +346,12 @@ job "example" {
      #   max_file_size = 15
      # }
 
-      # The "resources" stanza describes the requirements a task needs to
+      # The "resources" block describes the requirements a task needs to
       # execute. Resource requirements include memory, cpu, and more.
       # This ensures the task will execute on a machine that contains enough
       # resource capacity.
       #
-      # For more information and examples on the "resources" stanza, please see
+      # For more information and examples on the "resources" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/resources
@@ -362,11 +362,11 @@ job "example" {
       }
 
-      # The "template" stanza instructs Nomad to manage a template, such as
+      # The "template" block instructs Nomad to manage a template, such as
       # a configuration file or script. This template can optionally pull data
       # from Consul or Vault to populate runtime configuration data.
       #
-      # For more information and examples on the "template" stanza, please see
+      # For more information and examples on the "template" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/template
@@ -378,7 +378,7 @@ job "example" {
      #   change_signal = "SIGHUP"
      # }
 
-      # The "template" stanza can also be used to create environment variables
+      # The "template" block can also be used to create environment variables
       # for tasks that prefer those to config files. The task will be restarted
       # when data pulled from Consul or Vault changes.
       #
@@ -388,14 +388,14 @@ job "example" {
      #   env         = true
      # }
 
-      # The "vault" stanza instructs the Nomad client to acquire a token from
+      # The "vault" block instructs the Nomad client to acquire a token from
       # a HashiCorp Vault server. The Nomad servers must be configured and
       # authorized to communicate with Vault. By default, Nomad will inject
       # the token into the job via an environment variable and make the token
-      # available to the "template" stanza. The Nomad client handles the renewal
+      # available to the "template" block. The Nomad client handles the renewal
       # and revocation of the Vault token.
       #
-      # For more information and examples on the "vault" stanza, please see
+      # For more information and examples on the "vault" block, please see
       # the online documentation at:
       #
       #     https://www.nomadproject.io/docs/job-specification/vault
diff --git a/command/deployment.go b/command/deployment.go
index 56e7ea5c05b..6d20de9f566 100644
--- a/command/deployment.go
+++ b/command/deployment.go
@@ -29,7 +29,7 @@ Usage: nomad deployment <subcommand> [options] [args]
 
       $ nomad deployment promote <deployment-id>
 
   Mark a deployment as failed. This will stop new allocations from being placed
-  and if the job's upgrade stanza specifies auto_revert, causes the job to
+  and if the job's upgrade block specifies auto_revert, causes the job to
   revert back to the last stable version of the job:
 
       $ nomad deployment fail <deployment-id>
diff --git a/command/job_plan.go b/command/job_plan.go
index 7f71295cca1..204899c5267 100644
--- a/command/job_plan.go
+++ b/command/job_plan.go
@@ -100,13 +100,13 @@ Plan Options:
   -vault-token
     Used to validate if the user submitting the job has permission to run the job
     according to its Vault policies. A Vault token must be supplied if the vault
-    stanza allow_unauthenticated is disabled in the Nomad server configuration.
+    block allow_unauthenticated is disabled in the Nomad server configuration.
     If the -vault-token flag is set, the passed Vault token is added to the jobspec
     before sending to the Nomad servers. This allows passing the Vault token
     without storing it in the job file. This overrides the token found in the
     $VAULT_TOKEN environment variable and the vault_token field in the job file.
     This token is cleared from the job after validating and cannot be used within
-    the job executing environment. Use the vault stanza when templating in a job
+    the job executing environment. Use the vault block when templating in a job
     with a Vault token.
 
   -vault-namespace
diff --git a/command/job_run.go b/command/job_run.go
index 3f24b1178e9..7e7090f1646 100644
--- a/command/job_run.go
+++ b/command/job_run.go
@@ -121,13 +121,13 @@ Run Options:
   -vault-token
     Used to validate if the user submitting the job has permission to run the job
     according to its Vault policies. A Vault token must be supplied if the vault
-    stanza allow_unauthenticated is disabled in the Nomad server configuration.
+    block allow_unauthenticated is disabled in the Nomad server configuration.
     If the -vault-token flag is set, the passed Vault token is added to the jobspec
     before sending to the Nomad servers. This allows passing the Vault token
     without storing it in the job file. This overrides the token found in the
     $VAULT_TOKEN environment variable and the vault_token field in the job file.
     This token is cleared from the job after validating and cannot be used within
-    the job executing environment. Use the vault stanza when templating in a job
+    the job executing environment. Use the vault block when templating in a job
Use the vault block when templating in a job with a Vault token. -vault-namespace diff --git a/command/job_validate.go b/command/job_validate.go index afc0ef44684..85031a1b034 100644 --- a/command/job_validate.go +++ b/command/job_validate.go @@ -58,13 +58,13 @@ Validate Options: -vault-token Used to validate if the user submitting the job has permission to run the job according to its Vault policies. A Vault token must be supplied if the vault - stanza allow_unauthenticated is disabled in the Nomad server configuration. + block allow_unauthenticated is disabled in the Nomad server configuration. If the -vault-token flag is set, the passed Vault token is added to the jobspec before sending to the Nomad servers. This allows passing the Vault token without storing it in the job file. This overrides the token found in the $VAULT_TOKEN environment variable and the vault_token field in the job file. This token is cleared from the job after validating and cannot be used within - the job executing environment. Use the vault stanza when templating in a job + the job executing environment. Use the vault block when templating in a job with a Vault token. -vault-namespace diff --git a/command/var.go b/command/var.go index 123ed391a9a..d858f4b9cdb 100644 --- a/command/var.go +++ b/command/var.go @@ -31,7 +31,7 @@ Usage: nomad var [options] [args] This command groups subcommands for interacting with variables. Variables allow operators to provide credentials and otherwise sensitive material to - Nomad jobs at runtime via the template stanza or directly through + Nomad jobs at runtime via the template block or directly through the Nomad API and CLI. Users can create new variables; list, inspect, and delete existing diff --git a/demo/csi/ceph-csi-plugin/README.md b/demo/csi/ceph-csi-plugin/README.md index f00b30ed653..b2d08d246d0 100644 --- a/demo/csi/ceph-csi-plugin/README.md +++ b/demo/csi/ceph-csi-plugin/README.md @@ -21,7 +21,7 @@ Refer to the official plugin * `--endpoint=${CSI_ENDPOINT}`: if you don't use the `CSI_ENDPOINT` environment variable, this option must match the `mount_dir` - specified in the `csi_plugin` stanza for the task. + specified in the `csi_plugin` block for the task. * `--nodeid=${node.unique.id}`: a unique ID for the node the task is running on. diff --git a/demo/csi/cinder-csi-plugin/README.md b/demo/csi/cinder-csi-plugin/README.md index 43b1652d086..f8e8e5b8cd6 100644 --- a/demo/csi/cinder-csi-plugin/README.md +++ b/demo/csi/cinder-csi-plugin/README.md @@ -4,7 +4,7 @@ The containers that run the Node/Controller applications require a cloud-config file be mounted in the containers and the path specified in the containers `args`. -The example plugin job creates a file at `local/cloud.conf` using a [`template`](https://www.nomadproject.io/docs/job-specification/template) stanza which pulls the necessary credentials from a [Vault kv-v2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) secrets store. However, other methods, such as using the [`artifact`](https://www.nomadproject.io/docs/job-specification/artifact) stanza, will work as well for delivering the `cloud.conf` file to the CSI drivers. +The example plugin job creates a file at `local/cloud.conf` using a [`template`](https://www.nomadproject.io/docs/job-specification/template) block which pulls the necessary credentials from a [Vault kv-v2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) secrets store. 
However, other methods, such as using the [`artifact`](https://www.nomadproject.io/docs/job-specification/artifact) block, will work as well for delivering the `cloud.conf` file to the CSI drivers. ### Example cloud.conf @@ -26,7 +26,7 @@ The Cinder CSI Node task requires that [`privileged = true`](https://www.nomadpr * `--endpoint=${CSI_ENDPOINT}`: If you don't use the `CSI_ENDPOINT` environment variable, this option must match the `mount_dir` - specified in the `csi_plugin` stanza for the task. + specified in the `csi_plugin` block for the task. * `--cloud-config=/etc/config/cloud.conf`: The location that the cloud.conf file was mounted inside the container diff --git a/demo/csi/kadalu-csi/controller.nomad b/demo/csi/kadalu-csi/controller.nomad index 1f7742e86cf..5b3dfe90bd2 100644 --- a/demo/csi/kadalu-csi/controller.nomad +++ b/demo/csi/kadalu-csi/controller.nomad @@ -134,7 +134,7 @@ job "kadalu-csi-controller" { } mount { - # If you are not using gluster native quota comment out this stanza + # If you are not using gluster native quota comment out this block type = "bind" source = "./${NOMAD_SECRETS_DIR}/ssh-privatekey" target = "/etc/secret-volume/ssh-privatekey" diff --git a/drivers/docker/driver.go b/drivers/docker/driver.go index 84b295d511f..a041034d735 100644 --- a/drivers/docker/driver.go +++ b/drivers/docker/driver.go @@ -1128,7 +1128,7 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T if mapping, ok := task.Resources.Ports.Get(port); ok { ports.add(mapping.Label, mapping.HostIP, mapping.Value, mapping.To) } else { - return c, fmt.Errorf("Port %q not found, check network stanza", port) + return c, fmt.Errorf("Port %q not found, check network block", port) } } case len(task.Resources.NomadResources.Networks) > 0: diff --git a/e2e/consul/on_update.go b/e2e/consul/on_update.go index b8caa6f4f6c..39cd34463b2 100644 --- a/e2e/consul/on_update.go +++ b/e2e/consul/on_update.go @@ -69,7 +69,7 @@ func (tc *OnUpdateChecksTest) TestOnUpdateCheck_IgnoreWarning_IgnoreErrors(f *fr } // TestOnUpdate_CheckRestart ensures that a service check set to ignore -// warnings still follows the check_restart stanza if the task becomes +// warnings still follows the check_restart block if the task becomes // unhealthy after a deployment is successful. on_update_check_restart has a // script check that should report as a warning status for the deployment to // become healthy. 
The script check then reports unhealthy and the diff --git a/e2e/consultemplate/consultemplate.go b/e2e/consultemplate/consultemplate.go index e92f1ca24ac..f2079162e73 100644 --- a/e2e/consultemplate/consultemplate.go +++ b/e2e/consultemplate/consultemplate.go @@ -107,7 +107,7 @@ job: {{ env "NOMAD_JOB_NAME" }} _, err := tc.Consul().KV().Delete(key, nil) f.NoError(err) - // Parse job so we can replace the template stanza with isolated keys + // Parse job so we can replace the template block with isolated keys job, err := jobspec.ParseFile("consultemplate/input/templating.nomad") f.NoError(err) job.ID = &jobID diff --git a/jobspec/parse.go b/jobspec/parse.go index aad9d9fbe1b..b6943d78d3b 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -56,7 +56,7 @@ func Parse(r io.Reader) (*api.Job, error) { // Parse the job out matches := list.Filter("job") if len(matches.Items) == 0 { - return nil, fmt.Errorf("'job' stanza not found") + return nil, fmt.Errorf("'job' block not found") } if err := parseJob(&job, matches); err != nil { return nil, fmt.Errorf("error parsing 'job': %s", err) diff --git a/jobspec/parse_job.go b/jobspec/parse_job.go index b59b3b59456..eb98bc2dc28 100644 --- a/jobspec/parse_job.go +++ b/jobspec/parse_job.go @@ -128,7 +128,7 @@ func parseJob(result *api.Job, list *ast.ObjectList) error { } } - // If we have a reschedule stanza, then parse that + // If we have a reschedule block, then parse that if o := listVal.Filter("reschedule"); len(o.Items) > 0 { if err := parseReschedulePolicy(&result.Reschedule, o); err != nil { return multierror.Prefix(err, "reschedule ->") diff --git a/jobspec/parse_network.go b/jobspec/parse_network.go index a97bea38fae..2d27b63ac5c 100644 --- a/jobspec/parse_network.go +++ b/jobspec/parse_network.go @@ -53,7 +53,7 @@ func ParseNetwork(o *ast.ObjectList) (*api.NetworkResource, error) { // Filter dns if dns := networkObj.Filter("dns"); len(dns.Items) > 0 { if len(dns.Items) > 1 { - return nil, multierror.Prefix(fmt.Errorf("cannot have more than 1 dns stanza"), "network ->") + return nil, multierror.Prefix(fmt.Errorf("cannot have more than 1 dns block"), "network ->") } d, err := parseDNS(dns.Items[0]) diff --git a/jobspec/parse_service.go b/jobspec/parse_service.go index 7dc10214b38..5d6858bb2f7 100644 --- a/jobspec/parse_service.go +++ b/jobspec/parse_service.go @@ -105,7 +105,7 @@ func parseService(o *ast.ObjectItem) (*api.Service, error) { // Filter connect if co := listVal.Filter("connect"); len(co.Items) > 0 { if len(co.Items) > 1 { - return nil, fmt.Errorf("connect '%s': cannot have more than 1 connect stanza", service.Name) + return nil, fmt.Errorf("connect '%s': cannot have more than 1 connect block", service.Name) } c, err := parseConnect(co.Items[0]) if err != nil { @@ -290,7 +290,7 @@ func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) { // extract and parse the ingress block if io := listVal.Filter("ingress"); len(io.Items) > 0 { if len(io.Items) > 1 { - return nil, fmt.Errorf("ingress, %s", "multiple ingress stanzas not allowed") + return nil, fmt.Errorf("ingress, %s", "multiple ingress blocks not allowed") } ingress, err := parseIngressConfigEntry(io.Items[0]) @@ -302,7 +302,7 @@ func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) { if to := listVal.Filter("terminating"); len(to.Items) > 0 { if len(to.Items) > 1 { - return nil, fmt.Errorf("terminating, %s", "multiple terminating stanzas not allowed") + return nil, fmt.Errorf("terminating, %s", "multiple terminating blocks not allowed") } terminating, err := 
parseTerminatingConfigEntry(to.Items[0]) @@ -314,7 +314,7 @@ func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) { if mo := listVal.Filter("mesh"); len(mo.Items) > 0 { if len(mo.Items) > 1 { - return nil, fmt.Errorf("mesh, %s", "multiple mesh stanzas not allowed") + return nil, fmt.Errorf("mesh, %s", "multiple mesh blocks not allowed") } // mesh should have no keys @@ -1033,7 +1033,7 @@ func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error { return err } - // HCL allows repeating stanzas so merge 'header' into a single + // HCL allows repeating blocks so merge 'header' into a single // map[string][]string. if headerI, ok := cm["header"]; ok { headerRaw, ok := headerI.([]map[string]interface{}) diff --git a/jobspec/parse_task.go b/jobspec/parse_task.go index 4465b8f24a4..018f27149bf 100644 --- a/jobspec/parse_task.go +++ b/jobspec/parse_task.go @@ -154,7 +154,7 @@ func parseTask(item *ast.ObjectItem, keys []string) (*api.Task, error) { if o := listVal.Filter("csi_plugin"); len(o.Items) > 0 { if len(o.Items) != 1 { - return nil, fmt.Errorf("csi_plugin -> Expected single stanza, got %d", len(o.Items)) + return nil, fmt.Errorf("csi_plugin -> Expected single block, got %d", len(o.Items)) } i := o.Elem().Items[0] @@ -497,7 +497,7 @@ func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error { if o := listVal.Filter("change_script"); len(o.Items) > 0 { if len(o.Items) != 1 { return fmt.Errorf( - "change_script -> expected single stanza, got %d", len(o.Items), + "change_script -> expected single block, got %d", len(o.Items), ) } var m map[string]interface{} diff --git a/nomad/deploymentwatcher/deployment_watcher.go b/nomad/deploymentwatcher/deployment_watcher.go index 36a8f4d7c0f..cbd5c09a52b 100644 --- a/nomad/deploymentwatcher/deployment_watcher.go +++ b/nomad/deploymentwatcher/deployment_watcher.go @@ -612,7 +612,7 @@ func (w *deploymentWatcher) handleAllocUpdate(allocs []*structs.AllocListStub) ( continue } - // Determine if the update stanza for this group is progress based + // Determine if the update block for this group is progress based progressBased := dstate.ProgressDeadline != 0 // Check if the allocation has failed and we need to mark it for allow diff --git a/nomad/fsm.go b/nomad/fsm.go index 838f1c812e8..9a9520f710c 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -540,7 +540,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index * un-intended destructive updates in scheduler since we use * reflect.DeepEqual. Starting Nomad 0.4.1, job submission sanitizes * the incoming job. - * - Migrate from old style upgrade stanza that used only a stagger. + * - Migrate from old style upgrade block that used only a stagger. */ req.Job.Canonicalize() @@ -1523,7 +1523,7 @@ func (n *nomadFSM) restoreImpl(old io.ReadCloser, filter *FSMFilter) error { * - Empty maps and slices should be treated as nil to avoid * un-intended destructive updates in scheduler since we use * reflect.DeepEqual. Job submission sanitizes the incoming job. - * - Migrate from old style upgrade stanza that used only a stagger. + * - Migrate from old style upgrade block that used only a stagger. 
*/ job.Canonicalize() if err := restore.JobRestore(job); err != nil { diff --git a/nomad/job_endpoint_hook_connect.go b/nomad/job_endpoint_hook_connect.go index 728729e80b1..e32239fd205 100644 --- a/nomad/job_endpoint_hook_connect.go +++ b/nomad/job_endpoint_hook_connect.go @@ -332,7 +332,7 @@ func groupConnectHook(job *structs.Job, g *structs.TaskGroup) error { task := newConnectGatewayTask(prefix, service.Name, netHost, customizedTLS) g.Tasks = append(g.Tasks, task) - // the connect.sidecar_task stanza can also be used to configure + // the connect.sidecar_task block can also be used to configure // a custom task to use as a gateway proxy if service.Connect.SidecarTask != nil { service.Connect.SidecarTask.MergeIntoTask(task) diff --git a/nomad/job_endpoint_hooks.go b/nomad/job_endpoint_hooks.go index 1fe5dbe9f1f..8c6c5c1bf66 100644 --- a/nomad/job_endpoint_hooks.go +++ b/nomad/job_endpoint_hooks.go @@ -138,7 +138,7 @@ func (jobCanonicalizer) Mutate(job *structs.Job) (*structs.Job, []error, error) } // jobImpliedConstraints adds constraints to a job implied by other job fields -// and stanzas. +// and blocks. type jobImpliedConstraints struct{} func (jobImpliedConstraints) Name() string { diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 655baf9c6a1..9562167ad28 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -829,7 +829,7 @@ func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) { require.Equal("connect-proxy:backend", string(sidecarTask.Kind)) require.Equal("connect-proxy-backend", out.TaskGroups[0].Networks[0].DynamicPorts[0].Label) - // Check that the correct fields were overridden from the sidecar_task stanza + // Check that the correct fields were overridden from the sidecar_task block require.Equal("test", sidecarTask.Meta["source"]) require.Equal(500, sidecarTask.Resources.CPU) require.Equal(connectSidecarResources().MemoryMB, sidecarTask.Resources.MemoryMB) @@ -6316,7 +6316,7 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) { } // TestJobEndpoint_Plan_Scaling asserts that the plan endpoint handles -// jobs with scaling stanza +// jobs with scaling block func TestJobEndpoint_Plan_Scaling(t *testing.T) { ci.Parallel(t) diff --git a/nomad/structs/config/artifact.go b/nomad/structs/config/artifact.go index f21e3fee83d..3e1beef61cd 100644 --- a/nomad/structs/config/artifact.go +++ b/nomad/structs/config/artifact.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/nomad/helper/pointer" ) -// ArtifactConfig is the configuration specific to the Artifact stanza +// ArtifactConfig is the configuration specific to the Artifact block type ArtifactConfig struct { // HTTPReadTimeout is the duration in which a download must complete or // it will be canceled. Defaults to 30m. diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 2f2d3423b6d..5e4c8996de4 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -20,11 +20,11 @@ const CSISocketName = "csi.sock" // where Nomad will expect plugins to create intermediary mounts for volumes. const CSIIntermediaryDirname = "volumes" -// VolumeTypeCSI is the type in the volume stanza of a TaskGroup +// VolumeTypeCSI is the type in the volume block of a TaskGroup const VolumeTypeCSI = "csi" // CSIPluginType is an enum string that encapsulates the valid options for a -// CSIPlugin stanza's Type. These modes will allow the plugin to be used in +// CSIPlugin block's Type. These modes will allow the plugin to be used in // different ways by the client. 
type CSIPluginType string diff --git a/nomad/structs/network.go b/nomad/structs/network.go index 81ae54cc7a1..544ca2c41c9 100644 --- a/nomad/structs/network.go +++ b/nomad/structs/network.go @@ -497,9 +497,9 @@ func incIP(ip net.IP) { } // AssignPorts based on an ask from the scheduler processing a group.network -// stanza. Supports multi-interfaces through node configured host_networks. +// block. Supports multi-interfaces through node configured host_networks. // -// AssignTaskNetwork supports the deprecated task.resources.network stanza. +// AssignTaskNetwork supports the deprecated task.resources.network block. func (idx *NetworkIndex) AssignPorts(ask *NetworkResource) (AllocatedPorts, error) { var offer AllocatedPorts diff --git a/nomad/structs/operator.go b/nomad/structs/operator.go index a6cfced9c71..667890eeaa5 100644 --- a/nomad/structs/operator.go +++ b/nomad/structs/operator.go @@ -135,7 +135,7 @@ func (a *AutopilotConfig) Copy() *AutopilotConfig { } // SchedulerAlgorithm is an enum string that encapsulates the valid options for a -// SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the +// SchedulerConfiguration block's SchedulerAlgorithm. These modes will allow the // scheduler to be user-selectable. type SchedulerAlgorithm string diff --git a/nomad/structs/services.go b/nomad/structs/services.go index fa3f99f8d38..a2b8a4493f1 100644 --- a/nomad/structs/services.go +++ b/nomad/structs/services.go @@ -84,7 +84,7 @@ func (sc *ServiceCheck) IsReadiness() bool { return sc != nil && sc.OnUpdate == OnUpdateIgnore } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (sc *ServiceCheck) Copy() *ServiceCheck { if sc == nil { return nil @@ -595,7 +595,7 @@ type Service struct { Provider string } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (s *Service) Copy() *Service { if s == nil { return nil @@ -953,7 +953,7 @@ func (s *Service) Equal(o *Service) bool { return true } -// ConsulConnect represents a Consul Connect jobspec stanza. +// ConsulConnect represents a Consul Connect jobspec block. type ConsulConnect struct { // Native indicates whether the service is Consul Connect Native enabled. Native bool @@ -968,7 +968,7 @@ type ConsulConnect struct { Gateway *ConsulGateway } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (c *ConsulConnect) Copy() *ConsulConnect { if c == nil { return nil @@ -1085,7 +1085,7 @@ func (c *ConsulConnect) Validate() error { } // ConsulSidecarService represents a Consul Connect SidecarService jobspec -// stanza. +// block. type ConsulSidecarService struct { // Tags are optional service tags that get registered with the sidecar service // in Consul. If unset, the sidecar service inherits the parent service tags. @@ -1095,7 +1095,7 @@ type ConsulSidecarService struct { // a port label or a literal port number. Port string - // Proxy stanza defining the sidecar proxy configuration. + // Proxy block defining the sidecar proxy configuration. Proxy *ConsulProxy // DisableDefaultTCPCheck, if true, instructs Nomad to avoid setting a @@ -1108,7 +1108,7 @@ func (s *ConsulSidecarService) HasUpstreams() bool { return s != nil && s.Proxy != nil && len(s.Proxy.Upstreams) > 0 } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. 
func (s *ConsulSidecarService) Copy() *ConsulSidecarService { if s == nil { return nil @@ -1143,7 +1143,7 @@ func (s *ConsulSidecarService) Equal(o *ConsulSidecarService) bool { } // SidecarTask represents a subset of Task fields that are able to be overridden -// from the sidecar_task stanza +// from the sidecar_task block type SidecarTask struct { // Name of the task Name string @@ -1337,7 +1337,7 @@ func (t *SidecarTask) MergeIntoTask(task *Task) { } } -// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza. +// ConsulProxy represents a Consul Connect sidecar proxy jobspec block. type ConsulProxy struct { // LocalServiceAddress is the address the local service binds to. @@ -1354,7 +1354,7 @@ type ConsulProxy struct { // connect to. Upstreams []ConsulUpstream - // Expose configures the consul proxy.expose stanza to "open up" endpoints + // Expose configures the consul proxy.expose block to "open up" endpoints // used by task-group level service checks using HTTP or gRPC protocols. // // Use json tag to match with field name in api/ @@ -1365,7 +1365,7 @@ type ConsulProxy struct { Config map[string]interface{} } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (p *ConsulProxy) Copy() *ConsulProxy { if p == nil { return nil @@ -1460,7 +1460,7 @@ func (c *ConsulMeshGateway) Validate() error { } } -// ConsulUpstream represents a Consul Connect upstream jobspec stanza. +// ConsulUpstream represents a Consul Connect upstream jobspec block. type ConsulUpstream struct { // DestinationName is the name of the upstream service. DestinationName string @@ -1524,7 +1524,7 @@ func upstreamsEquals(a, b []ConsulUpstream) bool { return setA.Equal(setB) } -// ConsulExposeConfig represents a Consul Connect expose jobspec stanza. +// ConsulExposeConfig represents a Consul Connect expose jobspec block. type ConsulExposeConfig struct { // Use json tag to match with field name in api/ Paths []ConsulExposePath `json:"Path"` @@ -1541,7 +1541,7 @@ func exposePathsEqual(a, b []ConsulExposePath) bool { return helper.SliceSetEq(a, b) } -// Copy the stanza. Returns nil if e is nil. +// Copy the block. Returns nil if e is nil. 
func (e *ConsulExposeConfig) Copy() *ConsulExposeConfig { if e == nil { return nil diff --git a/nomad/structs/services_test.go b/nomad/structs/services_test.go index c9b2093d18d..bad1dd57af0 100644 --- a/nomad/structs/services_test.go +++ b/nomad/structs/services_test.go @@ -490,7 +490,7 @@ func TestConsulConnect_Validate(t *testing.T) { c := &ConsulConnect{} - // An empty Connect stanza is invalid + // An empty Connect block is invalid require.Error(t, c.Validate()) c.Native = true @@ -1004,7 +1004,7 @@ func TestConsulGateway_Equal_ingress(t *testing.T) { require.True(t, modifiable.Equal(modifiable)) } - // proxy stanza equality checks + // proxy block equality checks t.Run("mod gateway timeout", func(t *testing.T) { try(t, func(g *cg) { g.Proxy.ConnectTimeout = pointer.Of(9 * time.Second) }) @@ -1090,7 +1090,7 @@ func TestConsulGateway_Equal_terminating(t *testing.T) { require.True(t, modifiable.Equal(modifiable)) } - // proxy stanza equality checks + // proxy block equality checks t.Run("mod dns discovery type", func(t *testing.T) { try(t, func(g *cg) { g.Proxy.EnvoyDNSDiscoveryType = "LOGICAL_DNS" }) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index c9056716ea7..d6ae0f31871 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -2690,7 +2690,7 @@ func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) { } type Port struct { - // Label is the key for HCL port stanzas: port "foo" {} + // Label is the key for HCL port blocks: port "foo" {} Label string // Value is the static or dynamic port value. For dynamic ports this @@ -3002,7 +3002,7 @@ type NodeResources struct { // Networks is the node's bridge network and default interface. It is // only used when scheduling jobs with a deprecated - // task.resources.network stanza. + // task.resources.network block. Networks Networks // MinDynamicPort and MaxDynamicPort represent the inclusive port range @@ -4235,7 +4235,7 @@ type Job struct { TaskGroups []*TaskGroup // See agent.ApiJobToStructJob - // Update provides defaults for the TaskGroup Update stanzas + // Update provides defaults for the TaskGroup Update blocks Update UpdateStrategy Multiregion *Multiregion @@ -4294,7 +4294,7 @@ type Job struct { // of a deployment and can be manually set via APIs. This field is updated // when the status of a corresponding deployment transitions to Failed // or Successful. This field is not meaningful for jobs that don't have an - // update stanza. + // update block. 
Stable bool // Version is a monotonically increasing version number that is incremented @@ -4460,7 +4460,7 @@ func (j *Job) Validate() error { } if j.Type == JobTypeSystem { if j.Affinities != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range j.Affinities { @@ -4473,7 +4473,7 @@ func (j *Job) Validate() error { if j.Type == JobTypeSystem { if j.Spreads != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block")) } } else { for idx, spread := range j.Spreads { @@ -6492,7 +6492,7 @@ func (tg *TaskGroup) Validate(j *Job) error { } if j.Type == JobTypeSystem { if tg.Affinities != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range tg.Affinities { @@ -6513,7 +6513,7 @@ func (tg *TaskGroup) Validate(j *Job) error { if j.Type == JobTypeSystem { if tg.Spreads != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block")) } } else { for idx, spread := range tg.Spreads { @@ -7389,7 +7389,7 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices if jobType == JobTypeSystem { if t.Affinities != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range t.Affinities { @@ -7459,9 +7459,9 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices // Validation for TaskKind field which is used for Consul Connect integration if t.Kind.IsConnectProxy() { - // This task is a Connect proxy so it should not have service stanzas + // This task is a Connect proxy so it should not have service blocks if len(t.Services) > 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service block")) } if t.Leader { mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set")) @@ -7654,7 +7654,7 @@ func (t *Task) Warnings() error { // Validate the resources if t.Resources != nil && t.Resources.IOPS != 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza.")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource block.")) } if t.Resources != nil && len(t.Resources.Networks) != 0 { @@ -7981,7 +7981,7 @@ func (t *Template) Warnings() error { // Deprecation notice for vault_grace if t.VaultGrace != 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza.")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. 
Please remove VaultGrace / vault_grace from template block.")) } return mErr.ErrorOrNil() @@ -9151,7 +9151,7 @@ func (s *Spread) Validate() error { mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute")) } if s.Weight <= 0 || s.Weight > 100 { - mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 0 to 100")) + mErr.Errors = append(mErr.Errors, errors.New("Spread block must have a positive weight from 0 to 100")) } seen := make(map[string]struct{}) sumPercent := uint32(0) diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index d00cddca8b8..584b7c8a606 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -180,7 +180,7 @@ func TestJob_Warnings(t *testing.T) { Expected []string }{ { - Name: "Higher counts for update stanza", + Name: "Higher counts for update block", Expected: []string{"max parallel count is greater"}, Job: &Job{ Type: JobTypeService, @@ -237,7 +237,7 @@ func TestJob_Warnings(t *testing.T) { }, { Name: "Template.VaultGrace Deprecated", - Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."}, + Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template block."}, Job: &Job{ Type: JobTypeService, TaskGroups: []*TaskGroup{ @@ -577,7 +577,7 @@ func TestJob_SystemJob_Validate(t *testing.T) { }} err = j.Validate() require.NotNil(t, err) - require.Contains(t, err.Error(), "System jobs may not have an affinity stanza") + require.Contains(t, err.Error(), "System jobs may not have an affinity block") // Add spread at job and task group level, that should fail validation j.Spreads = []*Spread{{ @@ -591,7 +591,7 @@ func TestJob_SystemJob_Validate(t *testing.T) { err = j.Validate() require.NotNil(t, err) - require.Contains(t, err.Error(), "System jobs may not have a spread stanza") + require.Contains(t, err.Error(), "System jobs may not have a spread block") } @@ -2400,7 +2400,7 @@ func TestTask_Validate_ConnectProxyKind(t *testing.T) { Service: &Service{ Name: "redis", }, - ErrContains: "Connect proxy task must not have a service stanza", + ErrContains: "Connect proxy task must not have a service block", }, { Desc: "Leader should not be set", @@ -2425,7 +2425,7 @@ func TestTask_Validate_ConnectProxyKind(t *testing.T) { ErrContains: `No Connect services in task group with Connect proxy ("redis")`, }, { - Desc: "Connect stanza not configured in group", + Desc: "Connect block not configured in group", Kind: "connect-proxy:redis", TgService: []*Service{{ Name: "redis", @@ -6536,7 +6536,7 @@ func TestSpread_Validate(t *testing.T) { Attribute: "${node.datacenter}", Weight: -1, }, - err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), + err: fmt.Errorf("Spread block must have a positive weight from 0 to 100"), name: "Invalid weight", }, { @@ -6544,7 +6544,7 @@ func TestSpread_Validate(t *testing.T) { Attribute: "${node.datacenter}", Weight: 110, }, - err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), + err: fmt.Errorf("Spread block must have a positive weight from 0 to 100"), name: "Invalid weight", }, { diff --git a/plugins/device/cmd/example/README.md b/plugins/device/cmd/example/README.md index 04fbe833dc4..036aa515b3f 100644 --- a/plugins/device/cmd/example/README.md +++ b/plugins/device/cmd/example/README.md @@ -7,7 +7,7 @@ The example device 
plugin models files within a specified directory as devices. # Config -The configuration should be passed via an HCL file that begins with a top level `config` stanza: +The configuration should be passed via an HCL file that begins with a top level `config` block: ``` config { diff --git a/plugins/drivers/driver.go b/plugins/drivers/driver.go index 505591ea1af..616e4f7801c 100644 --- a/plugins/drivers/driver.go +++ b/plugins/drivers/driver.go @@ -405,7 +405,7 @@ type LinuxResources struct { // and thus the calculation for CPUQuota cannot be done on the client. // This is a compatibility measure and should only be used by docker until the docker // specific options are deprecated in favor of exposing CPUPeriod and - // CPUQuota at the task resource stanza. + // CPUQuota at the task resource block. PercentTicks float64 } diff --git a/plugins/drivers/proto/driver.pb.go b/plugins/drivers/proto/driver.pb.go index d956f718c3b..180d5a2340d 100644 --- a/plugins/drivers/proto/driver.pb.go +++ b/plugins/drivers/proto/driver.pb.go @@ -308,7 +308,7 @@ func (m *TaskConfigSchemaRequest) XXX_DiscardUnknown() { var xxx_messageInfo_TaskConfigSchemaRequest proto.InternalMessageInfo type TaskConfigSchemaResponse struct { - // Spec is the configuration schema for the job driver config stanza + // Spec is the configuration schema for the job driver config block Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/plugins/drivers/proto/driver.proto b/plugins/drivers/proto/driver.proto index 955c79be0c6..8511e61c279 100644 --- a/plugins/drivers/proto/driver.proto +++ b/plugins/drivers/proto/driver.proto @@ -91,7 +91,7 @@ message TaskConfigSchemaRequest {} message TaskConfigSchemaResponse { - // Spec is the configuration schema for the job driver config stanza + // Spec is the configuration schema for the job driver config block hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1; } diff --git a/scheduler/generic_sched.go b/scheduler/generic_sched.go index 4f0f5b88900..8180d3080bf 100644 --- a/scheduler/generic_sched.go +++ b/scheduler/generic_sched.go @@ -492,7 +492,7 @@ func (s *GenericScheduler) downgradedJobForPlacement(p placementResult) (string, } } - // check if the non-promoted version is a job without update stanza. This version should be the latest "stable" version, + // check if the non-promoted version is a job without update block. This version should be the latest "stable" version, // as all subsequent versions must be canaried deployments. Otherwise, we would have found a deployment above, // or the alloc would have been replaced already by a newer non-deployment job. if job, err := s.state.JobByIDAndVersion(nil, ns, jobID, p.MinJobVersion()); err == nil && job != nil && job.Update.IsEmpty() { diff --git a/scheduler/preemption.go b/scheduler/preemption.go index d45e7a2f42d..ee524838d18 100644 --- a/scheduler/preemption.go +++ b/scheduler/preemption.go @@ -9,7 +9,7 @@ import ( // maxParallelPenalty is a score penalty applied to allocations to mitigate against // too many allocations of the same job being preempted. This penalty is applied after the
This penalty is applied after the -// number of allocations being preempted exceeds max_parallel value in the job's migrate stanza +// number of allocations being preempted exceeds max_parallel value in the job's migrate block const maxParallelPenalty = 50.0 type groupedAllocs struct { diff --git a/scheduler/propertyset.go b/scheduler/propertyset.go index e2325744e61..5d726acdf2c 100644 --- a/scheduler/propertyset.go +++ b/scheduler/propertyset.go @@ -98,13 +98,13 @@ func (p *propertySet) setConstraint(constraint *structs.Constraint, taskGroup st } // SetTargetAttribute is used to populate this property set without also storing allowed count -// This is used when evaluating spread stanzas +// This is used when evaluating spread blocks func (p *propertySet) SetTargetAttribute(targetAttribute string, taskGroup string) { p.setTargetAttributeWithCount(targetAttribute, 0, taskGroup) } // setTargetAttributeWithCount is a shared helper for setting a job or task group attribute and allowedCount -// allowedCount can be zero when this is used in evaluating spread stanzas +// allowedCount can be zero when this is used in evaluating spread blocks func (p *propertySet) setTargetAttributeWithCount(targetAttribute string, allowedCount uint64, taskGroup string) { // Store that this is for a task group if taskGroup != "" { diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index d30e65a5ca9..e3a9f01d36b 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -1421,8 +1421,8 @@ func TestReconciler_MultiTG(t *testing.T) { } // Tests the reconciler properly handles jobs with multiple task groups with -// only one having an update stanza and a deployment already being created -func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) { +// only one having an update block and a deployment already being created +func TestReconciler_MultiTG_SingleUpdateBlock(t *testing.T) { ci.Parallel(t) job := mock.Job() @@ -1957,7 +1957,7 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 1, Interval: 24 * time.Hour, @@ -2040,7 +2040,7 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 1, Interval: 24 * time.Hour, @@ -2122,7 +2122,7 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 1, Interval: 24 * time.Hour, @@ -2206,7 +2206,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 1, Interval: 24 * time.Hour, @@ -2317,7 +2317,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block 
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Delay: 5 * time.Second, DelayFunction: "constant", @@ -2445,7 +2445,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 1, Interval: 24 * time.Hour, @@ -4989,7 +4989,7 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { job.TaskGroups[0].Count = 5 tgName := job.TaskGroups[0].Name - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 1, Interval: 24 * time.Hour, @@ -5068,7 +5068,7 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - // Set up reschedule policy and update stanza + // Set up reschedule policy and update block job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ Attempts: 0, Interval: 24 * time.Hour, diff --git a/scheduler/spread.go b/scheduler/spread.go index 842251c28ba..5bc50b97041 100644 --- a/scheduler/spread.go +++ b/scheduler/spread.go @@ -27,7 +27,7 @@ type SpreadIterator struct { tgSpreadInfo map[string]spreadAttributeMap // sumSpreadWeights tracks the total weight across all spread - // stanzas + // blocks sumSpreadWeights int32 // hasSpread is used to early return when the job/task group @@ -248,7 +248,7 @@ func (iter *SpreadIterator) computeSpreadInfo(tg *structs.TaskGroup) { spreadInfos := make(spreadAttributeMap, len(tg.Spreads)) totalCount := tg.Count - // Always combine any spread stanzas defined at the job level here + // Always combine any spread blocks defined at the job level here combinedSpreads := make([]*structs.Spread, 0, len(tg.Spreads)+len(iter.jobSpreads)) combinedSpreads = append(combinedSpreads, tg.Spreads...) combinedSpreads = append(combinedSpreads, iter.jobSpreads...) diff --git a/scheduler/stack.go b/scheduler/stack.go index c48fc78f310..cbab8d469ac 100644 --- a/scheduler/stack.go +++ b/scheduler/stack.go @@ -420,10 +420,10 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack { // node where the allocation failed previously s.nodeReschedulingPenalty = NewNodeReschedulingPenaltyIterator(ctx, s.jobAntiAff) - // Apply scores based on affinity stanza + // Apply scores based on affinity block s.nodeAffinity = NewNodeAffinityIterator(ctx, s.nodeReschedulingPenalty) - // Apply scores based on spread stanza + // Apply scores based on spread block s.spread = NewSpreadIterator(ctx, s.nodeAffinity) // Add the preemption options scoring iterator diff --git a/scheduler/util.go b/scheduler/util.go index 2db7a3927cf..4e7d2bd438a 100644 --- a/scheduler/util.go +++ b/scheduler/util.go @@ -615,7 +615,7 @@ func consulNamespaceUpdated(tgA, tgB *structs.TaskGroup) bool { return tgA.Consul.GetNamespace() != tgB.Consul.GetNamespace() } -// connectServiceUpdated returns true if any services with a connect stanza have +// connectServiceUpdated returns true if any services with a connect block have // been changed in such a way that requires a destructive update. 
// // Ordinary services can be updated in-place by updating the service definition diff --git a/website/content/api-docs/json-jobs.mdx b/website/content/api-docs/json-jobs.mdx index 863334a0b96..a8ea3c0ab29 100644 --- a/website/content/api-docs/json-jobs.mdx +++ b/website/content/api-docs/json-jobs.mdx @@ -261,7 +261,7 @@ The `Job` object supports the following keys: token and is not stored after job submission. - `VaultToken` - Specifies the Vault token that proves the submitter of the job - has access to the specified policies in the `vault` stanza. This field is + has access to the specified policies in the `vault` block. This field is only used to transfer the token and is not stored after job submission. - `Namespace` - The namespace to execute the job in, defaults to "default". @@ -303,7 +303,7 @@ The `Job` object supports the following keys: - `Update` - Specifies an update strategy to be applied to all task groups within the job. When specified both at the job level and the task group level, the update blocks are merged with the task group's taking precedence. For more - details on the update stanza, please see below. + details on the update block, please see below. - `Periodic` - `Periodic` allows the job to be scheduled at fixed times, dates or intervals. The periodic expression is always evaluated in the UTC @@ -403,7 +403,7 @@ attributes: - `Update` - Specifies an update strategy to be applied to all task groups within the job. When specified both at the job level and the task group level, the update blocks are merged with the task group's taking precedence. For more - details on the update stanza, please see below. + details on the update block, please see below. - `Tasks` - A list of `Task` objects that are part of the task group. @@ -569,7 +569,7 @@ The `Task` object supports the following keys: - `PortLabel`: Specifies the label of the port on which the check will be performed. Note this is the _label_ of the port and not the port number unless `AddressMode: "driver"`. The port label must match one - defined in the Network stanza. If a port value was declared on the + defined in the Network block. If a port value was declared on the `Service`, this will inherit from that value if not supplied. If supplied, this value takes precedence over the `Service.PortLabel` value. This is useful for services which operate on multiple ports. @@ -759,9 +759,9 @@ The `RestartPolicy` object supports the following keys: ### Update Specifies the task group update strategy. When omitted, rolling updates are -disabled. The update stanza can be specified at the job or task group level. -When specified at the job, the update stanza is inherited by all task groups. -When specified in both the job and in a task group, the stanzas are merged with +disabled. The update block can be specified at the job or task group level. +When specified at the job, the update block is inherited by all task groups. +When specified in both the job and in a task group, the blocks are merged with the task group's taking precedence. The `Update` object supports the following attributes: @@ -1070,13 +1070,13 @@ README][ct]. executed on template change. Path is relative to the driver, e.g., if running with a container driver the path must already exist in the container. This option is required if the `change_mode` is `script`. - + - `Args` - List of arguments that are passed to the script that is to be - executed on template change.
+ - `Timeout` - Timeout for script execution specified using a label suffix like "30s" or "1h". Default value is `"5s"`. - + - `FailOnError` - If `true`, Nomad will kill the task if the script execution fails. If `false`, script failure will be logged but the task will continue uninterrupted. Default value is `false`. @@ -1099,18 +1099,18 @@ README][ct]. given as octal of the Unix file permissions `rwxrwxrwx`. - `Uid` - Specifies the rendered template owner's user ID. - + ~> **Caveat:** Works only on Unix-based systems. Be careful when using containerized drivers, such as `docker` or `podman`, as groups and users inside the container may have different IDs than on the host system. This - feature will also **not** work with Docker Desktop. + feature will also **not** work with Docker Desktop. - `Gid` - Specifies the rendered template owner's group ID. - + ~> **Caveat:** Works only on Unix-based systems. Be careful when using containerized drivers, such as `docker` or `podman`, as groups and users inside the container may have different IDs than on the host system. This - feature will also **not** work with Docker Desktop. + feature will also **not** work with Docker Desktop. - `RightDelim` - Specifies the right delimiter to use in the template. The default is "}}" for some templates, it may be easier to use a different delimiter that diff --git a/website/content/api-docs/operator/scheduler.mdx b/website/content/api-docs/operator/scheduler.mdx index 84c8e71b645..fdad37cda33 100644 --- a/website/content/api-docs/operator/scheduler.mdx +++ b/website/content/api-docs/operator/scheduler.mdx @@ -119,7 +119,7 @@ The table below shows this endpoint's support for ### Bootstrap Configuration Element -The [`default_scheduler_config`][] attribute of the server stanza will provide a +The [`default_scheduler_config`][] attribute of the server block will provide a starting value for this configuration. Once bootstrapped, the value in the server state is authoritative. diff --git a/website/content/api-docs/search.mdx b/website/content/api-docs/search.mdx index 8f1741a4eae..4c3762ffb07 100644 --- a/website/content/api-docs/search.mdx +++ b/website/content/api-docs/search.mdx @@ -127,7 +127,7 @@ The `/search/fuzzy` endpoint returns partial substring matches for a given searc term and context, where a context can be jobs, allocations, nodes, plugins, or namespaces. Additionally, fuzzy searching can be done across all contexts. For better control over the performance implications of fuzzy searching on Nomad servers, aspects of -fuzzy searching can be tuned through the [search] stanza in Nomad agent config. +fuzzy searching can be tuned through the [search] block in Nomad agent config. Fuzzy search results are ordered starting with closest matching terms. Items of a name that exactly matches the search term are listed first. diff --git a/website/content/docs/commands/job/plan.mdx b/website/content/docs/commands/job/plan.mdx index f455544ff53..b45d45b1c6d 100644 --- a/website/content/docs/commands/job/plan.mdx +++ b/website/content/docs/commands/job/plan.mdx @@ -80,14 +80,14 @@ capability for the job's namespace. - `-vault-token`: Used to validate if the user submitting the job has permission to run the job according to its Vault policies. A Vault token must - be supplied if the [`vault` stanza `allow_unauthenticated`] is disabled in + be supplied if the [`vault` block `allow_unauthenticated`] is disabled in the Nomad server configuration. 
If the `-vault-token` flag is set, the passed Vault token is added to the jobspec before sending to the Nomad servers. This allows passing the Vault token without storing it in the job file. This overrides the token found in the `$VAULT_TOKEN` environment variable and the [`vault_token`] field in the job file. This token is cleared from the job after planning and cannot be used within the job executing environment. Use - the `vault` stanza when templating in a job with a Vault token. + the `vault` block when templating in a job with a Vault token. - `-vault-namespace`: If set, the passed Vault namespace is stored in the job before sending to the Nomad servers. @@ -260,5 +260,5 @@ if a change is detected. [`go-getter`]: https://github.com/hashicorp/go-getter [`nomad job run -check-index`]: /nomad/docs/commands/job/run#check-index [`tee`]: https://man7.org/linux/man-pages/man1/tee.1.html -[`vault` stanza `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated +[`vault` block `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated [`vault_token`]: /nomad/docs/job-specification/job#vault_token diff --git a/website/content/docs/commands/job/run.mdx b/website/content/docs/commands/job/run.mdx index 12ccfc10985..f92a740f4d6 100644 --- a/website/content/docs/commands/job/run.mdx +++ b/website/content/docs/commands/job/run.mdx @@ -100,22 +100,22 @@ that volume. `$CONSUL_HTTP_TOKEN` environment variable and that found in the job. - `-consul-namespace`: If set, any services in the job will be registered into the - specified Consul namespace. Any `template` stanza reading from Consul KV will + specified Consul namespace. Any `template` block reading from Consul KV will be scoped to the specified Consul namespace. If Consul ACLs are enabled and the - [`consul` stanza `allow_unauthenticated`] is disabled in the Nomad server configuration, then + [`consul` block `allow_unauthenticated`] is disabled in the Nomad server configuration, then a Consul token must be supplied with appropriate service and kv Consul ACL policy permissions. - `-vault-token`: Used to validate if the user submitting the job has permission to run the job according to its Vault policies. A Vault token must - be supplied if the [`vault` stanza `allow_unauthenticated`] is disabled in + be supplied if the [`vault` block `allow_unauthenticated`] is disabled in the Nomad server configuration. If the `-vault-token` flag is set, the passed Vault token is added to the jobspec before sending to the Nomad servers. This allows passing the Vault token without storing it in the job file. This overrides the token found in the `$VAULT_TOKEN` environment variable and the [`vault_token`] field in the job file. This token is cleared from the job after validating and cannot be used within the job executing environment. Use - the `vault` stanza when templating in a job with a Vault token. + the `vault` block when templating in a job with a Vault token. - `-vault-namespace`: If set, the passed Vault namespace is stored in the job before sending to the Nomad servers.
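A minimal sketch of the job-side alternative the `-vault-token` help text describes, pairing a `vault` block with a `template` block so the task receives its own workload token instead of one passed on the command line; the task name, image, policy name, and secret path below are hypothetical placeholders:

```hcl
task "app" {
  driver = "docker"

  config {
    image = "app:latest" # placeholder image
  }

  # Ask the Nomad client to acquire a Vault token for this task. The
  # policy name is a placeholder and must exist in Vault.
  vault {
    policies = ["app-read"]
  }

  # The acquired token is made available to the template block, which
  # renders a secret from a hypothetical kv-v2 path into the task's
  # environment.
  template {
    data = <<EOF
{{ with secret "secret/data/app" }}
DB_PASSWORD={{ .Data.data.password }}
{{ end }}
EOF

    destination = "secrets/app.env"
    env         = true
  }
}
```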
@@ -237,7 +237,7 @@ $ nomad job run example.nomad ``` [`batch`]: /nomad/docs/schedulers#batch -[`consul` stanza `allow_unauthenticated`]: /nomad/docs/configuration/consul#allow_unauthenticated +[`consul` block `allow_unauthenticated`]: /nomad/docs/configuration/consul#allow_unauthenticated [deployment status]: /nomad/docs/commands/deployment#status [eval status]: /nomad/docs/commands/eval/status [`go-getter`]: https://github.com/hashicorp/go-getter @@ -245,5 +245,5 @@ $ nomad job run example.nomad [job specification]: /nomad/docs/job-specification [JSON jobs]: /nomad/api-docs/json-jobs [`system`]: /nomad/docs/schedulers#system -[`vault` stanza `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated +[`vault` block `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated [`vault_token`]: /nomad/docs/job-specification/job#vault_token diff --git a/website/content/docs/commands/job/validate.mdx b/website/content/docs/commands/job/validate.mdx index 95a4a71e3b1..94d0273d87a 100644 --- a/website/content/docs/commands/job/validate.mdx +++ b/website/content/docs/commands/job/validate.mdx @@ -55,14 +55,14 @@ capability for the job's namespace. - `-vault-token`: Used to validate if the user submitting the job has permission to run the job according to its Vault policies. A Vault token must - be supplied if the [`vault` stanza `allow_unauthenticated`] is disabled in + be supplied if the [`vault` block `allow_unauthenticated`] is disabled in the Nomad server configuration. If the `-vault-token` flag is set, the passed Vault token is added to the jobspec before sending to the Nomad servers. This allows passing the Vault token without storing it in the job file. This overrides the token found in the `$VAULT_TOKEN` environment variable and the [`vault_token`] field in the job file. This token is cleared from the job after validating and cannot be used within the job executing environment. Use - the `vault` stanza when templating in a job with a Vault token. + the `vault` block when templating in a job with a Vault token. - `-vault-namespace`: If set, the passed Vault namespace is stored in the job before sending to the Nomad servers. @@ -98,5 +98,5 @@ Job validation successful [`go-getter`]: https://github.com/hashicorp/go-getter [job specification]: /nomad/docs/job-specification -[`vault` stanza `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated +[`vault` block `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated [`vault_token`]: /nomad/docs/job-specification/job#vault_token diff --git a/website/content/docs/commands/node/drain.mdx b/website/content/docs/commands/node/drain.mdx index bc1eae6065a..3422e3ddcb2 100644 --- a/website/content/docs/commands/node/drain.mdx +++ b/website/content/docs/commands/node/drain.mdx @@ -10,7 +10,7 @@ description: | The `node drain` command is used to toggle drain mode on a given node. Drain mode prevents any new tasks from being allocated to the node, and begins migrating all existing allocations away. Allocations will be migrated according -to their [`migrate`][migrate] stanza until the drain's deadline is reached. +to their [`migrate`][migrate] block until the drain's deadline is reached. By default the `node drain` command blocks until a node is done draining and all allocations have terminated. 
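For reference, a sketch of the `migrate` block mentioned above; the values shown mirror the documented defaults and are illustrative rather than required:

```hcl
job "web" {
  # Controls how allocations are moved off a draining node.
  migrate {
    max_parallel     = 1        # migrate one allocation at a time
    health_check     = "checks" # gate on service health checks
    min_healthy_time = "10s"    # replacement must stay healthy this long
    healthy_deadline = "5m"     # fail the migration if not healthy by then
  }
}
```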
Canceling the `node drain` command _will not_ diff --git a/website/content/docs/concepts/filesystem.mdx b/website/content/docs/concepts/filesystem.mdx index bb819a58be3..72833fd8054 100644 --- a/website/content/docs/concepts/filesystem.mdx +++ b/website/content/docs/concepts/filesystem.mdx @@ -42,15 +42,15 @@ allocation directory like the one below. log shipper. This is the directory that's provided to the task as the `NOMAD_ALLOC_DIR`. Note that this `alloc/` directory is not the same as the "allocation working directory", which is the top-level directory. All tasks - in a task group can read and write to the `alloc/` directory. But the full host + in a task group can read and write to the `alloc/` directory. But the full host path may differ depending on the task driver's [filesystem isolation mode], so tasks should always use the `NOMAD_ALLOC_DIR` environment variable - to find this path rather than relying on the specific implementation of the - [`none`](#none-isolation), [`chroot`](#chroot-isolation), or [`image`](#image-isolation) + to find this path rather than relying on the specific implementation of the + [`none`](#none-isolation), [`chroot`](#chroot-isolation), or [`image`](#image-isolation) modes. Within the `alloc/` directory are three standard directories: - **alloc/data/**: This directory is the location used by the - [`ephemeral_disk`] stanza for shared data. + [`ephemeral_disk`] block for shared data. - **alloc/logs/**: This directory is the location of the log files for every task within an allocation. The `nomad alloc logs` command streams these diff --git a/website/content/docs/configuration/acl.mdx b/website/content/docs/configuration/acl.mdx index ff846c3abfc..19db95fcc23 100644 --- a/website/content/docs/configuration/acl.mdx +++ b/website/content/docs/configuration/acl.mdx @@ -1,16 +1,16 @@ --- layout: docs -page_title: acl Stanza - Agent Configuration +page_title: acl Block - Agent Configuration description: >- - The "acl" stanza configures the Nomad agent to enable ACLs and tune various + The "acl" block configures the Nomad agent to enable ACLs and tune various parameters. --- -# `acl` Stanza +# `acl` Block -The `acl` stanza configures the Nomad agent to enable ACLs and tunes various +The `acl` block configures the Nomad agent to enable ACLs and tunes various ACL parameters. Learn more about configuring Nomad's ACL system in the [Secure Nomad with Access Control guide][secure-guide]. diff --git a/website/content/docs/configuration/audit.mdx b/website/content/docs/configuration/audit.mdx index 6c17a37389e..1839d935236 100644 --- a/website/content/docs/configuration/audit.mdx +++ b/website/content/docs/configuration/audit.mdx @@ -1,16 +1,16 @@ --- layout: docs -page_title: audit Stanza - Agent Configuration +page_title: audit Block - Agent Configuration description: >- - The "audit" stanza configures the Nomad agent to configure Audit Logging + The "audit" block configures the Nomad agent to configure Audit Logging behavior. This is an Enterprise-only feature. --- -# `audit` Stanza +# `audit` Block -The `audit` stanza configures the Nomad agent to configure Audit logging behavior. +The `audit` block configures the Nomad agent to configure Audit logging behavior. Audit logging is an Enterprise-only feature. ```hcl @@ -26,7 +26,7 @@ generate two audit log entries. These two entries correspond to a stage: an `OperationReceived` event will be sent before the request has been processed, and an `OperationComplete` event will be sent after the request has been processed, but before the response body is returned to the end user.
-By default, with a minimally configured audit stanza (`audit { enabled = true }`)
+By default, with a minimally configured audit block (`audit { enabled = true }`)
 the following default sink will be added with no filters.

```hcl
@@ -52,18 +52,18 @@ in order for HTTP requests to successfully complete.
  When enabled, audit logging will occur for every request, unless it is
  filtered by a `filter`.

-- `sink` ([sink](#sink-stanza): default) - Configures a sink
+- `sink` ([sink](#sink-block): default) - Configures a sink
  for audit logs to be sent to.

-- `filter` (array<[filter](#filter-stanza)>: []) - Configures a filter
+- `filter` (array<[filter](#filter-block)>: []) - Configures a filter
  to exclude matching events from being sent to audit logging sinks.

-### `sink` Stanza
+### `sink` Block

-The `sink` stanza is used to make audit logging sinks for events to be
+The `sink` block is used to make audit logging sinks for events to be
 sent to. Currently only a single sink is supported.

-The key of the stanza corresponds to the name of the sink which is used
+The key of the block corresponds to the name of the sink, which is used
 for logging purposes.

```hcl
@@ -119,9 +119,9 @@ audit {
 - `rotate_max_files` `(int: 0)` - Specifies the maximum number of older audit
  log file archives to keep. If 0, no files are ever deleted.

-### `filter` Stanza
+### `filter` Block

-The `filter` stanza is used to create filters to filter **out** matching events
+The `filter` block is used to create filters to filter **out** matching events
 from being written to the audit log. By default, all events will be sent to an
 audit log for all stages (OperationReceived and OperationComplete). Filters
 are useful for operators who want to limit the performance impact of audit
diff --git a/website/content/docs/configuration/autopilot.mdx b/website/content/docs/configuration/autopilot.mdx
index 13223f9bc48..53cb78c19dc 100644
--- a/website/content/docs/configuration/autopilot.mdx
+++ b/website/content/docs/configuration/autopilot.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: autopilot Stanza - Agent Configuration
+page_title: autopilot Block - Agent Configuration
description: >-
-  The "autopilot" stanza configures the Nomad agent to configure Autopilot
+  The "autopilot" block configures the Nomad agent's Autopilot
  behavior.
---

-# `autopilot` Stanza
+# `autopilot` Block

-The `autopilot` stanza configures the Nomad agent to configure Autopilot behavior.
+The `autopilot` block configures the Nomad agent's Autopilot behavior.
 For more information about Autopilot, see the [Autopilot
Guide](/nomad/tutorials/manage-clusters/autopilot).

```hcl
diff --git a/website/content/docs/configuration/client.mdx b/website/content/docs/configuration/client.mdx
index 181610200bb..9369d8fc852 100644
--- a/website/content/docs/configuration/client.mdx
+++ b/website/content/docs/configuration/client.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: client Stanza - Agent Configuration
+page_title: client Block - Agent Configuration
description: |-
-  The "client" stanza configures the Nomad agent to accept jobs as assigned by
+  The "client" block configures the Nomad agent to accept jobs as assigned by
  the Nomad server, join the cluster, and specify driver-specific configuration.
--- -# `client` Stanza +# `client` Block -The `client` stanza configures the Nomad agent to accept jobs as assigned by +The `client` block configures the Nomad agent to accept jobs as assigned by the Nomad server, join the cluster, and specify driver-specific configuration. ```hcl @@ -155,16 +155,16 @@ client { - `artifact` ([Artifact](#artifact-parameters): varied) - Specifies controls on the behavior of task - [`artifact`](/nomad/docs/job-specification/artifact) stanzas. + [`artifact`](/nomad/docs/job-specification/artifact) blocks. - `template` ([Template](#template-parameters): nil) - Specifies controls on the behavior of task - [`template`](/nomad/docs/job-specification/template) stanzas. + [`template`](/nomad/docs/job-specification/template) blocks. -- `host_volume` ([host_volume](#host_volume-stanza): nil) - Exposes +- `host_volume` ([host_volume](#host_volume-block): nil) - Exposes paths from the host as volumes that can be mounted into jobs. -- `host_network` ([host_network](#host_network-stanza): nil) - Registers +- `host_network` ([host_network](#host_network-block): nil) - Registers additional host networks with the node that can be selected when port mapping. - `cgroup_parent` `(string: "/nomad")` - Specifies the cgroup parent for which cgroup @@ -210,7 +210,7 @@ chroot as doing so would cause infinite recursion. ### `options` Parameters ~> Note: In Nomad 0.9 client configuration options for drivers were deprecated. -See the [plugin stanza][plugin-stanza] documentation for more information. +See the [plugin block][plugin-block] documentation for more information. The following is not an exhaustive list of options for only the Nomad client. To find the options supported by each individual Nomad driver, please @@ -360,7 +360,7 @@ see the [drivers documentation](/nomad/docs/drivers). - `reserved_ports` `(string: "")` - Specifies a comma-separated list of ports to reserve on all fingerprinted network devices. Ranges can be specified by using a hyphen separating the two inclusive ends. See also - [`host_network`](#host_network-stanza) for reserving ports on specific host + [`host_network`](#host_network-block) for reserving ports on specific host networks. @@ -430,7 +430,7 @@ see the [drivers documentation](/nomad/docs/drivers). re-render the template with the data available at the time. This is useful to enable in systems where Consul is in a degraded state, or the referenced data values are changing rapidly, because it will reduce the number of times a template is rendered. This - configuration is also exposed in the _task template stanza_ to allow overrides per task. + configuration is also exposed in the _task template block_ to allow overrides per task. ```hcl wait { @@ -532,11 +532,11 @@ see the [drivers documentation](/nomad/docs/drivers). } ``` -### `host_volume` Stanza +### `host_volume` Block -The `host_volume` stanza is used to make volumes available to jobs. +The `host_volume` block is used to make volumes available to jobs. -The key of the stanza corresponds to the name of the volume for use in the +The key of the block corresponds to the name of the volume for use in the `source` parameter of a `"host"` type [`volume`](/nomad/docs/job-specification/volume) and ACLs. @@ -558,12 +558,12 @@ client { - `read_only` `(bool: false)` - Specifies whether the volume should only ever be allowed to be mounted `read_only`, or if it should be writeable. 
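To connect the two halves, here is a hedged sketch of a job that claims the `ca-certificates` host volume registered in the client example above; the mount destination is illustrative.

```hcl
job "docs" {
  group "example" {
    # Claim the host volume by the name the client registered it under.
    volume "ca-certificates" {
      type      = "host"
      source    = "ca-certificates"
      read_only = true
    }

    task "task" {
      # Mount the claimed volume into the task's filesystem.
      volume_mount {
        volume      = "ca-certificates"
        destination = "/etc/ssl/certs"
        read_only   = true
      }
    }
  }
}
```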
-### `host_network` Stanza +### `host_network` Block -The `host_network` stanza is used to register additional host networks with +The `host_network` block is used to register additional host networks with the node that can be used when port mapping. -The key of the stanza corresponds to the name of the network used in the +The key of the block corresponds to the name of the network used in the [`host_network`](/nomad/docs/job-specification/network#host-networks). ```hcl @@ -647,7 +647,7 @@ client { ``` [plugin-options]: #plugin-options -[plugin-stanza]: /nomad/docs/configuration/plugin +[plugin-block]: /nomad/docs/configuration/plugin [server-join]: /nomad/docs/configuration/server_join 'Server Join' [metadata_constraint]: /nomad/docs/job-specification/constraint#user-specified-metadata 'Nomad User-Specified Metadata Constraint Example' [task working directory]: /nomad/docs/runtime/environment#task-directories 'Task directories' diff --git a/website/content/docs/configuration/consul.mdx b/website/content/docs/configuration/consul.mdx index 3bbd2f98ac0..d52c61da2d1 100644 --- a/website/content/docs/configuration/consul.mdx +++ b/website/content/docs/configuration/consul.mdx @@ -1,18 +1,18 @@ --- layout: docs -page_title: consul Stanza - Agent Configuration +page_title: consul Block - Agent Configuration description: |- - The "consul" stanza configures the Nomad agent's communication with + The "consul" block configures the Nomad agent's communication with Consul for service discovery and key-value integration. When configured, tasks can register themselves with Consul, and the Nomad cluster can automatically bootstrap itself. --- -# `consul` Stanza +# `consul` Block -The `consul` stanza configures the Nomad agent's communication with +The `consul` block configures the Nomad agent's communication with [Consul][consul] for service discovery and key-value integration. When configured, tasks can register themselves with Consul, and the Nomad cluster can [automatically bootstrap][bootstrap] itself. @@ -25,7 +25,7 @@ consul { } ``` -A default `consul` stanza is automatically merged with all Nomad agent +A default `consul` block is automatically merged with all Nomad agent configurations. These sane defaults automatically enable Consul integration if Consul is detected on the system. This allows for seamless bootstrapping of the cluster with zero configuration. To put it another way: if you have a Consul diff --git a/website/content/docs/configuration/index.mdx b/website/content/docs/configuration/index.mdx index 08038e00140..4366ff2ece6 100644 --- a/website/content/docs/configuration/index.mdx +++ b/website/content/docs/configuration/index.mdx @@ -82,7 +82,7 @@ testing. - `acl` `(`[`ACL`]`: nil)` - Specifies configuration which is specific to ACLs. - `addresses` `(Addresses: see below)` - Specifies the bind address for - individual network services. Any values configured in this stanza take + individual network services. Any values configured in this block take precedence over the default [bind_addr](#bind_addr). These values should be specified in IP format without a port (ex. `"0.0.0.0"`). To set the port, see the [`ports`](#ports) field. The values support [go-sockaddr/template @@ -104,7 +104,7 @@ testing. to the peers of a server or a client node to support more complex network configurations such as NAT. This configuration is optional, and defaults to the bind address of the specific network service if it is not provided. 
Any
-  values configured in this stanza take precedence over the default
+  values configured in this block take precedence over the default
  [bind_addr](#bind_addr).

  If the bind address is `0.0.0.0` then the IP address of the default private
@@ -269,8 +269,8 @@ testing.
  This must be an absolute path.

 - `plugin` `(`[`Plugin`]`: nil)` - Specifies configuration for a
-  specific plugin. The plugin stanza may be repeated, once for each plugin being
-  configured. The key of the stanza is the plugin's executable name relative to
+  specific plugin. The plugin block may be repeated, once for each plugin being
+  configured. The key of the block is the plugin's executable name relative to
  the [plugin_dir](#plugin_dir).

 - `ports` `(Port: see below)` - Specifies the network ports used for different
diff --git a/website/content/docs/configuration/plugin.mdx b/website/content/docs/configuration/plugin.mdx
index 2abc71c290e..92fa1842fbb 100644
--- a/website/content/docs/configuration/plugin.mdx
+++ b/website/content/docs/configuration/plugin.mdx
@@ -1,14 +1,14 @@
---
layout: docs
-page_title: plugin Stanza - Agent Configuration
-description: The "plugin" stanza is used to configure a Nomad plugin.
+page_title: plugin Block - Agent Configuration
+description: The "plugin" block is used to configure a Nomad plugin.
---

-# `plugin` Stanza
+# `plugin` Block

-The `plugin` stanza is used to configure plugins.
+The `plugin` block is used to configure plugins.

```hcl
plugin "example-plugin" {
diff --git a/website/content/docs/configuration/search.mdx b/website/content/docs/configuration/search.mdx
index 4e31e2b7f48..dacdf56c5fa 100644
--- a/website/content/docs/configuration/search.mdx
+++ b/website/content/docs/configuration/search.mdx
@@ -1,13 +1,13 @@
---
layout: docs
-page_title: search Stanza - Agent Configuration
+page_title: search Block - Agent Configuration
sidebar_title: search
description: >-
-  The "search" stanza specifies configuration for the search API provided
+  The "search" block specifies configuration for the search API provided
  by the Nomad servers.
---

-# `search` Stanza
+# `search` Block

-
]} />

-The `search` stanza specifies configuration for the search API provided by the
+The `search` block specifies configuration for the search API provided by the
 Nomad servers.

```hcl
diff --git a/website/content/docs/configuration/sentinel.mdx b/website/content/docs/configuration/sentinel.mdx
index 4ef417f10b9..85085ab869b 100644
--- a/website/content/docs/configuration/sentinel.mdx
+++ b/website/content/docs/configuration/sentinel.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: sentinel Stanza - Agent Configuration
+page_title: sentinel Block - Agent Configuration
description: >-
-  The "sentinel" stanza configures the Nomad agent for Sentinel policies and
+  The "sentinel" block configures the Nomad agent for Sentinel policies and
  tunes various parameters.
---

-# `sentinel` Stanza
+# `sentinel` Block

-The `sentinel` stanza configures the Sentinel policy engine and tunes various parameters.
+The `sentinel` block configures the Sentinel policy engine and tunes various parameters.
```hcl sentinel { diff --git a/website/content/docs/configuration/server.mdx b/website/content/docs/configuration/server.mdx index 817c7dd991c..4d9e36e4505 100644 --- a/website/content/docs/configuration/server.mdx +++ b/website/content/docs/configuration/server.mdx @@ -1,17 +1,17 @@ --- layout: docs -page_title: server Stanza - Agent Configuration +page_title: server Block - Agent Configuration description: |- - The "server" stanza configures the Nomad agent to operate in server mode to + The "server" block configures the Nomad agent to operate in server mode to participate in scheduling decisions, register with service discovery, handle join failures, and more. --- -# `server` Stanza +# `server` Block -The `server` stanza configures the Nomad agent to operate in server mode to +The `server` block configures the Nomad agent to operate in server mode to participate in scheduling decisions, register with service discovery, handle join failures, and more. @@ -252,23 +252,23 @@ server { Use `retry_join` with an array as a replacement for `start_join`, **do not use both options**. See the [server_join][server-join] section for more information on the format of the string. This field is - deprecated in favor of the [server_join stanza][server-join]. + deprecated in favor of the [server_join block][server-join]. - `retry_interval` `(string: "30s")` - Specifies the time to wait between retry join attempts. This field is deprecated in favor of the [server_join - stanza][server-join]. + block][server-join]. - `retry_max` `(int: 0)` - Specifies the maximum number of join attempts to be made before exiting with a return code of 1. By default, this is set to 0 which is interpreted as infinite retries. This field is deprecated in favor of - the [server_join stanza][server-join]. + the [server_join block][server-join]. - `start_join` `(array: [])` - Specifies a list of server addresses to join on startup. If Nomad is unable to join with any of the specified addresses, agent startup will fail. See the [server address format](/nomad/docs/configuration/server_join#server-address-format) section for more information on the format of the string. This field is - deprecated in favor of the [server_join stanza][server-join]. + deprecated in favor of the [server_join block][server-join]. ### `plan_rejection_tracker` Parameters @@ -298,7 +298,7 @@ account. ### Common Setup -This example shows a common Nomad agent `server` configuration stanza. The two +This example shows a common Nomad agent `server` configuration block. The two IP addresses could also be DNS, and should point to the other Nomad servers in the cluster @@ -346,7 +346,7 @@ server { ### Bootstrapping with a Custom Scheduler Config ((#configuring-scheduler-config)) -While [bootstrapping a cluster], you can use the `default_scheduler_config` stanza +While [bootstrapping a cluster], you can use the `default_scheduler_config` block to prime the cluster with a [`SchedulerConfig`][update-scheduler-config]. The scheduler configuration determines which scheduling algorithm is configured— spread scheduling or binpacking—and which job types are eligible for preemption. 
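For instance, here is a hedged sketch of such a bootstrap configuration; the field values are illustrative, and the full set of options is documented in the scheduler configuration API referenced above.

```hcl
server {
  enabled          = true
  bootstrap_expect = 3

  default_scheduler_config {
    # Choose spread scheduling instead of the default binpacking.
    scheduler_algorithm = "spread"

    # Allow preemption only for system jobs.
    preemption_config {
      system_scheduler_enabled  = true
      batch_scheduler_enabled   = false
      service_scheduler_enabled = false
    }
  }
}
```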
diff --git a/website/content/docs/configuration/server_join.mdx b/website/content/docs/configuration/server_join.mdx
index 604fcf9930c..f4184645b28 100644
--- a/website/content/docs/configuration/server_join.mdx
+++ b/website/content/docs/configuration/server_join.mdx
@@ -1,12 +1,12 @@
---
layout: docs
-page_title: server_join Stanza - Agent Configuration
+page_title: server_join Block - Agent Configuration
description: >-
-  The "server_join" stanza specifies how the Nomad agent will discover and
+  The "server_join" block specifies how the Nomad agent will discover and
  connect to Nomad servers.
---

-# `server_join` Stanza
+# `server_join` Block

-
]} />

-The `server_join` stanza specifies how the Nomad agent will discover and connect
+The `server_join` block specifies how the Nomad agent will discover and connect
 to Nomad servers.

```hcl
diff --git a/website/content/docs/configuration/telemetry.mdx b/website/content/docs/configuration/telemetry.mdx
index 50e091442e1..53d2231d8cf 100644
--- a/website/content/docs/configuration/telemetry.mdx
+++ b/website/content/docs/configuration/telemetry.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: telemetry Stanza - Agent Configuration
+page_title: telemetry Block - Agent Configuration
description: |-
-  The "telemetry" stanza configures Nomad's publication of metrics and telemetry
+  The "telemetry" block configures Nomad's publication of metrics and telemetry
  to third-party systems.
---

-# `telemetry` Stanza
+# `telemetry` Block

-The `telemetry` stanza configures Nomad's publication of metrics and telemetry
+The `telemetry` block configures Nomad's publication of metrics and telemetry
 to third-party systems.

```hcl
@@ -21,12 +21,12 @@ telemetry {
```

 This section of the documentation only covers the configuration options for
-`telemetry` stanza. To understand the architecture and metrics themselves,
+the `telemetry` block. To understand the architecture and metrics themselves,
 please see the [Telemetry guide](/nomad/docs/operations/monitoring-nomad).

 ## `telemetry` Parameters

-Due to the number of configurable parameters to the `telemetry` stanza,
+Due to the number of configurable parameters to the `telemetry` block,
 parameters on this page are grouped by the telemetry provider.

 ### Common
diff --git a/website/content/docs/configuration/tls.mdx b/website/content/docs/configuration/tls.mdx
index be8e8a2fb59..118d8c93abf 100644
--- a/website/content/docs/configuration/tls.mdx
+++ b/website/content/docs/configuration/tls.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: tls Stanza - Agent Configuration
+page_title: tls Block - Agent Configuration
description: |-
-  The "tls" stanza configures Nomad's TLS communication via HTTP and RPC to
+  The "tls" block configures Nomad's TLS communication via HTTP and RPC to
  enforce secure cluster communication between servers and clients.
---

-# `tls` Stanza
+# `tls` Block

-The `tls` stanza configures Nomad's TLS communication via HTTP and RPC to
+The `tls` block configures Nomad's TLS communication via HTTP and RPC to
 enforce secure cluster communication between servers and clients.

```hcl
tls {
@@ -24,7 +24,7 @@ tls {
  start the Nomad agent.

 This section of the documentation only covers the configuration options for
-`tls` stanza. To understand how to setup the certificates themselves, please see
+the `tls` block. To understand how to set up the certificates themselves, please see
 the [Enable TLS Encryption for Nomad Tutorial](/nomad/tutorials/transport-security/security-enable-tls).
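As a quick orientation before the parameter list, here is a hedged sketch of a mutual-TLS agent configuration; the certificate paths are illustrative.

```hcl
tls {
  http = true
  rpc  = true

  ca_file   = "/etc/nomad.d/tls/nomad-ca.pem"
  cert_file = "/etc/nomad.d/tls/agent.pem"
  key_file  = "/etc/nomad.d/tls/agent-key.pem"

  # Require RPC peers to present certificates with a valid server hostname.
  verify_server_hostname = true
}
```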
## `tls` Parameters

@@ -79,8 +79,8 @@ the [Enable TLS Encryption for Nomad Tutorial](/nomad/tutorials/transport-securi

 ## `tls` Examples

-The following examples only show the `tls` stanzas. Remember that the
-`tls` stanza is only valid in the placements listed above.
+The following examples only show the `tls` blocks. Remember that the
+`tls` block is only valid in the placements listed above.

 ### Enabling TLS
diff --git a/website/content/docs/configuration/ui.mdx b/website/content/docs/configuration/ui.mdx
index 47a0cc45279..9525aab1164 100644
--- a/website/content/docs/configuration/ui.mdx
+++ b/website/content/docs/configuration/ui.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: ui Stanza - Agent Configuration
+page_title: ui Block - Agent Configuration
description: |-
-  The "ui" stanza configures the Nomad agent's web UI.
+  The "ui" block configures the Nomad agent's web UI.
---

-# `ui` Stanza
+# `ui` Block

-The `ui` stanza configures the Nomad agent's [web UI].
+The `ui` block configures the Nomad agent's [web UI].

```hcl
ui {
@@ -26,7 +26,7 @@ ui {
}
```

-A default `ui` stanza is automatically merged with all Nomad agent
+A default `ui` block is automatically merged with all Nomad agent
 configurations. Note that the UI can be served from any Nomad agent, and the
 configuration is individual to each agent.

 ## `ui` Parameters
diff --git a/website/content/docs/configuration/vault.mdx b/website/content/docs/configuration/vault.mdx
index 678f6d7a90e..dad303682cd 100644
--- a/website/content/docs/configuration/vault.mdx
+++ b/website/content/docs/configuration/vault.mdx
@@ -1,17 +1,17 @@
---
layout: docs
-page_title: vault Stanza - Agent Configuration
+page_title: vault Block - Agent Configuration
description: |-
-  The "vault" stanza configures Nomad's integration with HashiCorp's Vault.
+  The "vault" block configures Nomad's integration with HashiCorp's Vault.
  When configured, Nomad can create and distribute Vault tokens to tasks
  automatically.
---

-# `vault` Stanza
+# `vault` Block

-The `vault` stanza configures Nomad's integration with [HashiCorp's
+The `vault` block configures Nomad's integration with [HashiCorp's
 Vault][vault]. When configured, Nomad can create and distribute Vault tokens
 to tasks automatically. For more information on the architecture and setup,
 please see the [Nomad and Vault integration documentation][nomad-vault].
@@ -96,8 +96,8 @@ vault {

 ## `vault` Examples

-The following examples only show the `vault` stanzas. Remember that the
-`vault` stanza is only valid in the placements listed above.
+The following examples only show the `vault` blocks. Remember that the
+`vault` block is only valid in the placements listed above.

 ### Nomad Server
diff --git a/website/content/docs/drivers/docker.mdx b/website/content/docs/drivers/docker.mdx
index bfd8a19f2e3..7375d10cba9 100644
--- a/website/content/docs/drivers/docker.mdx
+++ b/website/content/docs/drivers/docker.mdx
@@ -71,7 +71,7 @@ The `docker` driver supports the following configuration in the job spec. Only

 - `auth_soft_fail` `(bool: false)` - Don't fail the task on an auth failure.
  Attempt to continue without auth. If the Nomad client configuration has an
-  [`auth.helper`](#plugin_auth_helper) stanza, the helper will be tried for
+  [`auth.helper`](#plugin_auth_helper) block, the helper will be tried for
  all images, including public images. If you mix private and public images,
  you will need to include `auth_soft_fail=true` in every job using a public
  image.
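A hedged sketch of what that looks like in a task that pulls a public image; the task and image names are illustrative.

```hcl
task "cache" {
  driver = "docker"

  config {
    # Public image: don't fail the pull when the configured auth helper
    # has no credentials for this registry.
    image          = "redis:7"
    auth_soft_fail = true
  }
}
```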
@@ -292,7 +292,7 @@ config {
 group-wide bridge networking, you may encounter issues preventing your
 containers from reaching networks outside of the bridge interface on systems
 with firewalld enabled. This behavior is often caused by the CNI plugin not
-registering the group network as trusted and can be resolved as described in the [network stanza] documentation.
+registering the group network as trusted and can be resolved as described in the [network block] documentation.

 - `pid_mode` - (Optional) `host` or not set (default). Set to `host` to share
  the PID namespace with the host. Note that this also requires the Nomad agent
@@ -744,12 +744,12 @@ expose and port forwarding.

 #### Deprecated `port_map` Syntax

-Up until Nomad 0.12, ports could be specified in a task's resource stanza and set using the docker
+Up until Nomad 0.12, ports could be specified in a task's resource block and set using the docker
 `port_map` field. As more features have been added to the group network resource allocation, task based
 network resources are deprecated. With it, the `port_map` field is also deprecated and can only be
 used with task network resources.

-Users should migrate their jobs to define ports in the group network stanza and specified which ports
+Users should migrate their jobs to define ports in the group network block and specify which ports
 a task maps with the `ports` field.

 ### Advertising Container IPs
@@ -807,7 +807,7 @@ For the best performance and security features you should use recent versions
 of the Linux Kernel and Docker daemon.

 If you would like to change any of the options related to the `docker` driver
-on a Nomad client, you can modify them with the [plugin stanza][plugin-stanza]
+on a Nomad client, you can modify them with the [plugin block][plugin-block]
 syntax. Below is an example of a configuration (many of the values are the
 default). See the next section for more information on the options.
@@ -897,7 +897,7 @@ host system.
 - `allow_runtimes` - defaults to `["runc", "nvidia"]` - A list of the allowed
  docker runtimes a task may use.

-- `auth` stanza:
+- `auth` block:

  - `config` - Allows an operator to specify a JSON file which is
    in the dockercfg format containing authentication
@@ -914,7 +914,7 @@ host system.
    public images. If you mix private and public images, you will need to
    include [`auth_soft_fail=true`] in every job using a public image.

-- `tls` stanza:
+- `tls` block:

  - `cert` - Path to the server's certificate file (`.pem`). Specify this
    along with `key` and `ca` to use a TLS client to connect to the docker
@@ -937,7 +937,7 @@ host system.
  Available options are `job_name`, `job_id`, `task_group_name`, `task_name`,
  `namespace`, `node_name`, `node_id`. Globs are supported (e.g. `task*`)

-- `logging` stanza:
+- `logging` block:

  - `type` - Defaults to `"json-file"`. Specifies the logging driver docker
    should use for all containers Nomad starts. Note that for older versions
@@ -950,7 +950,7 @@ host system.
    [configuration](https://docs.docker.com/config/containers/logging/configure/)
    to the logging driver.

-- `gc` stanza:
+- `gc` block:

  - `image` - Defaults to `true`. Changing this to `false` will prevent Nomad
    from removing images from stopped tasks.
@@ -965,7 +965,7 @@ host system.
    from removing a container when the task exits. Under a name conflict,
    Nomad may still remove the dead container.
- - `dangling_containers` stanza for controlling dangling container detection + - `dangling_containers` block for controlling dangling container detection and cleanup: - `enabled` - Defaults to `true`. Enables dangling container handling. @@ -982,7 +982,7 @@ host system. GC. Should not need adjusting higher but may be adjusted lower to GC more aggressively. -- `volumes` stanza: +- `volumes` block: - `enabled` - Defaults to `false`. Allows tasks to bind host paths (`volumes`) inside their container and use volume drivers @@ -1006,7 +1006,7 @@ host system. ## Client Configuration ~> Note: client configuration options will soon be deprecated. Please use -[plugin options][plugin-options] instead. See the [plugin stanza][plugin-stanza] +[plugin options][plugin-options] instead. See the [plugin block][plugin-block] documentation for more information. The `docker` driver has the following [client configuration @@ -1186,7 +1186,7 @@ Containers that don't match Nomad container patterns are left untouched. Operators can run the reaper in a dry-run mode, where it only logs dangling container ids without killing them, or disable it by setting the -`gc.dangling_containers` config stanza. +`gc.dangling_containers` config block. ### Docker for Windows @@ -1197,7 +1197,7 @@ Windows is relatively new and rapidly evolving you may want to consult the [faq-win-mac]: /nomad/docs/faq#q-how-to-connect-to-my-host-network-when-using-docker-desktop-windows-and-macos [winissues]: https://github.com/hashicorp/nomad/issues?q=is%3Aopen+is%3Aissue+label%3Atheme%2Fdriver%2Fdocker+label%3Atheme%2Fplatform-windows [plugin-options]: #plugin-options -[plugin-stanza]: /nomad/docs/configuration/plugin +[plugin-block]: /nomad/docs/configuration/plugin [allocation working directory]: /nomad/docs/runtime/environment#task-directories 'Task Directories' [`auth_soft_fail=true`]: #auth_soft_fail [cap_add]: /nomad/docs/drivers/docker#cap_add @@ -1209,6 +1209,6 @@ Windows is relatively new and rapidly evolving you may want to consult the [allow_caps]: /nomad/docs/drivers/docker#allow_caps [Connect]: /nomad/docs/job-specification/connect [`bridge`]: /nomad/docs/job-specification/network#bridge -[network stanza]: /nomad/docs/job-specification/network#bridge-mode +[network block]: /nomad/docs/job-specification/network#bridge-mode [`pids_limit`]: /nomad/docs/drivers/docker#pids_limit -[Windows isolation]: https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container \ No newline at end of file +[Windows isolation]: https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container diff --git a/website/content/docs/drivers/exec.mdx b/website/content/docs/drivers/exec.mdx index 6550accee0e..271095ccec9 100644 --- a/website/content/docs/drivers/exec.mdx +++ b/website/content/docs/drivers/exec.mdx @@ -253,5 +253,5 @@ This list is configurable through the agent client [no_net_raw]: /nomad/docs/upgrade/upgrade-specific#nomad-1-1-0-rc1-1-0-5-0-12-12 [allow_caps]: /nomad/docs/drivers/exec#allow_caps [docker_caps]: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities -[host volume]: /nomad/docs/configuration/client#host_volume-stanza +[host volume]: /nomad/docs/configuration/client#host_volume-block [volume_mount]: /nomad/docs/job-specification/volume_mount diff --git a/website/content/docs/drivers/index.mdx b/website/content/docs/drivers/index.mdx index c35ed28025b..88f5244ad5a 100644 --- a/website/content/docs/drivers/index.mdx +++ 
b/website/content/docs/drivers/index.mdx @@ -12,8 +12,8 @@ support a broad set of workloads across all major operating systems. Starting with Nomad 0.9, task drivers are now pluggable. This gives users the flexibility to introduce their own drivers without having to recompile Nomad. -You can view the [plugin stanza][plugin] documentation for examples on how to -use the `plugin` stanza in Nomad's client configuration. Note that we have +You can view the [plugin block][plugin] documentation for examples on how to +use the `plugin` block in Nomad's client configuration. Note that we have introduced new syntax when specifying driver options in the client configuration (see [docker][docker_plugin] for an example). Keep in mind that even though all built-in drivers are now plugins, Nomad remains a single binary and maintains diff --git a/website/content/docs/drivers/raw_exec.mdx b/website/content/docs/drivers/raw_exec.mdx index db91d3f7414..b8b2db68e79 100644 --- a/website/content/docs/drivers/raw_exec.mdx +++ b/website/content/docs/drivers/raw_exec.mdx @@ -99,7 +99,7 @@ plugin "raw_exec" { ``` Nomad versions before v0.9 use the following client configuration. This configuration is -also supported in Nomad v0.9.0, but is deprecated in favor of the plugin stanza: +also supported in Nomad v0.9.0, but is deprecated in favor of the plugin block: ``` client { @@ -125,7 +125,7 @@ client { ## Client Options ~> Note: client configuration options will soon be deprecated. Please use -[plugin options][plugin-options] instead. See the [plugin stanza][plugin-stanza] documentation for more information. +[plugin options][plugin-options] instead. See the [plugin block][plugin-block] documentation for more information. - `driver.raw_exec.enable` - Specifies whether the driver should be enabled or disabled. Defaults to `false`. @@ -154,4 +154,4 @@ appropriate privileges, the cgroup system is mounted and the operator hasn't disabled cgroups for the driver. [plugin-options]: #plugin-options -[plugin-stanza]: /nomad/docs/configuration/plugin +[plugin-block]: /nomad/docs/configuration/plugin diff --git a/website/content/docs/integrations/consul-connect.mdx b/website/content/docs/integrations/consul-connect.mdx index 0cac6547cf6..4576c06ec7b 100644 --- a/website/content/docs/integrations/consul-connect.mdx +++ b/website/content/docs/integrations/consul-connect.mdx @@ -269,7 +269,7 @@ The API service is defined as a task group with a bridge network: ``` Since the API service is only accessible via Consul service mesh, it does not define -any ports in its network. The service stanza enables service mesh. +any ports in its network. The service block enables service mesh. ```hcl group "api" { @@ -290,7 +290,7 @@ any ports in its network. The service stanza enables service mesh. } ``` -The `port` in the service stanza is the port the API service listens on. The +The `port` in the service block is the port the API service listens on. The Envoy proxy will automatically route traffic to that port inside the network namespace. Note that currently this cannot be a named port; it must be a hard-coded port value. See [GH-9907]. @@ -345,7 +345,7 @@ The web frontend connects to the API service via Consul service mesh. } ``` -The `upstreams` stanza defines the remote service to access (`count-api`) and +The `upstreams` block defines the remote service to access (`count-api`) and what port to expose that service on inside the network namespace (`8080`). 
The web frontend is configured to communicate with the API service with an @@ -377,4 +377,4 @@ dashes (`-`) are converted to underscores (`_`) in environment variables so [`Local`]: /consul/docs/security/acl/acl-tokens#token-attributes [anon_token]: /consul/docs/security/acl/acl-tokens#special-purpose-tokens [consul_ports]: /consul/docs/agent/config/config-files#ports -[consul_grpc_tls]: /consul/docs/upgrading/upgrade-specific#changes-to-grpc-tls-configuration \ No newline at end of file +[consul_grpc_tls]: /consul/docs/upgrading/upgrade-specific#changes-to-grpc-tls-configuration diff --git a/website/content/docs/integrations/consul-integration.mdx b/website/content/docs/integrations/consul-integration.mdx index b8d743a80c4..61414160eea 100644 --- a/website/content/docs/integrations/consul-integration.mdx +++ b/website/content/docs/integrations/consul-integration.mdx @@ -43,12 +43,12 @@ To configure a job to register with service discovery, please see the ## Dynamic Configuration -Nomad's job specification includes a [`template` stanza](/nomad/docs/job-specification/template) +Nomad's job specification includes a [`template` block](/nomad/docs/job-specification/template) that utilizes a Consul ecosystem tool called [Consul Template](https://github.com/hashicorp/consul-template). This mechanism creates a convenient way to ship configuration files that are populated from environment variables, Consul data, Vault secrets, or just general configurations within a Nomad task. -For more information on Nomad's template stanza and how it leverages Consul Template, +For more information on Nomad's template block and how it leverages Consul Template, please see the [`template` job specification documentation](/nomad/docs/job-specification/template). ## Consul Namespaces diff --git a/website/content/docs/integrations/vault-integration.mdx b/website/content/docs/integrations/vault-integration.mdx index 11f9cc50b23..e4f8b2e77f0 100644 --- a/website/content/docs/integrations/vault-integration.mdx +++ b/website/content/docs/integrations/vault-integration.mdx @@ -275,7 +275,7 @@ More information about creating orphan tokens can be found in The [`-period` flag](/vault/docs/commands/token/create#period) is required to allow the automatic renewal of the token. If this is left out, a [`vault token renew` command](/vault/docs/commands/token/renew) will need to be run manually to renew the token. The token can then be set in the server configuration's -[`vault` stanza][config], as a command-line flag, or via an environment +[`vault` block][config], as a command-line flag, or via an environment variable. ```shell-session @@ -318,7 +318,7 @@ but will log the reasons the token is invalid and disable Vault integration. 
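Tying the steps above together, here is a hedged sketch of the server-side `vault` block once the periodic token has been created; the address, role name, and token placeholder are all illustrative.

```hcl
vault {
  enabled          = true
  address          = "https://vault.service.consul:8200"
  create_from_role = "nomad-cluster"

  # The periodic, orphan token created above; in practice this is usually
  # supplied via the VAULT_TOKEN environment variable rather than on disk.
  token = "REPLACE_WITH_PERIODIC_TOKEN"
}
```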
### Permission Denied errors -If you are using a Vault version less than 0.7.1 with a Nomad version greater than or equal to 0.6.1, you will need to update your task's policy (listed in [the `vault` stanza of the job specification][vault-spec]) to add the following: +If you are using a Vault version less than 0.7.1 with a Nomad version greater than or equal to 0.6.1, you will need to update your task's policy (listed in [the `vault` block of the job specification][vault-spec]) to add the following: ```hcl path "sys/leases/renew" { diff --git a/website/content/docs/job-specification/affinity.mdx b/website/content/docs/job-specification/affinity.mdx index a8589e14606..a070254c07b 100644 --- a/website/content/docs/job-specification/affinity.mdx +++ b/website/content/docs/job-specification/affinity.mdx @@ -1,13 +1,13 @@ --- layout: docs -page_title: affinity Stanza - Job Specification +page_title: affinity Block - Job Specification description: |- - The "affinity" stanza allows restricting the set of eligible nodes. + The "affinity" block allows restricting the set of eligible nodes. Affinities may filter on attributes or metadata. Additionally affinities may be specified at the job, group, or task levels for ultimate flexibility. --- -# `affinity` Stanza +# `affinity` Block -The `affinity` stanza allows operators to express placement preference for a set of nodes. Affinities may +The `affinity` block allows operators to express placement preference for a set of nodes. Affinities may be expressed on [attributes][interpolation] or [client metadata][client-meta]. Additionally affinities may be specified at the [job][job], [group][group], or [task][task] levels for ultimate flexibility. @@ -52,7 +52,7 @@ job "docs" { } ``` -Affinities apply to task groups but may be specified within job and task stanzas as well. +Affinities apply to task groups but may be specified within job and task blocks as well. Job affinities apply to all groups within the job. Task affinities apply to the whole task group that the task is a part of. @@ -170,8 +170,8 @@ affinity { ## `affinity` Examples -The following examples only show the `affinity` stanzas. Remember that the -`affinity` stanza is only valid in the placements listed above. +The following examples only show the `affinity` blocks. Remember that the +`affinity` block is only valid in the placements listed above. ### Kernel Data @@ -275,4 +275,4 @@ The placement score is affected by the following factors. of a job on the same node. - `node-reschedule-penalty` - Used when the job is being rescheduled. Nomad adds a penalty to avoid placing the job on a node where it has failed to run before. -- `node-affinity` - Used when the criteria specified in the `affinity` stanza matches the node. +- `node-affinity` - Used when the criteria specified in the `affinity` block matches the node. 
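For reference, a hedged sketch of the weighted-preference form those scores act on; the attribute and value are illustrative.

```hcl
affinity {
  # Prefer, but do not require, nodes in this datacenter.
  attribute = "${node.datacenter}"
  value     = "us-west1"
  weight    = 50
}
```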
diff --git a/website/content/docs/job-specification/artifact.mdx b/website/content/docs/job-specification/artifact.mdx
index 291351d11b7..f42692b60fd 100644
--- a/website/content/docs/job-specification/artifact.mdx
+++ b/website/content/docs/job-specification/artifact.mdx
@@ -1,17 +1,17 @@
---
layout: docs
-page_title: artifact Stanza - Job Specification
+page_title: artifact Block - Job Specification
description: |-
-  The "artifact" stanza instructs Nomad to fetch and unpack a remote resource,
+  The "artifact" block instructs Nomad to fetch and unpack a remote resource,
  such as a file, tarball, or binary, and permits downloading artifacts from a
  variety of locations using a URL as the input source.
---

-# `artifact` Stanza
+# `artifact` Block

-The `artifact` stanza instructs Nomad to fetch and unpack a remote resource,
+The `artifact` block instructs Nomad to fetch and unpack a remote resource,
 such as a file, tarball, or binary. Nomad downloads artifacts using the popular
 [`go-getter`][go-getter] library, which permits downloading artifacts from a
 variety of locations using a URL as the input source.
@@ -73,8 +73,8 @@ interrupted and fail to start. Refer to the task events for more information.

 ## `artifact` Examples

-The following examples only show the `artifact` stanzas. Remember that the
-`artifact` stanza is only valid in the placements listed above.
+The following examples only show the `artifact` blocks. Remember that the
+`artifact` block is only valid in the placements listed above.

 ### Download File
diff --git a/website/content/docs/job-specification/change_script.mdx b/website/content/docs/job-specification/change_script.mdx
index 61a6b5dd97b..38418678379 100644
--- a/website/content/docs/job-specification/change_script.mdx
+++ b/website/content/docs/job-specification/change_script.mdx
@@ -1,15 +1,15 @@
---
layout: docs
-page_title: change_script Stanza - Job Specification
-description: The "change_script" stanza configures a script to be run on template re-render.
+page_title: change_script Block - Job Specification
+description: The "change_script" block configures a script to be run on template re-render.
---

-# `change_script` Stanza
+# `change_script` Block

-The `change_script` stanza allows operators to configure scripts that
-will be executed on template change. This stanza is only used when template
+The `change_script` block allows operators to configure scripts that
+will be executed on template change. This block is only used when template
 `change_mode` is set to `script`.

```hcl
@@ -53,7 +53,7 @@ job "docs" {
 ### Template as a script example

 Below is an example of how a script can be embedded in a `data` block of another
-`template` stanza:
+`template` block:

```hcl
job "docs" {
diff --git a/website/content/docs/job-specification/check.mdx b/website/content/docs/job-specification/check.mdx
index a3db710af0b..e1e344aa94a 100644
--- a/website/content/docs/job-specification/check.mdx
+++ b/website/content/docs/job-specification/check.mdx
@@ -5,7 +5,7 @@ description: |-
  The "check" block declares a service check definition for a service
  registered into the Nomad or Consul service provider.
---

-# `check` Stanza
+# `check` Block

- `args` `(array<string>: [])` - Specifies additional arguments to the
  `command`. This only applies to script-based health checks.

-- `check_restart` - See [`check_restart` stanza][check_restart_stanza].
+- `check_restart` - See [`check_restart` block][check_restart_block].

 - `command` `(string: )` - Specifies the command to run for performing the
  health check.
The script must exit: 0 for passing, 1 for warning, or any
@@ -123,7 +123,7 @@ job "example" {
 - `port` `(string: )` - Specifies the label of the port on which the check
  will be performed. Note this is the _label_ of the port and not the port
  number unless `address_mode = driver`. The port label must match one defined
-  in the [`network`][network] stanza. If a port value was declared on the
+  in the [`network`][network] block. If a port value was declared on the
  `service`, this will inherit from that value if not supplied. If supplied,
  this value takes precedence over the `service.port` value. This is useful
  for services which operate on multiple ports. `grpc`, `http`, and `tcp` checks
@@ -136,7 +136,7 @@ job "example" {

 - `task` `(string: "")` - Specifies the task associated with this
  check. Scripts are executed within the task's environment, and
-  `check_restart` stanzas will apply to the specified task. Inherits
+  `check_restart` blocks will apply to the specified task. Inherits
  the [`service.task`][service_task] value if not set. Must be unset or
  equivalent to `service.task` in task-level services.
@@ -177,14 +177,14 @@ job "example" {
  - `ignore` - Any status will be treated as healthy.

  ~> **Caveat:** `on_update` is only compatible with certain
-  [`check_restart`][check_restart_stanza] configurations. `on_update = "ignore_warnings"` requires that `check_restart.ignore_warnings = true`.
+  [`check_restart`][check_restart_block] configurations. `on_update = "ignore_warnings"` requires that `check_restart.ignore_warnings = true`.
  `check_restart` can however specify `ignore_warnings = true` with `on_update = "require_healthy"`.
  If `on_update` is set to `ignore`, `check_restart` must be omitted entirely.

-#### `header` Stanza
+#### `header` Block

-HTTP checks may include a `header` stanza to set HTTP headers. The `header`
-stanza parameters have lists of strings as values. Multiple values will cause
+HTTP checks may include a `header` block to set HTTP headers. The `header`
+block parameters have lists of strings as values. Multiple values will cause
 the header to be set multiple times, once for each value.

```hcl
@@ -285,7 +285,7 @@ service {
```

 In this example Consul would health check the `example.Service` service on the
-`rpc` port defined in the task's [network resources][network] stanza. See
+`rpc` port defined in the task's [network resources][network] block. See
 [Using Driver Address Mode](#using-driver-address-mode) for details on address
 selection.
@@ -434,9 +434,9 @@ Output = nomad: Get "http://:9999/": dial tcp :9999: connect: connection re
  does not have access to the file system of a task for that driver.
-[check_restart_stanza]: /nomad/docs/job-specification/check_restart
+[check_restart_block]: /nomad/docs/job-specification/check_restart
 [consul_passfail]: /consul/docs/discovery/checks#success-failures-before-passing-critical
 [network]: /nomad/docs/job-specification/network 'Nomad network Job Specification'
 [service]: /nomad/docs/job-specification/service
 [service_task]: /nomad/docs/job-specification/service#task-1
-[on_update]: /nomad/docs/job-specification/service#on_update
\ No newline at end of file
+[on_update]: /nomad/docs/job-specification/service#on_update
diff --git a/website/content/docs/job-specification/check_restart.mdx b/website/content/docs/job-specification/check_restart.mdx
index 559a2663599..8964fa40eac 100644
--- a/website/content/docs/job-specification/check_restart.mdx
+++ b/website/content/docs/job-specification/check_restart.mdx
@@ -1,12 +1,12 @@
---
layout: docs
-page_title: check_restart Stanza - Job Specification
+page_title: check_restart Block - Job Specification
description: |-
-  The "check_restart" stanza instructs Nomad when to restart tasks with
+  The "check_restart" block instructs Nomad when to restart tasks with
  unhealthy service checks.
---

-# `check_restart` Stanza
+# `check_restart` Block

-The `check_restart` stanza instructs Nomad when to restart tasks with unhealthy
+The `check_restart` block instructs Nomad when to restart tasks with unhealthy
 service checks. When a health check in Nomad or Consul has been unhealthy for the `limit`
-specified in a `check_restart` stanza, it is restarted according to the task group's
-[`restart` policy][restart_stanza]. The `check_restart` settings apply to
-[`check`s][check_stanza], but may also be placed on [`service`s][service_stanza]
+specified in a `check_restart` block, the task is restarted according to the task group's
+[`restart` policy][restart_block]. The `check_restart` settings apply to
+[`check`s][check_block], but may also be placed on [`service`s][service_block]
 to apply to all checks on a service. If `check_restart` is set on both the check
-and service, the stanzas are merged with the check values taking precedence.
+and service, the blocks are merged with the check values taking precedence.

```hcl
job "mysql" {
@@ -105,7 +105,7 @@ check_restart {

 After the grace period, if the script check fails, it has 180 seconds (`60s interval * 3 limit`)
 to pass before a restart is triggered. Once a restart is triggered the task group's
-[`restart` policy][restart_stanza] takes control:
+[`restart` policy][restart_block] takes control:

```hcl
restart {
@@ -115,7 +115,7 @@ restart {
}
```

-The [`restart` stanza][restart_stanza] controls the restart behavior of the
+The [`restart` block][restart_block] controls the restart behavior of the
 task. In this case it will stop the task and then wait 10 seconds before starting
 it again.

@@ -134,10 +134,10 @@ restart {
 If the check continues to fail, the task will be restarted up to `attempts`
 times within an `interval`. If the `restart` attempts are reached within the
 `limit` then the `mode` controls the behavior. In this case the task would fail
-and not be restarted again. See the [`restart` stanza][restart_stanza] for
+and not be restarted again. See the [`restart` block][restart_block] for
 details.
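Putting the whole flow together, here is a hedged sketch of a check paired with the restart trigger discussed above; the service name, port label, and script path are illustrative.

```hcl
service {
  name = "mysql"
  port = "db"

  check {
    type     = "script"
    command  = "/usr/local/bin/mysql-healthcheck"
    interval = "60s"
    timeout  = "10s"

    check_restart {
      # Restart after 3 consecutive failures, i.e. up to 180s of
      # unhealthiness once the initial 90s grace period has passed.
      limit           = 3
      grace           = "90s"
      ignore_warnings = false
    }
  }
}
```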
-[check_stanza]: /nomad/docs/job-specification/service#check-parameters 'check stanza' +[check_block]: /nomad/docs/job-specification/service#check-parameters 'check block' [gh-9176]: https://github.com/hashicorp/nomad/issues/9176 -[restart_stanza]: /nomad/docs/job-specification/restart 'restart stanza' -[service_stanza]: /nomad/docs/job-specification/service 'service stanza' +[restart_block]: /nomad/docs/job-specification/restart 'restart block' +[service_block]: /nomad/docs/job-specification/service 'service block' diff --git a/website/content/docs/job-specification/connect.mdx b/website/content/docs/job-specification/connect.mdx index b94d9133fed..78920e1a76a 100644 --- a/website/content/docs/job-specification/connect.mdx +++ b/website/content/docs/job-specification/connect.mdx @@ -1,14 +1,14 @@ --- layout: docs -page_title: connect Stanza - Job Specification -description: The "connect" stanza allows specifying options for Consul Connect integration +page_title: connect Block - Job Specification +description: The "connect" block allows specifying options for Consul Connect integration --- -# `connect` Stanza +# `connect` Block -The `connect` stanza allows configuring various options for +The `connect` block allows configuring various options for [Consul Connect](/nomad/docs/integrations/consul-connect). It is valid only within the context of a service definition at the task group level. For using `connect` when Consul ACLs are enabled, be sure to read through @@ -65,7 +65,7 @@ or `gateway` may be realized per `connect` block. ### Using Connect Native -The following example is a minimal service stanza for a +The following example is a minimal service block for a [Consul Connect Native](/consul/docs/connect/native) application implemented by a task named `generate`. @@ -83,7 +83,7 @@ service { ### Using Sidecar Service -The following example is a minimal connect stanza with defaults and is +The following example is a minimal connect block with defaults and is sufficient to start an Envoy proxy sidecar for allowing incoming connections via Consul Connect. @@ -191,7 +191,7 @@ job "countdash" { ### Using a Gateway -The following is an example service stanza for creating and using a connect ingress +The following is an example service block for creating and using a connect ingress gateway. It includes a gateway service definition and an api service fronted by the gateway. Once running, the gateway can be used to reach the api service by first looking up the gateway Consul DNS address, e.g. diff --git a/website/content/docs/job-specification/constraint.mdx b/website/content/docs/job-specification/constraint.mdx index ee0987e85e0..82c0c55ef19 100644 --- a/website/content/docs/job-specification/constraint.mdx +++ b/website/content/docs/job-specification/constraint.mdx @@ -1,13 +1,13 @@ --- layout: docs -page_title: constraint Stanza - Job Specification +page_title: constraint Block - Job Specification description: |- - The "constraint" stanza allows restricting the set of eligible nodes. + The "constraint" block allows restricting the set of eligible nodes. Constraints may filter on attributes or metadata. Additionally constraints may be specified at the job, group, or task levels for ultimate flexibility. --- -# `constraint` Stanza +# `constraint` Block - - The "csi_plugin" stanza allows the task to specify it provides a + The "csi_plugin" block allows the task to specify it provides a Container Storage Interface plugin to the cluster. 
---

-# `csi_plugin` Stanza
+# `csi_plugin` Block

-The "csi_plugin" stanza allows the task to specify it provides a
+The "csi_plugin" block allows the task to specify that it provides a
 Container Storage Interface plugin to the cluster. Nomad
 will automatically register the plugin so that it can be used by other
 jobs to claim [volumes][csi_volumes].
@@ -46,15 +46,15 @@ csi_plugin {
  bidirectional communication with Nomad. This field is typically not
  required. Refer to your CSI plugin's documentation for details.

-- `stage_publish_base_dir` `(string: )` - The base directory
+- `stage_publish_base_dir` `(string: )` - The base directory
  path inside the container where the plugin will be instructed to
-  stage and publish volumes. This field is typically not required.
+  stage and publish volumes. This field is typically not required.
  Refer to your CSI plugin's documentation for details.

-- `health_timeout` `(duration: )` - The duration that
+- `health_timeout` `(duration: )` - The duration that
  the plugin supervisor will wait before restarting an unhealthy
  CSI plugin. Must be a duration value such as `30s` or `2m`.
-  Defaults to `30s` if not set.
+  Defaults to `30s` if not set.

 ~> **Note:** Plugins running as `node` or `monolith` require root
  privileges (or `CAP_SYS_ADMIN` on Linux) to mount volumes on the
diff --git a/website/content/docs/job-specification/device.mdx b/website/content/docs/job-specification/device.mdx
index ab1dc2adce9..0748bf8ec43 100644
--- a/website/content/docs/job-specification/device.mdx
+++ b/website/content/docs/job-specification/device.mdx
@@ -1,22 +1,22 @@
---
layout: docs
-page_title: device Stanza - Job Specification
+page_title: device Block - Job Specification
description: |-
-  The "device" stanza is used to require a certain device be made available
+  The "device" block is used to require a certain device be made available
  to the task.
---

-# `device` Stanza
+# `device` Block

-The `device` stanza is used to create both a scheduling and runtime requirement
+The `device` block is used to create both a scheduling and runtime requirement
 that the given task has access to the specified devices. A device is a hardware
 device that is attached to the node and may be made available to the task.
 Examples are GPUs, FPGAs, and TPUs.

-When a `device` stanza is added, Nomad will schedule the task onto a node that
-contains the set of device(s) that meet the specified requirements. The `device` stanza
+When a `device` block is added, Nomad will schedule the task onto a node that
+contains the set of device(s) that meet the specified requirements. The `device` block
 allows the operator to specify as little as just the type of device required,
 such as `gpu`, all the way to specifying arbitrary constraints and affinities.
 Once the scheduler has placed the allocation on a suitable node, the Nomad
@@ -57,7 +57,7 @@ In the above example, the task is requesting two GPUs, from the Nvidia vendor,
 but is not specifying the specific model required. Instead it is placing a hard
 constraint that the device has at least 2 GiB of memory and that it would
 prefer to use GPUs that have at least 4 GiB. This example shows how expressive the
-`device` stanza can be.
+`device` block can be.

 ~> Device support is currently limited to Linux, and container based drivers
  due to the ability to isolate devices to specific tasks.
@@ -227,8 +227,8 @@ Conversion is only possible within the same base unit.

 ## `device` Examples

-The following examples only show the `device` stanzas.
Remember that the
-`device` stanza is only valid in the placements listed above.
+The following examples only show the `device` blocks. Remember that the
+`device` block is only valid in the placements listed above.

 ### Single Nvidia GPU
diff --git a/website/content/docs/job-specification/dispatch_payload.mdx b/website/content/docs/job-specification/dispatch_payload.mdx
index 24c030e97e6..482fe100475 100644
--- a/website/content/docs/job-specification/dispatch_payload.mdx
+++ b/website/content/docs/job-specification/dispatch_payload.mdx
@@ -1,18 +1,18 @@
---
layout: docs
-page_title: dispatch_payload Stanza - Job Specification
+page_title: dispatch_payload Block - Job Specification
description: |-
-  The "dispatch_payload" stanza allows a task to access dispatch payloads.
+  The "dispatch_payload" block allows a task to access dispatch payloads.
---

-# `dispatch_payload` Stanza
+# `dispatch_payload` Block

-The `dispatch_payload` stanza is used in conjunction with a [`parameterized`][parameterized] job
+The `dispatch_payload` block is used in conjunction with a [`parameterized`][parameterized] job
 that expects a payload. When the job is dispatched with a payload, the payload
-will be made available to any task that has a `dispatch_payload` stanza. The
+will be made available to any task that has a `dispatch_payload` block. The
 payload will be written to the configured file before the task is started. This
 allows the task to use the payload as input or configuration.

@@ -36,8 +36,8 @@ job "docs" {

 ## `dispatch_payload` Examples

-The following examples only show the `dispatch_payload` stanzas. Remember that the
-`dispatch_payload` stanza is only valid in the placements listed above.
+The following examples only show the `dispatch_payload` blocks. Remember that the
+`dispatch_payload` block is only valid in the placements listed above.

 ### Write Payload to a File
diff --git a/website/content/docs/job-specification/env.mdx b/website/content/docs/job-specification/env.mdx
index a51ec70708c..9e7872395ac 100644
--- a/website/content/docs/job-specification/env.mdx
+++ b/website/content/docs/job-specification/env.mdx
@@ -1,16 +1,16 @@
---
layout: docs
-page_title: env Stanza - Job Specification
+page_title: env Block - Job Specification
description: |-
-  The "env" stanza configures a list of environment variables to populate the
+  The "env" block configures a list of environment variables to populate the
  task's environment before starting.
---

-# `env` Stanza
+# `env` Block

-The `env` stanza configures a list of environment variables to populate the
+The `env` block configures a list of environment variables to populate the
 task's environment before starting.

```hcl
@@ -27,15 +27,15 @@ job "docs" {

 ## `env` Parameters

-The "parameters" for the `env` stanza can be any key-value. The keys and values
+The "parameters" for the `env` block can be any key-value pair. The keys and values
 are both of type `string`, but they can be specified as other types. They will
 automatically be converted to strings. Invalid characters such as dashes (`-`)
 will be converted to underscores.

 ## `env` Examples

-The following examples only show the `env` stanzas. Remember that the
-`env` stanza is only valid in the placements listed above.
+The following examples only show the `env` blocks. Remember that the
+`env` block is only valid in the placements listed above.

 ### Coercion
@@ -79,7 +79,7 @@ env = {

 Nomad also supports populating dynamic environment variables from data stored
 in HashiCorp Consul and Vault.

 [interpolation]: /nomad/docs/runtime/interpolation 'Nomad interpolation'
-[template-env]: /nomad/docs/job-specification/template#environment-variables 'Nomad template Stanza'
+[template-env]: /nomad/docs/job-specification/template#environment-variables 'Nomad template Block'
diff --git a/website/content/docs/job-specification/ephemeral_disk.mdx b/website/content/docs/job-specification/ephemeral_disk.mdx
index 3e48b6f9e67..7a508ab4ed8 100644
--- a/website/content/docs/job-specification/ephemeral_disk.mdx
+++ b/website/content/docs/job-specification/ephemeral_disk.mdx
@@ -1,17 +1,17 @@
 ---
 layout: docs
-page_title: ephemeral_disk Stanza - Job Specification
+page_title: ephemeral_disk Block - Job Specification
 description: |-
-  The "ephemeral_disk" stanza describes the ephemeral disk requirements of the
+  The "ephemeral_disk" block describes the ephemeral disk requirements of the
   group. Ephemeral disks can be marked as sticky and support live data
   migrations.
 ---

-# `ephemeral_disk` Stanza
+# `ephemeral_disk` Block

-The `ephemeral_disk` stanza describes the ephemeral disk requirements of the
+The `ephemeral_disk` block describes the ephemeral disk requirements of the
 group. Ephemeral disks can be marked as sticky and support live data
 migrations. All tasks in this group will share the same ephemeral disk.

@@ -47,8 +47,8 @@ The ephemeral disk can be referenced under `alloc/data/`. More information can b

 ## `ephemeral_disk` Examples

-The following examples only show the `ephemeral_disk` stanzas. Remember that the
-`ephemeral_disk` stanza is only valid in the placements listed above.
+The following examples only show the `ephemeral_disk` blocks. Remember that the
+`ephemeral_disk` block is only valid in the placements listed above.

 ### Sticky Volumes

diff --git a/website/content/docs/job-specification/expose.mdx b/website/content/docs/job-specification/expose.mdx
index 567127ef9ef..e709e4c4647 100644
--- a/website/content/docs/job-specification/expose.mdx
+++ b/website/content/docs/job-specification/expose.mdx
@@ -1,12 +1,12 @@
 ---
 layout: docs
-page_title: expose Stanza - Job Specification
+page_title: expose Block - Job Specification
 description: |-
-  The "expose" stanza allows specifying options for configuring Envoy expose
+  The "expose" block allows specifying options for configuring Envoy expose
   paths used in Consul Connect integration
 ---

-# `expose` Stanza
+# `expose` Block

-The `expose` stanza allows configuration of additional listeners for the default
+The `expose` block allows configuration of additional listeners for the default
 Envoy sidecar proxy managed by Nomad for [Consul
 Connect][learn-consul-connect]. These listeners create a bypass of the Connect
 TLS and network namespace isolation, enabling non-Connect-enabled services to
 make requests to specific HTTP paths through the sidecar proxy.

-The `expose` configuration is valid within the context of a `proxy` stanza.
+The `expose` configuration is valid within the context of a `proxy` block.
 Additional information about Expose Path configurations for Envoy can be found
 in Consul's [Expose Paths Configuration Reference][consul-expose-path-config].

@@ -72,7 +72,7 @@ job "expose-check-example" {
 ```

 For uses other than Consul service checks, use the `expose` configuration in the
-`proxy` stanza. The example below effectively demonstrates exposing the
+`proxy` block. The example below demonstrates exposing the
 `/health` endpoint similarly to the example above, but using the fully flexible
 `expose` configuration.

@@ -213,7 +213,7 @@ proxy {

 A common use case for `expose` is exposing endpoints used in Consul service
 check definitions. For these cases the [expose][] parameter in the service check
-stanza can be used to automatically generate the expose path configuration.
+block can be used to automatically generate the expose path configuration.
 Configuring a port for use by the check is optional, as a dynamic port will be
 automatically generated if not provided.

diff --git a/website/content/docs/job-specification/gateway.mdx b/website/content/docs/job-specification/gateway.mdx
index 9103be3ce86..16889f41140 100644
--- a/website/content/docs/job-specification/gateway.mdx
+++ b/website/content/docs/job-specification/gateway.mdx
@@ -1,20 +1,20 @@
 ---
 layout: docs
-page_title: gateway Stanza - Job Specification
+page_title: gateway Block - Job Specification
 description: |-
-  The "gateway" stanza allows specifying options for configuring Consul Gateways
+  The "gateway" block allows specifying options for configuring Consul Gateways
   used in the Consul Connect integration
 ---

-# `gateway` Stanza
+# `gateway` Block

-The `gateway` stanza allows configuration of [Consul Connect Gateways](/consul/docs/connect/gateways). Nomad will
+The `gateway` block allows configuration of [Consul Connect Gateways](/consul/docs/connect/gateways). Nomad will
 automatically create the necessary Gateway [Configuration Entry](/consul/docs/agent/config-entries) as
 well as inject an Envoy proxy task into the Nomad job to serve as the Gateway.

-The `gateway` configuration is valid within the context of a `connect` stanza.
+The `gateway` configuration is valid within the context of a `connect` block.
 Additional information about Gateway configurations can be found in Consul's
 [Connect Gateways](/consul/docs/connect/gateways) documentation.

@@ -211,7 +211,7 @@ meta.connect.gateway_image = custom/envoy-${NOMAD_envoy_version}:latest

 ### Custom gateway task

 The task created for the gateway can be configured manually using the
-[`sidecar_task`][sidecar_task] stanza.
+[`sidecar_task`][sidecar_task] block.

 ```
 connect {
diff --git a/website/content/docs/job-specification/group.mdx b/website/content/docs/job-specification/group.mdx
index bbb3e476d6c..a64fe832254 100644
--- a/website/content/docs/job-specification/group.mdx
+++ b/website/content/docs/job-specification/group.mdx
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: group Stanza - Job Specification
+page_title: group Block - Job Specification
 description: |-
-  The "group" stanza defines a series of tasks that should be co-located on the
+  The "group" block defines a series of tasks that should be co-located on the
   same Nomad client. Any task within a group will be placed on the same client.
 ---

-# `group` Stanza
+# `group` Block

-The `group` stanza defines a series of tasks that should be co-located on the
+The `group` block defines a series of tasks that should be co-located on the
 same Nomad client. Any [task][] within a group will be placed on the same
 client.

@@ -52,7 +52,7 @@ job "docs" {

 - `migrate` ([Migrate][]: nil) - Specifies the group's strategy for
   migrating off of draining nodes. Only service jobs with a count greater than
-  1 support migrate stanzas.
- `network` ([Network][]: <optional>) - Specifies the network requirements and configuration, including static and dynamic port allocations, @@ -64,7 +64,7 @@ job "docs" { - `restart` ([Restart][]: nil) - Specifies the restart policy for all tasks in this group. If omitted, a default policy exists for each job - type, which can be found in the [restart stanza documentation][restart]. + type, which can be found in the [restart block documentation][restart]. - `service` ([Service][]: nil) - Specifies integrations with Nomad or [Consul](/nomad/docs/configuration/consul) for service discovery. Nomad @@ -125,8 +125,8 @@ job "docs" { ## `group` Examples -The following examples only show the `group` stanzas. Remember that the -`group` stanza is only valid in the placements listed above. +The following examples only show the `group` blocks. Remember that the +`group` block is only valid in the placements listed above. ### Specifying Count @@ -175,7 +175,7 @@ group "example" { ### Network -This example shows network constraints as specified in the [network][] stanza +This example shows network constraints as specified in the [network][] block which uses the `bridge` networking mode, dynamically allocates two ports, and statically allocates one port: @@ -222,7 +222,7 @@ group "example" { ### Stop After Client Disconnect This example shows how `stop_after_client_disconnect` interacts with -other stanzas. For the `first` group, after the default 10 second +other blocks. For the `first` group, after the default 10 second [`heartbeat_grace`] window expires and 90 more seconds passes, the server will reschedule the allocation. The client will wait 90 seconds before sending a stop signal (`SIGTERM`) to the `first-task` diff --git a/website/content/docs/job-specification/index.mdx b/website/content/docs/job-specification/index.mdx index 75af37d8615..6ba2b01ff10 100644 --- a/website/content/docs/job-specification/index.mdx +++ b/website/content/docs/job-specification/index.mdx @@ -12,7 +12,7 @@ between human readable and editable, and machine-friendly. The job specification is broken down into smaller pieces, which you will find expanded in the navigation menu. We recommend getting started at the [job][] -stanza. Alternatively, you can keep reading to see a few examples. +block. Alternatively, you can keep reading to see a few examples. Nomad HCL is parsed in the command line and sent to Nomad in JSON format via the HTTP API. @@ -127,9 +127,9 @@ job "docs" { } ``` -Note that starting with Nomad 0.10, the `service` stanza can also be specified at the group level. This +Note that starting with Nomad 0.10, the `service` block can also be specified at the group level. This allows job specification authors to create and register services with Consul Connect support. A service -stanza specified at the group level must include a [connect][] stanza, like the following snippet. +block specified at the group level must include a [connect][] block, like the following snippet. 
```hcl service { @@ -144,4 +144,4 @@ service { [hcl]: https://github.com/hashicorp/hcl 'HashiCorp Configuration Language' [job]: /nomad/docs/job-specification/job 'Nomad job Job Specification' -[connect]: /nomad/docs/job-specification/connect 'Connect Stanza Specification' +[connect]: /nomad/docs/job-specification/connect 'Connect Block Specification' diff --git a/website/content/docs/job-specification/job.mdx b/website/content/docs/job-specification/job.mdx index 1daf01c0fa4..d1c66c49bf5 100644 --- a/website/content/docs/job-specification/job.mdx +++ b/website/content/docs/job-specification/job.mdx @@ -1,17 +1,17 @@ --- layout: docs -page_title: job Stanza - Job Specification +page_title: job Block - Job Specification description: |- - The "job" stanza is the top-most configuration option in the job + The "job" block is the top-most configuration option in the job specification. A job is a declarative specification of tasks that Nomad should run. --- -# `job` Stanza +# `job` Block -The `job` stanza is the top-most configuration option in the job specification. +The `job` block is the top-most configuration option in the job specification. A job is a declarative specification of tasks that Nomad should run. Jobs have one or more task groups, which are themselves collections of one or more tasks. Job names are unique per [region][region] or [namespace][namespace]. @@ -91,7 +91,7 @@ job "docs" { - `migrate` ([Migrate][]: nil) - Specifies the groups strategy for migrating off of draining nodes. If omitted, a default migration strategy is - applied. Only service jobs with a count greater than 1 support migrate stanzas. + applied. Only service jobs with a count greater than 1 support migrate blocks. - `namespace` `(string: "default")` - The namespace in which to execute the job. Prior to Nomad 1.0 namespaces were Enterprise-only. @@ -126,7 +126,7 @@ job "docs" { - `vault_token` `(string: "")` - Specifies the Vault token that proves the submitter of the job has access to the specified policies in the - [`vault`][vault] stanza. This field is only used to transfer the token and is + [`vault`][vault] block. This field is only used to transfer the token and is not stored after job submission. !> It is **strongly discouraged** to place the token as a configuration @@ -146,8 +146,8 @@ job "docs" { ## `job` Examples -The following examples only show the `job` stanzas. Remember that the -`job` stanza is only valid in the placements listed above. +The following examples only show the `job` blocks. Remember that the +`job` block is only valid in the placements listed above. ### Docker Container diff --git a/website/content/docs/job-specification/lifecycle.mdx b/website/content/docs/job-specification/lifecycle.mdx index 66c31250b3e..c6dba837137 100644 --- a/website/content/docs/job-specification/lifecycle.mdx +++ b/website/content/docs/job-specification/lifecycle.mdx @@ -1,19 +1,19 @@ --- layout: docs -page_title: lifecycle Stanza - Job Specification +page_title: lifecycle Block - Job Specification description: |- - The "lifecycle" stanza configures when a task is run within the lifecycle of a + The "lifecycle" block configures when a task is run within the lifecycle of a task group --- -# `lifecycle` Stanza +# `lifecycle` Block -The `lifecycle` stanza is used to express task dependencies in Nomad by +The `lifecycle` block is used to express task dependencies in Nomad by configuring when a task is run within the lifecycle of a task group. -Main tasks are tasks that do not have a `lifecycle` stanza. 
Lifecycle task hooks +Main tasks are tasks that do not have a `lifecycle` block. Lifecycle task hooks specify when other tasks are run in relation to the main tasks. There are three different lifecycle hooks, indicating when a task is started: diff --git a/website/content/docs/job-specification/logs.mdx b/website/content/docs/job-specification/logs.mdx index 51c61583532..f4424c72383 100644 --- a/website/content/docs/job-specification/logs.mdx +++ b/website/content/docs/job-specification/logs.mdx @@ -1,20 +1,20 @@ --- layout: docs -page_title: logs Stanza - Job Specification +page_title: logs Block - Job Specification description: |- - The "logs" stanza configures the log rotation policy for a task's stdout and - stderr. Logging is enabled by default with sane defaults. The "logs" stanza + The "logs" block configures the log rotation policy for a task's stdout and + stderr. Logging is enabled by default with sane defaults. The "logs" block allows for finer-grained control over how Nomad handles log files. --- -# `logs` Stanza +# `logs` Block -The `logs` stanza configures the log rotation policy for a task's `stdout` and +The `logs` block configures the log rotation policy for a task's `stdout` and `stderr`. Logging is enabled by default with sane defaults (provided in the parameters section below), and there is currently no way to disable logging for -tasks. The `logs` stanza allows for finer-grained control over how Nomad handles +tasks. The `logs` block allows for finer-grained control over how Nomad handles log files. Nomad's log rotation works by writing stdout/stderr output from tasks to a file @@ -54,8 +54,8 @@ please see the [`nomad alloc logs`][logs-command] command. ## `logs` Examples -The following examples only show the `logs` stanzas. Remember that the -`logs` stanza is only valid in the placements listed above. +The following examples only show the `logs` blocks. Remember that the +`logs` block is only valid in the placements listed above. ### Configure Defaults diff --git a/website/content/docs/job-specification/meta.mdx b/website/content/docs/job-specification/meta.mdx index 9471a45a931..3e018bd36d1 100644 --- a/website/content/docs/job-specification/meta.mdx +++ b/website/content/docs/job-specification/meta.mdx @@ -1,10 +1,10 @@ --- layout: docs -page_title: meta Stanza - Job Specification -description: The "meta" stanza allows for user-defined arbitrary key-value pairs. +page_title: meta Block - Job Specification +description: The "meta" block allows for user-defined arbitrary key-value pairs. --- -# `meta` Stanza +# `meta` Block -The `meta` stanza allows for user-defined arbitrary key-value pairs. It is -possible to use the `meta` stanza at the [job][], [group][], or [task][] level. +The `meta` block allows for user-defined arbitrary key-value pairs. It is +possible to use the `meta` block at the [job][], [group][], or [task][] level. ```hcl job "docs" { @@ -45,15 +45,15 @@ Meta values are made available inside tasks as [runtime environment variables][e ## `meta` Parameters -The "parameters" for the `meta` stanza can be any key-value. The keys and values +The "parameters" for the `meta` block can be any key-value. The keys and values are both of type `string`, but they can be specified as other types. They will automatically be converted to strings. Any `-` character existing in the key will also be converted to `_`. ## `meta` Examples -The following examples only show the `meta` stanzas. Remember that the -`meta` stanza is only valid in the placements listed above. 
+The following examples only show the `meta` blocks. Remember that the
+`meta` block is only valid in the placements listed above.

 ### Coercion

diff --git a/website/content/docs/job-specification/migrate.mdx b/website/content/docs/job-specification/migrate.mdx
index a10cb168c18..e92348246f5 100644
--- a/website/content/docs/job-specification/migrate.mdx
+++ b/website/content/docs/job-specification/migrate.mdx
@@ -1,13 +1,13 @@
 ---
 layout: docs
-page_title: migrate Stanza - Job Specification
+page_title: migrate Block - Job Specification
 description: |-
-  The "migrate" stanza specifies the group's migrate strategy. The migrate
+  The "migrate" block specifies the group's migrate strategy. The migrate
   strategy is used to control the job's behavior when it is being migrated off
   of a draining node.
 ---

-# `migrate` Stanza
+# `migrate` Block

-The `migrate` stanza specifies the group's strategy for migrating allocations from
+The `migrate` block specifies the group's strategy for migrating allocations from
 [draining][drain] nodes. If omitted, a default migration strategy is applied.
 If specified at the job level, the configuration will apply to all groups
 within the job. Only service jobs with a count greater than 1 support migrate
-stanzas.
+blocks.

 ```hcl
 job "docs" {
@@ -39,7 +39,7 @@ allocations have been healthy for their `min_healthy_time` or
 `healthy_deadline` is reached.

 Note that a node's drain [deadline][deadline] will override the `migrate`
-stanza for allocations on that node. The `migrate` stanza is for job authors to
+block for allocations on that node. The `migrate` block is for job authors to
 define how their services should be migrated, while the node drain deadline is
 for system operators to put hard limits on how long a drain may take.

diff --git a/website/content/docs/job-specification/multiregion.mdx b/website/content/docs/job-specification/multiregion.mdx
index e5a8f4a55a1..53f5ee4b4d3 100644
--- a/website/content/docs/job-specification/multiregion.mdx
+++ b/website/content/docs/job-specification/multiregion.mdx
@@ -1,18 +1,18 @@
 ---
 layout: docs
-page_title: multiregion Stanza - Job Specification
+page_title: multiregion Block - Job Specification
 description: |-
-  The "multiregion" stanza specifies that a job will be deployed to multiple federated
+  The "multiregion" block specifies that a job will be deployed to multiple federated
   regions.
 ---

-# `multiregion` Stanza
+# `multiregion` Block

-The `multiregion` stanza specifies that a job will be deployed to multiple
+The `multiregion` block specifies that a job will be deployed to multiple
 [federated regions]. If omitted, the job will be deployed to a single region—the
 one specified by the `region` field or the `-region` command line flag to
 `nomad job run`.

@@ -20,8 +20,8 @@
 Federated Nomad clusters are members of the same gossip cluster but not the
 same raft cluster; they don't share their data stores. Each region in a
 multiregion deployment gets an independent copy of the job, parameterized with
-the values of the `region` stanza. Nomad regions coordinate to rollout each
-region's deployment using rules determined by the `strategy` stanza.
+the values of the `region` block. Nomad regions coordinate to roll out each
+region's deployment using rules determined by the `strategy` block.
```hcl
job "docs" {
@@ -58,7 +58,7 @@
 begins in the `running` state, and ends in the `successful` state, the
 `canceled` state (if another deployment supersedes it before it's
 complete), or the `failed` state. A failed single region deployment may
 automatically revert to the previous version of the job if its `update`
-stanza has the [`auto_revert`][update-auto-revert] setting.
+block has the [`auto_revert`][update-auto-revert] setting.

 In a multiregion deployment, regions begin in the `pending` state. This
 allows Nomad to determine that all regions have accepted the job before
@@ -69,10 +69,10 @@ final region will unblock the regions to mark them as `successful`.

 ## Parameterized Dispatch

-Job dispatching is region specific. While a [parameterized job] can be
+Job dispatching is region specific. While a [parameterized job] can be
 registered in multiple [federated regions] like any other job, a parameterized
 job operates much like a function definition that takes variable input.
-Operators are expected to invoke the job by invoking [`job dispatch`]
+Operators are expected to invoke the job by running [`job dispatch`]
 from the CLI or the [HTTP API] and provide the appropriate dispatch options
 for that region.

@@ -108,10 +108,10 @@ multiregion deployments are considered GA.

 - `on_failure` `(string: )` - Specifies the behavior when a region
   deployment fails. Available options are `"fail_all"`, `"fail_local"`, or the
   default (empty `""`). This field and its interactions with the job's
-  [`update` stanza] is described in the [examples] below.
+  [`update` block] are described in the [examples] below.

   Each region within a multiregion deployment follows the `auto_revert`
-  strategy of its own `update` stanza (if any). The multiregion `on_failure`
+  strategy of its own `update` block (if any). The multiregion `on_failure`
   field tells Nomad how many other regions should be marked as failed when one
   region's deployment fails:

@@ -131,7 +131,7 @@ multiregion deployments are considered GA.

 ~> For `system` jobs, only [`max_parallel`](#max_parallel) is enforced. The
 `system` scheduler will be updated to support `on_failure` when the
-[`update` stanza] is fully supported for system jobs in a future release.
+[`update` block] is fully supported for system jobs in a future release.

 ### `region` Parameters

@@ -147,17 +147,17 @@ The name of a region must match the name of one of the [federated regions].

   datacenters in the region which are eligible for task placement. If not
   provided, the `datacenters` field of the job will be used.

-- `meta` - `Meta: nil` - The meta stanza allows for user-defined arbitrary
+- `meta` - `Meta: nil` - The meta block allows for user-defined arbitrary
   key-value pairs. The meta specified for each region will be merged with the
-  meta stanza at the job level.
+  meta block at the job level.

 As described above, the parameters for each region replace the default values
 for the field with the same name for each region.

 ## `multiregion` Examples

-The following examples only show the `multiregion` stanza and the other
-stanzas it might be interacting with.
+The following examples only show the `multiregion` block and the other
+blocks it might be interacting with.

 ### Max Parallel

@@ -185,7 +185,7 @@ multiregion {

 This example shows the default value of `on_failure`. Because `max_parallel =
 1`, the "north" region will deploy first, followed by "south", and so on. But
 supposing the "east" region failed, both the "east" region and the "west"
-region would be marked `failed`. Because the job has an `update` stanza with
+region would be marked `failed`. Because the job has an `update` block with
 `auto_revert=true`, both regions would then roll back to the previous job
 version. The "north" and "south" regions would remain `blocked` until an
 operator intervenes.
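To make that interaction concrete, here is a minimal sketch of the pairing
described above (the region names follow the prose; every other value is
illustrative, not a recommendation):

```hcl
multiregion {
  strategy {
    max_parallel = 1
    # Default "": the failed region plus any regions that have not
    # yet deployed are marked failed; earlier regions stay blocked.
    on_failure = ""
  }

  region "north" {}
  region "south" {}
  region "east" {}
  region "west" {}
}

update {
  # Each failed region rolls back to its previous job version.
  auto_revert = true
}
```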
@@ -276,7 +276,7 @@ group "worker" {
 ```

 [federated regions]: /nomad/tutorials/manage-clusters/federation
-[`update` stanza]: /nomad/docs/job-specification/update
+[`update` block]: /nomad/docs/job-specification/update
 [update-auto-revert]: /nomad/docs/job-specification/update#auto_revert
 [examples]: #multiregion-examples
 [upgrade strategies]: /nomad/tutorials/job-updates
diff --git a/website/content/docs/job-specification/network.mdx b/website/content/docs/job-specification/network.mdx
index b2e9faa99fa..ada5550781d 100644
--- a/website/content/docs/job-specification/network.mdx
+++ b/website/content/docs/job-specification/network.mdx
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: network Stanza - Job Specification
+page_title: network Block - Job Specification
 description: |-
-  The "network" stanza specifies the networking requirements for the task group,
+  The "network" block specifies the networking requirements for the task group,
   including networking mode and port allocations.
 ---

-# `network` Stanza
+# `network` Block

-The `network` stanza specifies the networking requirements for the task group,
+The `network` block specifies the networking requirements for the task group,
 including the network mode and port allocations. When scheduling jobs in
 Nomad they are provisioned across your fleet of machines along with other jobs
 and services. Because you don't know in advance what host your job will be

@@ -38,7 +38,7 @@ job "docs" {

 ### Network modes

-When the `network` stanza is defined with `bridge` as the networking mode,
+When the `network` block is defined with `bridge` as the networking mode,
 all tasks in the task group share the same network namespace. This is a
 prerequisite for [Consul Connect](/nomad/docs/integrations/consul-connect).
 Tasks running within a network namespace are not visible to applications
 outside the namespace on the same host.

@@ -128,8 +128,8 @@ These parameters support [interpolation](/nomad/docs/runtime/interpolation).

 ## `network` Examples

-The following examples only show the `network` stanzas. Remember that the
-`network` stanza is only valid in the placements listed above.
+The following examples only show the `network` blocks. Remember that the
+`network` block is only valid in the placements listed above.

 ### Dynamic Ports

@@ -209,7 +209,7 @@

 Bridge mode allows compatible tasks to share a networking stack and interfaces. Nomad
 can then do port mapping without relying on individual task drivers to
 implement port mapping configuration.

-The following example is a group level network stanza that uses bridge mode
+The following example is a group-level network block that uses bridge mode
 and port mapping.

 ```hcl
@@ -253,7 +253,7 @@ Nomad supports CNI by fingerprinting each node for [CNI network configurations](

 These are associated to the node by the `name` field of the CNI configuration.
 The `name` can then be used when setting the network `mode` field in the form of `cni/<name>`.

-As an example if the following CNI configuration was on a node the proceeding network stanza could be used.
+As an example, if the following CNI configuration were present on a node, the network block after it could be used.

 ```json
 {
@@ -290,7 +290,7 @@ network {
 }
 ```

-The Nomad client will build the correct [capabilities arguments](https://github.com/containernetworking/cni/blob/v0.8.0/CONVENTIONS.md#well-known-capabilities) for the portmap plugin based on the defined port stanzas.
+The Nomad client will build the correct [capabilities arguments](https://github.com/containernetworking/cni/blob/v0.8.0/CONVENTIONS.md#well-known-capabilities) for the portmap plugin based on the defined port blocks.
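For example, here is a sketch of a port mapping under a CNI network (the
network name `mynet` is assumed to match a CNI configuration fingerprinted on
the node; the port label and target port are illustrative):

```hcl
network {
  mode = "cni/mynet"

  # Nomad passes this mapping to the CNI portmap plugin
  # as a port-mapping capability argument.
  port "http" {
    to = 8080
  }
}
```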

 ### Host Networks

@@ -321,7 +321,7 @@ network {
 }
 ```

 ### Limitations

-- Only one `network` stanza can be specified, when it is defined at the task group level.
+- Only one `network` block can be specified when it is defined at the task group level.

- Only the `NOMAD_PORT_