diff --git a/config.toml b/config.toml index cadc842717dc9..6fe259ce30765 100644 --- a/config.toml +++ b/config.toml @@ -64,10 +64,10 @@ time_format_blog = "Monday, January 02, 2006" description = "Production-Grade Container Orchestration" showedit = true -latest = "v1.14" +latest = "v1.15" -fullversion = "v1.14.0" -version = "v1.14" +fullversion = "v1.15.0" +version = "v1.15" githubbranch = "master" docsbranch = "master" deprecated = false @@ -77,10 +77,10 @@ githubWebsiteRepo = "github.com/kubernetes/website" githubWebsiteRaw = "raw.githubusercontent.com/kubernetes/website" [[params.versions]] -fullversion = "v1.14.0" -version = "v1.14" -githubbranch = "v1.14.0" -docsbranch = "release-1.14" +fullversion = "v1.15.0" +version = "v1.15" +githubbranch = "v1.15.0" +docsbranch = "release-1.15" url = "https://kubernetes.io" [params.pushAssets] @@ -95,33 +95,33 @@ js = [ ] [[params.versions]] -fullversion = "v1.13.4" +fullversion = "v1.14.3" +version = "v1.14" +githubbranch = "v1.14.3" +docsbranch = "release-1.14" +url = "https://v1-14.docs.kubernetes.io" + +[[params.versions]] +fullversion = "v1.13.7" version = "v1.13" -githubbranch = "v1.13.4" +githubbranch = "v1.13.7" docsbranch = "release-1.13" url = "https://v1-13.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.12.6" +fullversion = "v1.12.9" version = "v1.12" -githubbranch = "v1.12.6" +githubbranch = "v1.12.9" docsbranch = "release-1.12" url = "https://v1-12.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.11.8" +fullversion = "v1.11.10" version = "v1.11" -githubbranch = "v1.11.8" +githubbranch = "v1.11.10" docsbranch = "release-1.11" url = "https://v1-11.docs.kubernetes.io" -[[params.versions]] -fullversion = "v1.10.13" -version = "v1.10" -githubbranch = "v1.10.13" -docsbranch = "release-1.10" -url = "https://v1-10.docs.kubernetes.io" - # Language definitions. [languages] diff --git a/content/en/docs/concepts/cluster-administration/cloud-providers.md b/content/en/docs/concepts/cluster-administration/cloud-providers.md index 4be46003fdca5..6ea9d9ed211e8 100644 --- a/content/en/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/en/docs/concepts/cluster-administration/cloud-providers.md @@ -17,14 +17,14 @@ kubeadm has configuration options to specify configuration information for cloud in-tree cloud provider can be configured using kubeadm as shown below: ```yaml -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration nodeRegistration: kubeletExtraArgs: cloud-provider: "openstack" cloud-config: "/etc/kubernetes/cloud.conf" --- -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.13.0 apiServer: diff --git a/content/en/docs/concepts/configuration/manage-compute-resources-container.md b/content/en/docs/concepts/configuration/manage-compute-resources-container.md index 0b5b9ea00867f..95de47de74e31 100644 --- a/content/en/docs/concepts/configuration/manage-compute-resources-container.md +++ b/content/en/docs/concepts/configuration/manage-compute-resources-container.md @@ -384,6 +384,70 @@ The scheduler ensures that the sum of the resource requests of the scheduled Con For container-level isolation, if a Container's writable layer and logs usage exceeds its storage limit, the Pod will be evicted. For pod-level isolation, if the sum of the local ephemeral storage usage from all containers and also the Pod's emptyDir volumes exceeds the limit, the Pod will be evicted. 
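To tie the above together, here is a minimal, illustrative sketch (the Pod name, image, and sizes are placeholders, not taken from the page) of a Pod that sets local ephemeral storage requests and limits for one container:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: ephemeral-storage-demo      # hypothetical example name
spec:
  containers:
  - name: app
    image: nginx                    # placeholder image
    resources:
      requests:
        ephemeral-storage: "2Gi"    # considered by the scheduler when placing the Pod
      limits:
        ephemeral-storage: "4Gi"    # writable layer + log usage above this can trigger eviction
```

The Pod-level limit is the sum of the container limits, and emptyDir volume usage counts against that Pod-level sum.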
+### Monitoring ephemeral-storage consumption + +When local ephemeral storage is used, it is monitored on an ongoing +basis by the kubelet. The monitoring is performed by periodically scanning each +emptyDir volume, log directory, and writable layer. Starting with Kubernetes 1.15, emptyDir volumes (but not log +directories or writable layers) may, at the cluster operator's option, +be managed using [project +quotas](http://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html). +Project quotas were originally implemented in XFS, and have more +recently been ported to ext4fs. Project quotas can be used for both +monitoring and enforcement; as of Kubernetes 1.15, they are available +as alpha functionality for monitoring only. + +Quotas are faster and more accurate than directory scanning. When a +directory is assigned to a project, all files created under that +directory are created in that project, and the kernel merely has to +keep track of how many blocks are in use by files in that project. If +a file is created and deleted, but a process still holds an open file descriptor to it, it +continues to consume space. This space is tracked by the quota, +but is not seen by a directory scan. + +Kubernetes uses project IDs starting from 1048576. The IDs in use are +registered in `/etc/projects` and `/etc/projid`. If project IDs in +this range are used for other purposes on the system, those project +IDs must be registered in `/etc/projects` and `/etc/projid` to prevent +Kubernetes from using them. + +To enable the use of project quotas, the cluster operator must do the +following: + +* Enable the `LocalStorageCapacityIsolationFSQuotaMonitoring=true` + feature gate in the kubelet configuration. This defaults to `false` + in Kubernetes 1.15, so it must be explicitly set to `true`. + +* Ensure that the root partition (or optional runtime partition) is + built with project quotas enabled. All XFS filesystems support + project quotas, but ext4 filesystems must be built specially. + +* Ensure that the root partition (or optional runtime partition) is + mounted with project quotas enabled. + +#### Building and mounting filesystems with project quotas enabled + +XFS filesystems require no special action when building; they are +automatically built with project quotas enabled. + +Ext4 filesystems must be built with the quota feature enabled, and project quotas must +then be enabled on the filesystem: + +``` +% sudo mkfs.ext4 other_ext4fs_args... -E quotatype=prjquota /dev/block_device +% sudo tune2fs -O project -Q prjquota /dev/block_device +``` + +To mount the filesystem, both ext4fs and XFS require the `prjquota` +option set in `/etc/fstab`: + +``` +/dev/block_device /var/kubernetes_data defaults,prjquota 0 0 +``` + + ## Extended resources  Extended resources are fully-qualified resource names outside the diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md index f630476a0855b..13427915ef840 100644 --- a/content/en/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md @@ -77,6 +77,13 @@ when a cluster is under resource pressure. For this reason, it is not recommended to disable preemption. {{< /note >}} +{{< note >}} +In Kubernetes 1.15 and later, +if the `NonPreemptingPriority` feature is enabled, +PriorityClasses have the option to set `preemptionPolicy: Never`. +This will prevent pods of that PriorityClass from preempting other pods.
+{{< /note >}} + In Kubernetes 1.11 and later, preemption is controlled by a kube-scheduler flag `disablePreemption`, which is set to `false` by default. If you want to disable preemption despite the above note, you can set @@ -145,6 +152,55 @@ globalDefault: false description: "This priority class should be used for XYZ service pods only." ``` +### Non-preempting PriorityClasses (alpha) {#non-preempting-priority-class} + +Kubernetes 1.15 adds the `PreemptionPolicy` field as an alpha feature. +It is disabled by default in 1.15, +and requires the `NonPreemptingPriority` [feature gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. + +Pods with `PreemptionPolicy: Never` will be placed in the scheduling queue +ahead of lower-priority pods, +but they cannot preempt other pods. +A non-preempting pod waiting to be scheduled stays in the scheduling queue +until sufficient resources are free +and it can be scheduled. +Non-preempting pods, +like other pods, +are subject to scheduler back-off. +This means that if the scheduler tries these pods and they cannot be scheduled, +they will be retried less frequently, +allowing other pods with lower priority to be scheduled before them. + +Non-preempting pods may still be preempted by other, +higher-priority pods. + +`PreemptionPolicy` defaults to `PreemptLowerPriority`, +which will allow pods of that PriorityClass to preempt lower-priority pods +(as is the existing default behavior). +If `PreemptionPolicy` is set to `Never`, +pods in that PriorityClass will be non-preempting. + +An example use case is for data science workloads. +A user may submit a job that they want to be prioritized above other workloads, +but does not wish to discard existing work by preempting running pods. +The high-priority job with `PreemptionPolicy: Never` will be scheduled +ahead of other queued pods, +as soon as sufficient cluster resources "naturally" become free. + +#### Example Non-preempting PriorityClass + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority-nonpreempting +value: 1000000 +preemptionPolicy: Never +globalDefault: false +description: "This priority class will not cause other pods to be preempted." +``` + ## Pod priority  After you have one or more PriorityClasses, you can create Pods that specify one diff --git a/content/en/docs/concepts/configuration/scheduling-framework.md b/content/en/docs/concepts/configuration/scheduling-framework.md new file mode 100644 index 0000000000000..c83a026b4eac0 --- /dev/null +++ b/content/en/docs/concepts/configuration/scheduling-framework.md @@ -0,0 +1,296 @@ +--- +reviewers: +- ahg-g +title: Scheduling Framework +content_template: templates/concept +weight: 70 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="1.15" state="alpha" >}} + +The scheduling framework is a new pluggable architecture for the Kubernetes scheduler +that makes scheduler customizations easy. It adds a new set of "plugin" APIs to +the existing scheduler. Plugins are compiled into the scheduler. The APIs +allow most scheduling features to be implemented as plugins, while keeping the +scheduling "core" simple and maintainable. Refer to the [design proposal of the +scheduling framework][kep] for more technical information on the design of the +framework.
+ +[kep]: (https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/20180409-scheduling-framework.md) + +{{% /capture %}} + +{{% capture body %}} + +# Framework workflow + +The Scheduling Framework defines a few extension points. Scheduler plugins +register to be invoked at one or more extension points. Some of these plugins +can change the scheduling decisions and some are informational only. + +Each attempt to schedule one Pod is split into two phases, the **scheduling +cycle** and the **binding cycle**. + +## Scheduling Cycle & Binding Cycle + +The scheduling cycle selects a node for the Pod, and the binding cycle applies +that decision to the cluster. Together, a scheduling cycle and binding cycle are +referred to as a "scheduling context". + +Scheduling cycles are run serially, while binding cycles may run concurrently. + +A scheduling or binding cycle can be aborted if the Pod is determined to +be unschedulable or if there is an internal error. The Pod will be returned to +the queue and retried. + +## Extension points + +The following picture shows the scheduling context of a Pod and the extension +points that the scheduling framework exposes. In this picture "Filter" is +equivalent to "Predicate" and "Scoring" is equivalent to "Priority function". + +One plugin may register at multiple extension points to perform more complex or +stateful tasks. + +{{< figure src="/images/docs/scheduling-framework.png" title="scheduling framework extension points" >}} + +### Queue sort + +These plugins are used to sort Pods in the scheduling queue. A queue sort plugin +essentially will provide a "less(Pod1, Pod2)" function. Only one queue sort +plugin may be enabled at a time. + +### Pre-filter + +These plugins are used to pre-process info about the Pod, or to check certain +conditions that the cluster or the Pod must meet. If a pre-filter plugin returns +an error, the scheduling cycle is aborted. + +### Filter + +These plugins are used to filter out nodes that cannot run the Pod. For each +node, the scheduler will call filter plugins in their configured order. If any +filter plugin marks the node as infeasible, the remaining plugins will not be +called for that node. Nodes may be evaluated concurrently. + +### Post-filter + +This is an informational extension point. Plugins will be called with a list of +nodes that passed the filtering phase. A plugin may use this data to update +internal state or to generate logs/metrics. + +**Note:** Plugins wishing to perform "pre-scoring" work should use the +post-filter extension point. + +### Scoring + +These plugins are used to rank nodes that have passed the filtering phase. The +scheduler will call each scoring plugin for each node. There will be a well +defined range of integers representing the minimum and maximum scores. After the +[normalize scoring](#normalize-scoring) phase, the scheduler will combine node +scores from all plugins according to the configured plugin weights. + +### Normalize scoring + +These plugins are used to modify scores before the scheduler computes a final +ranking of Nodes. A plugin that registers for this extension point will be +called with the [scoring](#scoring) results from the same plugin. This is called +once per plugin per scheduling cycle. + +For example, suppose a plugin `BlinkingLightScorer` ranks Nodes based on how +many blinking lights they have. 
+ +```go +func ScoreNode(_ *v1.pod, n *v1.Node) (int, error) { + return getBlinkingLightCount(n) +} +``` + +However, the maximum count of blinking lights may be small compared to +`NodeScoreMax`. To fix this, `BlinkingLightScorer` should also register for this +extension point. + +```go +func NormalizeScores(scores map[string]int) { + highest := 0 + for _, score := range scores { + highest = max(highest, score) + } + for node, score := range scores { + scores[node] = score*NodeScoreMax/highest + } +} +``` + +If any normalize-scoring plugin returns an error, the scheduling cycle is +aborted. + +**Note:** Plugins wishing to perform "pre-reserve" work should use the +normalize-scoring extension point. + +### Reserve + +This is an informational extension point. Plugins which maintain runtime state +(aka "stateful plugins") should use this extension point to be notified by the +scheduler when resources on a node are being reserved for a given Pod. This +happens before the scheduler actually binds the Pod to the Node, and it exists +to prevent race conditions while the scheduler waits for the bind to succeed. + +This is the last step in a scheduling cycle. Once a Pod is in the reserved +state, it will either trigger [Un-reserve](#un-reserve) plugins (on failure) or +[Post-bind](#post-bind) plugins (on success) at the end of the binding cycle. + +*Note: This concept used to be referred to as "assume".* + +### Permit + +These plugins are used to prevent or delay the binding of a Pod. A permit plugin +can do one of three things. + +1. **approve** \ + Once all permit plugins approve a Pod, it is sent for binding. + +1. **deny** \ + If any permit plugin denies a Pod, it is returned to the scheduling queue. + This will trigger [Un-reserve](#un-reserve) plugins. + +1. **wait** (with a timeout) \ + If a permit plugin returns "wait", then the Pod is kept in the permit phase + until a [plugin approves it](#frameworkhandle). If a timeout occurs, **wait** + becomes **deny** and the Pod is returned to the scheduling queue, triggering + [un-reserve](#un-reserve) plugins. + +**Approving a Pod binding** + +While any plugin can access the list of "waiting" Pods from the cache and +approve them (see [`FrameworkHandle`](#frameworkhandle)) we expect only the permit +plugins to approve binding of reserved Pods that are in "waiting" state. Once a +Pod is approved, it is sent to the pre-bind phase. + +### Pre-bind + +These plugins are used to perform any work required before a Pod is bound. For +example, a pre-bind plugin may provision a network volume and mount it on the +target node before allowing the Pod to run there. + +If any pre-bind plugin returns an error, the Pod is [rejected](#un-reserve) and +returned to the scheduling queue. + +### Bind + +These plugins are used to bind a Pod to a Node. Bind plugins will not be called +until all pre-bind plugins have completed. Each bind plugin is called in the +configured order. A bind plugin may choose whether or not to handle the given +Pod. If a bind plugin chooses to handle a Pod, **the remaining bind plugins are +skipped**. + +### Post-bind + +This is an informational extension point. Post-bind plugins are called after a +Pod is successfully bound. This is the end of a binding cycle, and can be used +to clean up associated resources. + +### Unreserve + +This is an informational extension point. If a Pod was reserved and then +rejected in a later phase, then unreserve plugins will be notified. Unreserve +plugins should clean up state associated with the reserved Pod. 
+Plugins that use this extension point usually should also use +[Reserve](#reserve). + +## Plugin API + +There are two steps to the plugin API. First, plugins must register and get +configured, then they use the extension point interfaces. Extension point +interfaces have the following form. + +```go +type Plugin interface { + Name() string +} + +type QueueSortPlugin interface { + Plugin + Less(*v1.pod, *v1.pod) bool +} + +type PreFilterPlugin interface { + Plugin + PreFilter(PluginContext, *v1.pod) error +} + +// ... +``` + +# Plugin Configuration + +Plugins can be enabled in the scheduler configuration. Also, default plugins can +be disabled in the configuration. In 1.15, there are no default plugins for the +scheduling framework. + +The scheduler configuration can include configuration for plugins as well. Such +configurations are passed to the plugins at the time the scheduler initializes +them. The configuration is an arbitrary value. The receiving plugin should +decode and process the configuration. + +The following example shows a scheduler configuration that enables some +plugins at `reserve` and `preBind` extension points and disables a plugin. It +also provides a configuration to plugin `foo`. + +```yaml +apiVersion: kubescheduler.config.k8s.io/v1alpha1 +kind: KubeSchedulerConfiguration + +... + +plugins: + reserve: + enabled: + - name: foo + - name: bar + disabled: + - name: baz + preBind: + enabled: + - name: foo + disabled: + - name: baz + +pluginConfig: +- name: foo + args: > + Arbitrary set of args to plugin foo +``` + +When an extension point is omitted from the configuration, the default plugins for +that extension point are used. When an extension point exists and `enabled` is +provided, the `enabled` plugins are called in addition to default plugins. +Default plugins are called first and then the additional enabled plugins are +called in the same order specified in the configuration. If a different order of +calling default plugins is desired, default plugins must be `disabled` and +`enabled` in the desired order. + +Assuming there is a default plugin called `foo` at `reserve` and we are adding +plugin `bar` that we want to be invoked before `foo`, we should disable `foo` +and enable `bar` and `foo` in order. The following example shows the +configuration that achieves this: + +```yaml +apiVersion: kubescheduler.config.k8s.io/v1alpha1 +kind: KubeSchedulerConfiguration + +... + +plugins: + reserve: + enabled: + - name: bar + - name: foo + disabled: + - name: foo +``` + +{{% /capture %}} diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index fc70413535713..33143c04a5b6a 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -174,7 +174,7 @@ Aggregated APIs offer more advanced API features and customization of other feat | Feature | Description | CRDs | Aggregated API | | ------- | ----------- | ---- | -------------- | | Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation).
Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks | -| Defaulting | See above | Yes, via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook-beta-in-1-9); Planned, via CRD OpenAPI schema. | Yes | +| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting) `default` keyword (alpha in 1.15), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook-beta-in-1-9) | Yes | | Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | [Yes](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning) | Yes | | Custom Storage | If you need storage with a different performance mode (for example, time-series database instead of key-value store) or isolation for security (for example, encryption secrets or different | No | Yes | | Custom Business Logic | Perform arbitrary checks or actions when creating, reading, updating or deleting an object | Yes, using [Webhooks](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks). | Yes | @@ -183,7 +183,7 @@ Aggregated APIs offer more advanced API features and customization of other feat | Other Subresources | Add operations other than CRUD, such as "logs" or "exec". | No | Yes | | strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/run-application/update-api-object-kubectl-patch/) | No | Yes | | Protocol Buffers | The new resource supports clients that want to use Protocol Buffers | No | Yes | -| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | No, but planned | Yes | +| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | Yes, based on the [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) schema (beta in 1.15) | Yes | ### Common Features diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 729e421ba5906..749d1adfce0c2 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -164,7 +164,7 @@ DaemonSet, `/var/lib/kubelet/pod-resources` must be mounted as a in the plugin's [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). 
-Support for the "PodResources service" is still in alpha. +Support for the "PodResources service" is in beta, and is enabled by default. ## Examples diff --git a/content/en/docs/concepts/policy/resource-quotas.md b/content/en/docs/concepts/policy/resource-quotas.md index dee980efe14d2..3150d5cf2a982 100644 --- a/content/en/docs/concepts/policy/resource-quotas.md +++ b/content/en/docs/concepts/policy/resource-quotas.md @@ -134,6 +134,9 @@ Here is an example set of resources users may want to put under object count quo * `count/cronjobs.batch` * `count/deployments.extensions` +The 1.15 release added support for custom resources using the same syntax. +For example, to create a quota on a `widgets` custom resource in the `example.com` API group, use `count/widgets.example.com`. + When using `count/*` resource quota, an object is charged against the quota if it exists in server storage. These types of quotas are useful to protect against exhaustion of storage resources. For example, you may want to quota the number of secrets in a server given their large size. Too many secrets in a cluster can diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index ad3344beffa27..087774f65c0c0 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -18,7 +18,7 @@ weight: 10 No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, -and can load-balance across them. +:wqaand can load-balance across them. {{% /capture %}} @@ -805,15 +805,11 @@ There are other annotations to manage Classic Elastic Load Balancers that are de # A list of additional security groups to be added to the ELB ``` -#### Network Load Balancer support on AWS [alpha] {#aws-nlb-support} +#### Network Load Balancer support on AWS -{{< warning >}} -This is an alpha feature and is not yet recommended for production clusters. -{{< /warning >}} +{{< feature-state for_k8s_version="v1.15" state="beta" >}} -Starting from Kubernetes v1.9.0, you can use AWS Network Load Balancer (NLB) with Services. To -use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` -with the value set to `nlb`. +To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`. ```yaml metadata: diff --git a/content/en/docs/concepts/services-networking/service.md.orig b/content/en/docs/concepts/services-networking/service.md.orig new file mode 100644 index 0000000000000..c46d4478a793b --- /dev/null +++ b/content/en/docs/concepts/services-networking/service.md.orig @@ -0,0 +1,1147 @@ +--- +reviewers: +- bprashanth +title: Service +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +{{< glossary_definition term_id="service" length="short" >}} + +No need to modify your application to use an unfamiliar service discovery mechanism. +Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, +and can load-balance across them. 
+ +{{% /capture %}} + +{{% capture body %}} + +## Motivation + +Kubernetes {{< glossary_tooltip term_id="pod" text="Pods" >}} are mortal. +They are born and when they die, they are not resurrected. +If you use a {{< glossary_tooltip term_id="deployment" >}} to run your app, +it can create and destroy Pods dynamically (e.g. when scaling out or in). + +Each Pod gets its own IP address, however the set of Pods +for a Deployment running in one moment in time could be different from +the set of Pods running that application a moment later. + +This leads to a problem: if some set of Pods (call them “backends”) provides +functionality to other Pods (call them “frontends”) inside your cluster, +how do those frontends find out and keep track of which IP address to connect +to, so that the frontend can use the backend part of the workload? + +Enter _Services_. + +## Service resources {#service-resource} + +In Kubernetes, a Service is an abstraction which defines a logical set of Pods +and a policy by which to access them (you'll sometimes see this pattern called +a micro-service). The set of Pods targeted by a Service is usually determined +by a {{< glossary_tooltip text="selector" term_id="selector" >}} +(see [below](#services-without-selectors) for why you might want a Service +_without_ a selector). + +For example: consider a stateless image-processing backend which is running with +3 replicas. Those replicas are fungible—frontends do not care which backend +they use. While the actual Pods that compose the backend set may change, the +frontend clients should not need to be aware of that, nor should they need to keep +track of the set of backends themselves. + +The Service abstraction enables this decoupling. + +### Cloud-native service discovery + +If you're able to use Kubernetes APIs for service discovery in your application, +you can query the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +for Endpoints, that will be updated whenever the set of Pods in a Service changes. + +For non-native applications, Kubernetes offers ways to place a network port or load +balancer in between your application and the backend Pods. + +## Defining a service + +A Service in Kubernetes is a REST object, similar to a Pod. Like all of the +REST objects, you can `POST` a Service definition to the API server to create +a new instance. + +For example, suppose you have a set of Pods that each listen on TCP port 9376 +and carry a label `"app=MyApp"`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new Service object named “my-service” which +targets TCP port 9376 on any Pod with the `"app=MyApp"` label. + +This Service will also be assigned an IP address (sometimes called the "cluster IP"), +which is used by the service proxies +(see [Virtual IPs and service proxies](#virtual-ips-and-service-proxies) below). + +The controller for the Service selector will continuously scan for Pods that +match its selector, and will then POST any updates to an Endpoint object +also named “my-service”. + +{{< note >}} +A Service can map _any_ incoming `port` to a `targetPort`. By default, and +for convenience, the `targetPort` will be set to the same value as the `port` +field. +{{< /note >}} + +Port definitions in Pods have names, and you can reference these names in the +targetPort attribute of a Service. 
This will work even if there are a mixture +of Pods in the Service, with the same network protocol available via different +port numbers but a single configured name. +This offers a lot of flexibility for deploying and evolving your Services. +For example, you can change the port number that pods expose in the next +version of your backend software, without breaking clients. + +The default protocol for services is TCP; you can also use any other +[supported protocol](#protocol-support). + +As many Services need to expose more than one port, Kubernetes supports multiple +port definitions on a Service object. +Each port definition can have the same `protocol`, or a different one. + +### Services without selectors + +Services most commonly abstract access to Kubernetes Pods, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in your + test environment you use your own databases. + * You want to point your service to a service in a different + {{< glossary_tooltip term_id="namespace" >}} or on another cluster. + * You are migrating a workload to Kubernetes. Whilst evaluating the approach, + you run only a proportion of your backends in Kubernetes. + +In any of these scenarios you can define a service _without_ a Pod selector. +For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding Endpoint object will *not* be +created automatically. You can manually map the service to the network address and port +where it's running, by adding an Endpoint object manually: + +```yaml +apiVersion: v1 +kind: Endpoints +metadata: + name: my-service +subsets: + - addresses: + - ip: 192.0.2.42 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs _must not_ be: loopback (127.0.0.0/8 for IPv4, ::1/128 for IPv6), or +link-local (169.254.0.0/16 and 224.0.0.0/24 for IPv4, fe80::/64 for IPv6). + +Endpoint IP addresses also cannot be the cluster IPs of other Kubernetes services, +because {{< glossary_tooltip term_id="kube-proxy" >}} doesn't support virtual IPs +as a destination. +{{< /note >}} + +Accessing a Service without a selector works the same as if it had a selector. +In the example above, traffic will be routed to the single endpoint defined in +the YAML: `192.0.2.42:9376` (TCP). + +An ExternalName Service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +### Why not use round-robin DNS? + +A question that pops up every now and then is why Kubernetes relies on +proxying to forward inbound traffic to backends. What about other +approaches? For example, would it be possible to configure DNS records that +have multiple A values (or AAAA for IPv6), and rely on round-robin name +resolution? + +There are a few reasons for using proxying for Services: + + * There is a long history of DNS implementations not respecting record TTLs, + and caching the results of name lookups after they should have expired. + * Some apps do DNS lookups only once and cache the results indefinitely. 
+ * Even if apps and libraries did proper re-resolution, the low or zero TTLs + on the DNS records could impose a high load on DNS that then becomes + difficult to manage. + +### Version compatibility + +Since Kubernetes v1.0 you have been able to use the +[userspace proxy mode](#proxy-mode-userspace). +Kubernetes v1.1 added iptables mode proxying, and in Kubernetes v1.2 the +iptables mode for kube-proxy became the default. +Kubernetes v1.8 added ipvs proxy mode. + +### User space proxy mode {#proxy-mode-userspace} + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of Service and Endpoint objects. For each Service it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the Service's backend Pods (as reported via +Endpoints). kube-proxy takes the `SessionAffinity` setting of the Service into +account when deciding which backend Pod to use. + +Lastly, the user-space proxy installs iptables rules which capture traffic to +the Service's `clusterIP` (which is virtual) and `port`. The rules +redirect that traffic to the proxy port which proxies the backend Pod. + +By default, kube-proxy in userspace mode chooses a backend via a round-robin algorithm. + +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### `iptables` proxy mode {#proxy-mode-iptables} + +In this mode, kube-proxy watches the Kubernetes control plane for the addition and +removal of Service and Endpoint objects. For each Service, it installs +iptables rules which capture traffic to the Service's `clusterIP` (which is +virtual) and `port` and redirects that traffic to one of the Service's +backend sets. For each Endpoint object, it installs iptables rules which +select a backend Pod. + +By default, kube-proxy in iptables mode chooses a backend at random. + +Using iptables to handle traffic has a lower system overhead, because traffic +is handled by Linux netfilter without the need switch between userspace and the +kernel space. This approach is also likely to be more reliable. + +If kube-proxy is running in iptables mode and the first Pod that's selected +does not respond, the connection will fail. This is different from userspace +mode: in that scenario, kube-proxy would detect that the connection to the first +Pod had failed and would automatically retry with a different backend Pod. + +You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes) +to verify that backend Pods are working OK, so that kube-proxy in iptables mode +only sees backends that test out as healthy. Doing this means you avoid +having traffic sent via kube-proxy to a Pod that's known to have failed. + +![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg) + +### IPVS proxy mode {#proxy-mode-ipvs} + +{{< feature-state for_k8s_version="v1.11" state="stable" >}} + +In `ipvs` mode, kube-proxy watches Kubernetes Services and Endpoints, +calls `netlink` interface to create IPVS rules accordingly and synchronizes +IPVS rules with Kubernetes Services and Endpoints periodically. +This control loop ensures that IPVS status matches the desired +state. +When accessing a Service, IPVS will direct traffic to one of the backend Pods. + +The IPVS proxy mode is based on netfilter hook function that is similar to +iptables mode, but uses hash table as the underlying data structure and works +in the kernel space. 
+That means kube-proxy in IPVS mode redirects traffic with a lower latency than +kube-proxy in iptables mode, with much better performance when synchronising +proxy rules. Compared to the other proxy modes, IPVS mode also supports a +higher throughput of network traffic. + +IPVS provides more options for balancing traffic to backend Pods; +these are: + +- `rr`: round-robin +- `lc`: least connection (smallest number of open connections) +- `dh`: destination hashing +- `sh`: source hashing +- `sed`: shortest expected delay +- `nq`: never queue + +{{< note >}} +To run kube-proxy in IPVS mode, you must make IPVS available on +the node before starting kube-proxy. + +When kube-proxy starts in IPVS proxy mode, it will verify whether IPVS +kernel modules are available. If they are not detected, then kube-proxy +falls back to running in iptables proxy mode. +{{< /note >}} + +![Services overview diagram for IPVS proxy](/images/docs/services-ipvs-overview.svg) + +In any of these proxy models, any traffic bound for the Service’s IP:Port is +proxied to an appropriate backend without the clients knowing anything +about Kubernetes or Services or Pods. + +If you want to make sure that connections from a particular client +are passed to the same Pod each time, you can select session affinity based +on the client's IP address by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"). +You can then also set the maximum session sticky time by setting +`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` appropriately +(the default value is 10800, which works out to be 3 hours). + +## Multi-Port Services + +For some Services, you need to expose more than one port. +Kubernetes lets you configure multiple port definitions on a Service object. +When using multiple ports for a Service, you must give all of your ports names +so that these are unambiguous. For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +{{< note >}} +As with Kubernetes {{< glossary_tooltip term_id="name" text="names">}} in general, names for ports +must only contain lowercase alphanumeric characters and `-`. Port names must +also start and end with an alphanumeric character. + +For example, the names `123-abc` and `web` are valid, but `123_abc` and `-web` are not. +{{< /note >}} + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. This can be useful if, for example, you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and are difficult to re-configure. + +The IP address that you choose must be a valid IPv4 or IPv6 address from within the +`service-cluster-ip-range` CIDR range that is configured for the API server. +If you try to create a Service with an invalid clusterIP address value, the API +server will return a 422 HTTP status code to indicate that there's a problem. + +## Discovering services + +Kubernetes supports two primary modes of finding a Service: environment +variables and DNS. + +### Environment variables + +When a Pod is run on a Node, the kubelet adds a set of environment variables +for each active Service.
It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. + +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +{{< note >}} +When you have a Pod that might need to access a Service, and you are using +the environment variable method to publish the port and cluster IP to the client +Pods, you must create the Service *before* the client Pods come into existence. +Otherwise, those client Pods won't have their environment variables populated. + +If you only use DNS to discover the cluster IP for a Service, you don't need to +worry about this ordering issue. +{{< /note >}} + +### DNS + +You can (and almost always should) set up a DNS service for your Kubernetes +cluster using an [add-on](/docs/concepts/cluster-administration/addons/). + +A cluster-aware DNS server, such as CoreDNS, watches the Kubernetes API for new +Services and creates a set of DNS records for each one. If DNS has been enabled +throughout your cluster then all Pods should automatically be able to resolve +Services by their DNS name. + +For example, if you have a Service called `"my-service"` in a Kubernetes +Namespace `"my-ns"`, the control plane and the DNS service acting together will +create a DNS record for `"my-service.my-ns"`. Pods in the `"my-ns"` Namespace +should be able to find it by simply doing a name lookup for `my-service` +(`"my-service.my-ns"` would also work). + +Pods in other Namespaces must qualify the name as `my-service.my-ns`. These names +will resolve to the cluster IP assigned for the Service. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` Service has a port named `"http"` with protocol set to +`TCP`, you can do a DNS SRV query for `_http._tcp.my-service.my-ns` to discover +the port number for `"http"`, as well as the IP address. + +The Kubernetes DNS server is the only way to access `ExternalName` Services. +You can find more information about `ExternalName` resolution in +[DNS Pods and Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create what are termed “headless” Services, by explicitly +specifying `"None"` for the cluster IP (`.spec.clusterIP`). + +You can use a headless Service to interface with other service discovery mechanisms, +without being tied to Kubernetes' implementation. For example, you could implement +a custom Operator built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined.
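As an illustrative sketch of the headless pattern just described (the name and labels are placeholders), setting `.spec.clusterIP` to `None` creates a Service with no virtual IP; with a selector, DNS returns the addresses of the backing Pods directly:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-headless-service    # placeholder name
spec:
  clusterIP: None              # headless: no cluster IP and no kube-proxy load balancing
  selector:
    app: MyApp                 # DNS A records point at the Pods matching this selector
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
```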
+ +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. + +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. However, the DNS system looks for and configures +either: + + * CNAME records for [`ExternalName`](#externalname)-type services. + * A records for any `Endpoints` that share a name with the service, for all + other types. + +## Publishing services (ServiceTypes) {#publishing-services-service-types} + +For some parts of your application (e.g. frontends) you may want to expose a +Service onto an external IP address, one that's outside of your cluster. + +Kubernetes `ServiceTypes` allow you to specify what kind of service you want. +The default is `ClusterIP`. + +`Type` values and their behaviors are: + + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the + default `ServiceType`. + * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port + (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will + route, is automatically created. You'll be able to contact the `NodePort` service, + from outside the cluster, + by requesting `:`. + * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud + provider's load balancer. `NodePort` and `ClusterIP` services, to which the external + load balancer will route, are automatically created. + * [`ExternalName`](#externalname): Maps the service to the contents of the + `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record + with its value. No proxying of any kind is set up. + +{{< note >}} + +You need CoreDNS version 1.7 or higher to use the `ExternalName` type. + +{{< /note >}} + +### Type NodePort {#nodeport} + +If you set the `type` field to `NodePort`, the Kubernetes control plane will +allocate a port from a range specified by `--service-node-port-range` flag (default: 30000-32767). +Each node will proxy that port each (the same port number on every Node) into your Service. +Your service will report that allocated port in its `.spec.ports[*].nodePort` field. + + +If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s); this is supported since Kubernetes v1.10. +This flag takes a comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 192.0.2.0/25) to specify IP address ranges that kube-proxy should consider as local to this node. + +For example, if you start kube-proxy with flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The default for `--nodeport-addresses` is an empty list, and means that kube-proxy should consider all available network interfaces for NodePort. (That's also compatible with earlier Kubernetes releases). + +If you want a specific port number, you can specify a value in the `nodePort` +field. The control plane will either allocate you that port or report that +the API transaction failed. +This means that you need to take care about possible port collisions yourself). +You also have to use a valid port number, one that's inside the range configured +for NodePort use. 
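A minimal sketch of such a NodePort Service follows (the selector, ports, and the explicit `nodePort` value are placeholders chosen for illustration):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  type: NodePort
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80           # the Service's own port inside the cluster
    targetPort: 9376   # the port the backend Pods listen on
    nodePort: 30007    # optional; must be inside the NodePort range, omit it to have one allocated
```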
+ +Using a NodePort gives you the freedom to set up your own load balancing solution, +to configure environments that are not fully supported by Kubernetes, or even +to just expose one or more nodes' IPs directly. + +Note that this Service will be visible as both `:spec.ports[*].nodePort` +and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, would be filtered NodeIP(s).) + +### Type LoadBalancer {#loadbalancer} + +On cloud providers which support external load balancers, setting the `type` +field to `LoadBalancer` will provision a load balancer for your Service. +The actual creation of the load balancer happens asynchronously, and +information about the provisioned balancer will be published in the Service's +`.status.loadBalancer` field. For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + clusterIP: 10.0.171.239 + loadBalancerIP: 78.11.24.19 + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 146.148.47.155 +``` + +Traffic from the external load balancer will be directed at the backend Pods, +though exactly how that works depends on the cloud provider. + +Some cloud providers allow you to specify the `loadBalancerIP`. In those cases, the load-balancer will be created +with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, +the loadBalancer will be set up with an ephemeral IP address. If you specify a `loadBalancerIP` +but your cloud provider does not support the feature, the `loadbalancerIP` field that you +set will be ignored. + +{{< note >}} +If you're using SCTP, see the [caveat](#caveat-sctp-loadbalancer-service-type) below about the +`LoadBalancer` Service type. +{{< /note >}} + +{{< note >}} + +On **Azure**, if you want to use a user-specified public type `loadBalancerIP`, you first need +to create a static type public IP address resource. This public IP address resource should +be in the same resource group of the other automatically created resources of the cluster. +For example, `MC_myResourceGroup_myAKSCluster_eastus`. + +Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357). + +{{< /note >}} + +#### Internal load balancer +In a mixed environment it is sometimes necessary to route traffic from services inside the same +(virtual) network address block. + +In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints. + +You can achieve this by adding one the following annotations to a Service. +The annotation to add depends on the cloud service provider you're using. + +{{< tabs name="service_tabs" >}} +{{% tab name="Default" %}} +Select one of the tabs. +{{% /tab %}} +{{% tab name="GCP" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + cloud.google.com/load-balancer-type: "Internal" +[...] +``` +Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3. 
+For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing). +{{% /tab %}} +{{% tab name="AWS" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +[...] +``` +{{% /tab %}} +{{% tab name="Azure" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" +[...] +``` +{{% /tab %}} +{{% tab name="OpenStack" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" +[...] +``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] +``` +{{% /tab %}} +{{< /tabs >}} + + +#### TLS support on AWS {#ssl-support-on-aws} + +For partial TLS / SSL support on clusters running on AWS, you can add three +annotations to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a Pod speaks. For HTTPS and +SSL, the ELB will expect the Pod to authenticate itself over the encrypted +connection, using a certificate. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (Pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +you can use the following annotations: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +From Kubernetes v1.9 onwrds you can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) with HTTPS or SSL listeners for your Services. 
+To see which policies are available for use, you can the `aws` command line tool: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +You can then specify any one of those policies using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation; for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0, the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60 minutes. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. 
+ +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to the ELB +``` + +#### Network Load Balancer support on AWS + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + +To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +{{< note >}} +NLB only works with certain instance classes; see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +on Elastic Load Balancing for a list of supported instance types. +{{< /note >}} + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP address through to the node. If a service's `.spec.externalTrafficPolicy` +is set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) +to not locate on the same node. + +You can also use NLB Services with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. 
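+
+For example, a minimal sketch of a Service manifest fragment that combines the NLB
+annotation with the AWS-internal annotation shown earlier (the Service name and the
+`0.0.0.0/0` value are illustrative, not prescriptive):
+
+```yaml
+    metadata:
+      name: my-service
+      annotations:
+        # Provision an AWS Network Load Balancer instead of a Classic ELB
+        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+        # Keep the provisioned load balancer internal to the VPC
+        service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+```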
+
+In order for client traffic to reach instances behind an NLB, the Node security
+groups are modified with the following IP rules:
+
+| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description |
+|------|----------|---------|------------|---------------------|
+| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ |
+| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ |
+| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ |
+
+In order to limit which client IPs can access the Network Load Balancer,
+specify `loadBalancerSourceRanges`.
+
+```yaml
+spec:
+  loadBalancerSourceRanges:
+    - "143.231.0.0/16"
+```
+
+{{< note >}}
+If `.spec.loadBalancerSourceRanges` is not set, Kubernetes will
+allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have
+public IP addresses, be aware that non-NLB traffic can also reach all instances
+in those modified security groups.
+
+{{< /note >}}
+
+### Type ExternalName {#externalname}
+
+Services of type ExternalName map a service to a DNS name, not to a typical selector such as
+`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter.
+
+This Service definition, for example, maps
+the `my-service` Service in the `prod` namespace to `my.database.example.com`:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+  namespace: prod
+spec:
+  type: ExternalName
+  externalName: my.database.example.com
+```
+{{< note >}}
+ExternalName accepts an IPv4 address string, but treats it as a DNS name comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName
+is intended to specify a canonical DNS name. To hardcode an IP address, consider using
+[headless services](#headless-services).
+{{< /note >}}
+
+When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service
+will return a `CNAME` record with the value `my.database.example.com`. Accessing
+`my-service` works in the same way as other Services but with the crucial
+difference that redirection happens at the DNS level rather than via proxying or
+forwarding. Should you later decide to move your database into your cluster, you
+can start its pods, add appropriate selectors or endpoints, and change the
+Service's `type`.
+
+
+{{< note >}}
+This section is indebted to the [Kubernetes Tips - Part
+1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/).
+{{< /note >}}
+
+### External IPs
+
+If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those
+`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port,
+will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility
+of the cluster administrator.
+
+In the Service spec, `externalIPs` can be specified along with any of the `ServiceTypes`.
+In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`).
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 9376
+  externalIPs:
+    - 80.11.12.10
+```
+
+## Shortcomings
+
+Using the userspace proxy for VIPs will work at small to medium scale, but will
+not scale to very large clusters with thousands of Services. The [original
+design proposal for portals](http://issue.k8s.io/1107) has more details on
+this.
+
+Using the userspace proxy obscures the source IP address of a packet accessing
+a Service.
+This makes some kinds of network filtering (firewalling) impossible. The iptables
+proxy mode does not
+obscure in-cluster source IPs, but it does still impact clients coming through
+a load balancer or node-port.
+
+The `Type` field is designed as nested functionality - each level adds to the
+previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does
+not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does)
+but the current API requires it.
+
+## Virtual IP implementation {#the-gory-details-of-virtual-ips}
+
+The previous information should be sufficient for many people who just want to
+use Services. However, there is a lot going on behind the scenes that may be
+worth understanding.
+
+### Avoiding collisions
+
+One of the primary philosophies of Kubernetes is that you should not be
+exposed to situations that could cause your actions to fail through no fault
+of your own. For the design of the Service resource, this means not making
+you choose your own port number if that choice might collide with
+someone else's choice. That is an isolation failure.
+
+In order to allow you to choose a port number for your Services, we must
+ensure that no two Services can collide. Kubernetes does that by allocating each
+Service its own IP address.
+
+To ensure each service receives a unique IP, an internal allocator atomically
+updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}}
+prior to creating each Service. The map object must exist in the registry for
+Services to get IP address assignments, otherwise creations will
+fail with a message indicating an IP address could not be allocated.
+
+In the control plane, a background controller is responsible for creating that
+map (needed to support migrating from older versions of Kubernetes that used
+in-memory locking). Kubernetes also uses controllers to check for invalid
+assignments (e.g. due to administrator intervention) and for cleaning up allocated
+IP addresses that are no longer used by any Services.
+
+### Service IP addresses {#ips-and-vips}
+
+Unlike Pod IP addresses, which actually route to a fixed destination,
+Service IPs are not actually answered by a single host. Instead, kube-proxy
+uses iptables (packet processing logic in Linux) to define _virtual_ IP addresses
+which are transparently redirected as needed. When clients connect to the
+VIP, their traffic is automatically transported to an appropriate endpoint.
+The environment variables and DNS for Services are actually populated in
+terms of the Service's virtual IP address (and port).
+
+kube-proxy supports three proxy modes (userspace, iptables and IPVS), which
+each operate slightly differently.
+
+#### Userspace
+
+As an example, consider the image processing application described above.
+When the backend Service is created, the Kubernetes master assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it opens a new random port, establishes an
+iptables redirect from the virtual IP address to this new port, and starts accepting
+connections on it.
+
+When a client connects to the Service's virtual IP address, the iptables
+rule kicks in, and redirects the packets to the proxy's own port.
+The “Service proxy” chooses a backend, and starts proxying traffic from the client to the backend.
+
+This means that Service owners can choose any port they want without risk of
+collision. Clients can simply connect to an IP and port, without being aware
+of which Pods they are actually accessing.
+
+#### iptables
+
+Again, consider the image processing application described above.
+When the backend Service is created, the Kubernetes control plane assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it installs a series of iptables rules which
+redirect from the virtual IP address to per-Service rules. The per-Service
+rules link to per-Endpoint rules which redirect traffic (using destination NAT)
+to the backends.
+
+When a client connects to the Service's virtual IP address, the iptables rule kicks in.
+A backend is chosen (either based on session affinity or randomly) and packets are
+redirected to the backend. Unlike the userspace proxy, packets are never
+copied to userspace, the kube-proxy does not have to be running for the virtual
+IP address to work, and Nodes see traffic arriving from the unaltered client IP
+address.
+
+This same basic flow executes when traffic comes in through a node-port or
+through a load-balancer, though in those cases the client IP does get altered.
+
+#### IPVS
+
+iptables operations slow down dramatically in a large-scale cluster, for example one with 10,000 Services.
+IPVS is designed for load balancing and is based on in-kernel hash tables, so an IPVS-based kube-proxy provides more consistent performance for clusters with a large number of Services. IPVS-based kube-proxy also supports more sophisticated load balancing algorithms (least connections, locality, weighted, persistence).
+
+## API Object
+
+Service is a top-level resource in the Kubernetes REST API. You can find more details
+about the API object at: [Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core).
+
+## Supported protocols {#protocol-support}
+
+### TCP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use TCP for any kind of service, and it's the default network protocol.
+
+### UDP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use UDP for most services. For type=LoadBalancer services, UDP support
+depends on the cloud provider offering this facility.
+
+### HTTP
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it, you can use a Service in LoadBalancer mode
+to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints
+of the Service.
+
+{{< note >}}
+You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service
+to expose HTTP / HTTPS services.
+{{< /note >}} + +### PROXY protocol + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it (eg, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +you can use a Service in LoadBalancer mode to configure a load balancer outside +of Kubernetes itself, that will forward connections prefixed with +[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). + +The load balancer will send an initial series of octets describing the +incoming connection, similar to this example + +``` +PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n +``` +followed by the data from the client. + +### SCTP + +{{< feature-state for_k8s_version="v1.12" state="alpha" >}} + +Kubernetes supports SCTP as a `protocol` value in Service, Endpoint, NetworkPolicy and Pod definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `--feature-gates=SCTPSupport=true,…`. + +When the feature gate is enabled, you can set the `protocol` field of a Service, Endpoint, NetworkPolicy or Pod to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections. + +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +{{< warning >}} +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. +{{< /warning >}} + +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +{{< warning >}} +You can only create a Service with `type` LoadBalancer plus `protocol` SCTP if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise, the Service creation request is rejected. The current set of cloud load balancer providers (Azure, AWS, CloudStack, GCE, OpenStack) all lack support for SCTP. +{{< /warning >}} + +##### Windows {#caveat-sctp-windows-os} + +{{< warning >}} +SCTP is not supported on Windows based nodes. +{{< /warning >}} + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +{{< warning >}} +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. +{{< /warning >}} + +## Future work + +In the future, the proxy policy for Services can become more nuanced than +simple round-robin balancing, for example master-elected or sharded. We also +envision that some Services will have "real" load balancers, in which case the +virtual IP address will simply transport the packets there. + +The Kubernetes project intends to improve support for L7 (HTTP) Services. + +The Kubernetes project intends to have more flexible ingress modes for Services +which encompass the current ClusterIP, NodePort, and LoadBalancer modes and more. 
+ + +{{% /capture %}} + +{{% capture whatsnext %}} + +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) +* Read about [Ingress](/docs/concepts/services-networking/ingress/) + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_BACKUP_16221.md b/content/en/docs/concepts/services-networking/service_BACKUP_16221.md new file mode 100644 index 0000000000000..c46d4478a793b --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_BACKUP_16221.md @@ -0,0 +1,1147 @@ +--- +reviewers: +- bprashanth +title: Service +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +{{< glossary_definition term_id="service" length="short" >}} + +No need to modify your application to use an unfamiliar service discovery mechanism. +Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, +and can load-balance across them. + +{{% /capture %}} + +{{% capture body %}} + +## Motivation + +Kubernetes {{< glossary_tooltip term_id="pod" text="Pods" >}} are mortal. +They are born and when they die, they are not resurrected. +If you use a {{< glossary_tooltip term_id="deployment" >}} to run your app, +it can create and destroy Pods dynamically (e.g. when scaling out or in). + +Each Pod gets its own IP address, however the set of Pods +for a Deployment running in one moment in time could be different from +the set of Pods running that application a moment later. + +This leads to a problem: if some set of Pods (call them “backends”) provides +functionality to other Pods (call them “frontends”) inside your cluster, +how do those frontends find out and keep track of which IP address to connect +to, so that the frontend can use the backend part of the workload? + +Enter _Services_. + +## Service resources {#service-resource} + +In Kubernetes, a Service is an abstraction which defines a logical set of Pods +and a policy by which to access them (you'll sometimes see this pattern called +a micro-service). The set of Pods targeted by a Service is usually determined +by a {{< glossary_tooltip text="selector" term_id="selector" >}} +(see [below](#services-without-selectors) for why you might want a Service +_without_ a selector). + +For example: consider a stateless image-processing backend which is running with +3 replicas. Those replicas are fungible—frontends do not care which backend +they use. While the actual Pods that compose the backend set may change, the +frontend clients should not need to be aware of that, nor should they need to keep +track of the set of backends themselves. + +The Service abstraction enables this decoupling. + +### Cloud-native service discovery + +If you're able to use Kubernetes APIs for service discovery in your application, +you can query the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +for Endpoints, that will be updated whenever the set of Pods in a Service changes. + +For non-native applications, Kubernetes offers ways to place a network port or load +balancer in between your application and the backend Pods. + +## Defining a service + +A Service in Kubernetes is a REST object, similar to a Pod. 
Like all of the +REST objects, you can `POST` a Service definition to the API server to create +a new instance. + +For example, suppose you have a set of Pods that each listen on TCP port 9376 +and carry a label `"app=MyApp"`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new Service object named “my-service” which +targets TCP port 9376 on any Pod with the `"app=MyApp"` label. + +This Service will also be assigned an IP address (sometimes called the "cluster IP"), +which is used by the service proxies +(see [Virtual IPs and service proxies](#virtual-ips-and-service-proxies) below). + +The controller for the Service selector will continuously scan for Pods that +match its selector, and will then POST any updates to an Endpoint object +also named “my-service”. + +{{< note >}} +A Service can map _any_ incoming `port` to a `targetPort`. By default, and +for convenience, the `targetPort` will be set to the same value as the `port` +field. +{{< /note >}} + +Port definitions in Pods have names, and you can reference these names in the +targetPort attribute of a Service. This will work even if there are a mixture +of Pods in the Service, with the same network protocol available via different +port numbers but a single configured name. +This offers a lot of flexibility for deploying and evolving your Services. +For example, you can change the port number that pods expose in the next +version of your backend software, without breaking clients. + +The default protocol for services is TCP; you can also use any other +[supported protocol](#protocol-support). + +As many Services need to expose more than one port, Kubernetes supports multiple +port definitions on a Service object. +Each port definition can have the same `protocol`, or a different one. + +### Services without selectors + +Services most commonly abstract access to Kubernetes Pods, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in your + test environment you use your own databases. + * You want to point your service to a service in a different + {{< glossary_tooltip term_id="namespace" >}} or on another cluster. + * You are migrating a workload to Kubernetes. Whilst evaluating the approach, + you run only a proportion of your backends in Kubernetes. + +In any of these scenarios you can define a service _without_ a Pod selector. +For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding Endpoint object will *not* be +created automatically. You can manually map the service to the network address and port +where it's running, by adding an Endpoint object manually: + +```yaml +apiVersion: v1 +kind: Endpoints +metadata: + name: my-service +subsets: + - addresses: + - ip: 192.0.2.42 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs _must not_ be: loopback (127.0.0.0/8 for IPv4, ::1/128 for IPv6), or +link-local (169.254.0.0/16 and 224.0.0.0/24 for IPv4, fe80::/64 for IPv6). + +Endpoint IP addresses also cannot be the cluster IPs of other Kubernetes services, +because {{< glossary_tooltip term_id="kube-proxy" >}} doesn't support virtual IPs +as a destination. 
+{{< /note >}} + +Accessing a Service without a selector works the same as if it had a selector. +In the example above, traffic will be routed to the single endpoint defined in +the YAML: `192.0.2.42:9376` (TCP). + +An ExternalName Service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +### Why not use round-robin DNS? + +A question that pops up every now and then is why Kubernetes relies on +proxying to forward inbound traffic to backends. What about other +approaches? For example, would it be possible to configure DNS records that +have multiple A values (or AAAA for IPv6), and rely on round-robin name +resolution? + +There are a few reasons for using proxying for Services: + + * There is a long history of DNS implementations not respecting record TTLs, + and caching the results of name lookups after they should have expired. + * Some apps do DNS lookups only once and cache the results indefinitely. + * Even if apps and libraries did proper re-resolution, the low or zero TTLs + on the DNS records could impose a high load on DNS that then becomes + difficult to manage. + +### Version compatibility + +Since Kubernetes v1.0 you have been able to use the +[userspace proxy mode](#proxy-mode-userspace). +Kubernetes v1.1 added iptables mode proxying, and in Kubernetes v1.2 the +iptables mode for kube-proxy became the default. +Kubernetes v1.8 added ipvs proxy mode. + +### User space proxy mode {#proxy-mode-userspace} + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of Service and Endpoint objects. For each Service it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the Service's backend Pods (as reported via +Endpoints). kube-proxy takes the `SessionAffinity` setting of the Service into +account when deciding which backend Pod to use. + +Lastly, the user-space proxy installs iptables rules which capture traffic to +the Service's `clusterIP` (which is virtual) and `port`. The rules +redirect that traffic to the proxy port which proxies the backend Pod. + +By default, kube-proxy in userspace mode chooses a backend via a round-robin algorithm. + +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### `iptables` proxy mode {#proxy-mode-iptables} + +In this mode, kube-proxy watches the Kubernetes control plane for the addition and +removal of Service and Endpoint objects. For each Service, it installs +iptables rules which capture traffic to the Service's `clusterIP` (which is +virtual) and `port` and redirects that traffic to one of the Service's +backend sets. For each Endpoint object, it installs iptables rules which +select a backend Pod. + +By default, kube-proxy in iptables mode chooses a backend at random. + +Using iptables to handle traffic has a lower system overhead, because traffic +is handled by Linux netfilter without the need switch between userspace and the +kernel space. This approach is also likely to be more reliable. + +If kube-proxy is running in iptables mode and the first Pod that's selected +does not respond, the connection will fail. 
This is different from userspace
+mode: in that scenario, kube-proxy would detect that the connection to the first
+Pod had failed and would automatically retry with a different backend Pod.
+
+You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
+to verify that backend Pods are working OK, so that kube-proxy in iptables mode
+only sees backends that test out as healthy. Doing this means you avoid
+having traffic sent via kube-proxy to a Pod that's known to have failed.
+
+![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg)
+
+### IPVS proxy mode {#proxy-mode-ipvs}
+
+{{< feature-state for_k8s_version="v1.11" state="stable" >}}
+
+In `ipvs` mode, kube-proxy watches Kubernetes Services and Endpoints,
+calls the `netlink` interface to create IPVS rules accordingly and synchronizes
+IPVS rules with Kubernetes Services and Endpoints periodically.
+This control loop ensures that IPVS status matches the desired
+state.
+When accessing a Service, IPVS will direct traffic to one of the backend Pods.
+
+The IPVS proxy mode is based on a netfilter hook function that is similar to
+iptables mode, but uses a hash table as the underlying data structure and works
+in the kernel space.
+That means kube-proxy in IPVS mode redirects traffic with a lower latency than
+kube-proxy in iptables mode, with much better performance when synchronising
+proxy rules. Compared to the other proxy modes, IPVS mode also supports a
+higher throughput of network traffic.
+
+IPVS provides more options for balancing traffic to backend Pods;
+these are:
+
+- `rr`: round-robin
+- `lc`: least connection (smallest number of open connections)
+- `dh`: destination hashing
+- `sh`: source hashing
+- `sed`: shortest expected delay
+- `nq`: never queue
+
+{{< note >}}
+To run kube-proxy in IPVS mode, you must make IPVS available on
+the node before starting kube-proxy.
+
+When kube-proxy starts in IPVS proxy mode, it verifies whether IPVS
+kernel modules are available. If those are not detected, then kube-proxy
+falls back to running in iptables proxy mode.
+{{< /note >}}
+
+![Services overview diagram for IPVS proxy](/images/docs/services-ipvs-overview.svg)
+
+In any of these proxy models, any traffic bound for the Service’s IP:Port is
+proxied to an appropriate backend without the clients knowing anything
+about Kubernetes or Services or Pods.
+
+If you want to make sure that connections from a particular client
+are passed to the same Pod each time, you can select session affinity based
+on the client's IP address by setting `service.spec.sessionAffinity` to "ClientIP"
+(the default is "None").
+You can then also set the maximum session sticky time by setting
+`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` appropriately
+(the default value is 10800, which works out to be 3 hours).
+
+## Multi-Port Services
+
+For some Services, you need to expose more than one port.
+Kubernetes lets you configure multiple port definitions on a Service object.
+When using multiple ports for a Service, you must give all of your ports names
+so that these are unambiguous.
For example:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 9376
+    - name: https
+      protocol: TCP
+      port: 443
+      targetPort: 9377
+```
+
+{{< note >}}
+As with Kubernetes {{< glossary_tooltip term_id="name" text="names">}} in general, names for ports
+must only contain lowercase alphanumeric characters and `-`. Port names must
+also start and end with an alphanumeric character.
+
+For example, the names `123-abc` and `web` are valid, but `123_abc` and `-web` are not.
+{{< /note >}}
+
+## Choosing your own IP address
+
+You can specify your own cluster IP address as part of a `Service` creation
+request. To do this, set the `.spec.clusterIP` field. You might do this, for example,
+if you already have an existing DNS entry that you wish to reuse, or if you have legacy
+systems that are configured for a specific IP address and are difficult to re-configure.
+
+The IP address that you choose must be a valid IPv4 or IPv6 address from within the
+`service-cluster-ip-range` CIDR range that is configured for the API server.
+If you try to create a Service with an invalid clusterIP address value, the API
+server will return a 422 HTTP status code to indicate that there's a problem.
+
+## Discovering services
+
+Kubernetes supports two primary modes of finding a Service: environment
+variables and DNS.
+
+### Environment variables
+
+When a Pod is run on a Node, the kubelet adds a set of environment variables
+for each active Service. It supports both [Docker links
+compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see
+[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49))
+and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables,
+where the Service name is upper-cased and dashes are converted to underscores.
+
+For example, the Service `"redis-master"` which exposes TCP port 6379 and has been
+allocated cluster IP address 10.0.0.11 produces the following environment
+variables:
+
+```shell
+REDIS_MASTER_SERVICE_HOST=10.0.0.11
+REDIS_MASTER_SERVICE_PORT=6379
+REDIS_MASTER_PORT=tcp://10.0.0.11:6379
+REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379
+REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
+REDIS_MASTER_PORT_6379_TCP_PORT=6379
+REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11
+```
+
+{{< note >}}
+When you have a Pod that might need to access a Service, and you are using
+the environment variable method to publish the port and cluster IP to the client
+Pods, you must create the Service *before* the client Pods come into existence.
+Otherwise, those client Pods won't have their environment variables populated.
+
+If you only use DNS to discover the cluster IP for a Service, you don't need to
+worry about this ordering issue.
+{{< /note >}}
+
+### DNS
+
+You can (and almost always should) set up a DNS service for your Kubernetes
+cluster using an [add-on](/docs/concepts/cluster-administration/addons/).
+
+A cluster-aware DNS server, such as CoreDNS, watches the Kubernetes API for new
+Services and creates a set of DNS records for each one. If DNS has been enabled
+throughout your cluster, then all Pods should automatically be able to resolve
+Services by their DNS name.
+
+For example, if you have a Service called `"my-service"` in a Kubernetes
+Namespace `"my-ns"`, the control plane and the DNS service acting together will
+create a DNS record for `"my-service.my-ns"`. Pods in the `"my-ns"` Namespace
+should be able to find it by simply doing a name lookup for `my-service`
+(`"my-service.my-ns"` would also work).
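+
+For example, a quick sketch of checking that resolution from inside a Pod in the
+`my-ns` Namespace (this assumes the Pod's image provides a shell and `nslookup`;
+the names are the illustrative ones used above):
+
+```shell
+# Both of these lookups should return the Service's cluster IP
+nslookup my-service
+nslookup my-service.my-ns
+```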
+
+Pods in other Namespaces must qualify the name as `my-service.my-ns`. These names
+will resolve to the cluster IP assigned for the Service.
+
+Kubernetes also supports DNS SRV (service) records for named ports. If the
+`"my-service.my-ns"` Service has a port named `"http"` with protocol set to
+`TCP`, you can do a DNS SRV query for `_http._tcp.my-service.my-ns` to discover
+the port number for `"http"`, as well as the IP address.
+
+The Kubernetes DNS server is the only way to access `ExternalName` Services.
+You can find more information about `ExternalName` resolution in
+[DNS Pods and Services](/docs/concepts/services-networking/dns-pod-service/).
+
+## Headless services
+
+Sometimes you don't need or want load-balancing and a single service IP. In
+this case, you can create what are termed “headless” Services, by explicitly
+specifying `"None"` for the cluster IP (`.spec.clusterIP`).
+
+You can use a headless Service to interface with other service discovery mechanisms,
+without being tied to Kubernetes' implementation. For example, you could implement
+a custom Operator built upon this API.
+
+For such `Services`, a cluster IP is not allocated, kube-proxy does not handle
+these services, and there is no load balancing or proxying done by the platform
+for them. How DNS is automatically configured depends on whether the service has
+selectors defined.
+
+### With selectors
+
+For headless services that define selectors, the endpoints controller creates
+`Endpoints` records in the API, and modifies the DNS configuration to return A
+records (addresses) that point directly to the `Pods` backing the `Service`.
+
+### Without selectors
+
+For headless services that do not define selectors, the endpoints controller does
+not create `Endpoints` records. However, the DNS system looks for and configures
+either:
+
+ * CNAME records for [`ExternalName`](#externalname)-type services.
+ * A records for any `Endpoints` that share a name with the service, for all
+   other types.
+
+## Publishing services (ServiceTypes) {#publishing-services-service-types}
+
+For some parts of your application (e.g. frontends) you may want to expose a
+Service onto an external IP address, one that's outside of your cluster.
+
+Kubernetes `ServiceTypes` allow you to specify what kind of service you want.
+The default is `ClusterIP`.
+
+`Type` values and their behaviors are:
+
+ * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value
+   makes the service only reachable from within the cluster. This is the
+   default `ServiceType`.
+ * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port
+   (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will
+   route, is automatically created. You'll be able to contact the `NodePort` service,
+   from outside the cluster,
+   by requesting `<NodeIP>:<NodePort>`.
+ * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud
+   provider's load balancer. `NodePort` and `ClusterIP` services, to which the external
+   load balancer will route, are automatically created.
+ * [`ExternalName`](#externalname): Maps the service to the contents of the
+   `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record
+   with its value. No proxying of any kind is set up.
+
+{{< note >}}
+
+You need CoreDNS version 1.7 or higher to use the `ExternalName` type.
+
+{{< /note >}}
+
+### Type NodePort {#nodeport}
+
+If you set the `type` field to `NodePort`, the Kubernetes control plane will
+allocate a port from a range specified by the `--service-node-port-range` flag (default: 30000-32767).
+Each node will proxy that port (the same port number on every Node) into your Service.
+Your service will report that allocated port in its `.spec.ports[*].nodePort` field.
+
+
+If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s); this is supported since Kubernetes v1.10.
+This flag takes a comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 192.0.2.0/25) to specify IP address ranges that kube-proxy should consider as local to this node.
+
+For example, if you start kube-proxy with the flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The default for `--nodeport-addresses` is an empty list, which means that kube-proxy should consider all available network interfaces for NodePort. (That's also compatible with earlier Kubernetes releases.)
+
+If you want a specific port number, you can specify a value in the `nodePort`
+field. The control plane will either allocate you that port or report that
+the API transaction failed.
+This means that you need to take care of possible port collisions yourself.
+You also have to use a valid port number, one that's inside the range configured
+for NodePort use.
+
+Using a NodePort gives you the freedom to set up your own load balancing solution,
+to configure environments that are not fully supported by Kubernetes, or even
+to just expose one or more nodes' IPs directly.
+
+Note that this Service will be visible as both `<NodeIP>:spec.ports[*].nodePort`
+and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, `<NodeIP>` would be the filtered node IP(s).)
+
+### Type LoadBalancer {#loadbalancer}
+
+On cloud providers which support external load balancers, setting the `type`
+field to `LoadBalancer` will provision a load balancer for your Service.
+The actual creation of the load balancer happens asynchronously, and
+information about the provisioned balancer will be published in the Service's
+`.status.loadBalancer` field. For example:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 9376
+  clusterIP: 10.0.171.239
+  loadBalancerIP: 78.11.24.19
+  type: LoadBalancer
+status:
+  loadBalancer:
+    ingress:
+      - ip: 146.148.47.155
+```
+
+Traffic from the external load balancer will be directed at the backend Pods,
+though exactly how that works depends on the cloud provider.
+
+Some cloud providers allow you to specify the `loadBalancerIP`. In those cases, the load-balancer will be created
+with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified,
+the loadBalancer will be set up with an ephemeral IP address. If you specify a `loadBalancerIP`
+but your cloud provider does not support the feature, the `loadbalancerIP` field that you
+set will be ignored.
+
+{{< note >}}
+If you're using SCTP, see the [caveat](#caveat-sctp-loadbalancer-service-type) below about the
+`LoadBalancer` Service type.
+{{< /note >}}
+
+{{< note >}}
+
+On **Azure**, if you want to use a user-specified public type `loadBalancerIP`, you first need
+to create a static type public IP address resource.
This public IP address resource should
+be in the same resource group as the other automatically created resources of the cluster.
+For example, `MC_myResourceGroup_myAKSCluster_eastus`.
+
+Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues, see [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357).
+
+{{< /note >}}
+
+#### Internal load balancer
+In a mixed environment it is sometimes necessary to route traffic from services inside the same
+(virtual) network address block.
+
+In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints.
+
+You can achieve this by adding one of the following annotations to a Service.
+The annotation to add depends on the cloud service provider you're using.
+
+{{< tabs name="service_tabs" >}}
+{{% tab name="Default" %}}
+Select one of the tabs.
+{{% /tab %}}
+{{% tab name="GCP" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    cloud.google.com/load-balancer-type: "Internal"
+[...]
+```
+Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3.
+For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing).
+{{% /tab %}}
+{{% tab name="AWS" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+[...]
+```
+{{% /tab %}}
+{{% tab name="Azure" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+[...]
+```
+{{% /tab %}}
+{{% tab name="OpenStack" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
+[...]
+```
+{{% /tab %}}
+{{% tab name="Baidu Cloud" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
+[...]
+```
+{{% /tab %}}
+{{< /tabs >}}
+
+
+#### TLS support on AWS {#ssl-support-on-aws}
+
+For partial TLS / SSL support on clusters running on AWS, you can add three
+annotations to a `LoadBalancer` service:
+
+```yaml
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
+```
+
+The first specifies the ARN of the certificate to use. It can be either a
+certificate from a third party issuer that was uploaded to IAM or one created
+within AWS Certificate Manager.
+
+```yaml
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp)
+```
+
+The second annotation specifies which protocol a Pod speaks. For HTTPS and
+SSL, the ELB will expect the Pod to authenticate itself over the encrypted
+connection, using a certificate.
+
+HTTP and HTTPS will select layer 7 proxying: the ELB will terminate
+the connection with the user, parse headers and inject the `X-Forwarded-For`
+header with the user's IP address (Pods will only see the IP address of the
+ELB at the other end of its connection) when forwarding requests.
+ +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +you can use the following annotations: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +From Kubernetes v1.9 onwrds you can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) with HTTPS or SSL listeners for your Services. +To see which policies are available for use, you can the `aws` command line tool: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +You can then specify any one of those policies using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation; for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0, the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60 minutes. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). 
+ service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to the ELB +``` + +#### Network Load Balancer support on AWS + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + +To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`. 
+ +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +{{< note >}} +NLB only works with certain instance classes; see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +on Elastic Load Balancing for a list of supported instance types. +{{< /note >}} + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP address through to the node. If a service's `.spec.externalTrafficPolicy` +is set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) +to not locate on the same node. + +You can also use NLB Services with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. + +In order for client traffic to reach instances behind an NLB, the Node security +groups are modified with the following IP rules: + +| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | +|------|----------|---------|------------|---------------------| +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | +| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | + +In order to limit which client IP's can access the Network Load Balancer, +specify `loadBalancerSourceRanges`. + +```yaml +spec: + loadBalancerSourceRanges: + - "143.231.0.0/16" +``` + +{{< note >}} +If `.spec.loadBalancerSourceRanges` is not set, Kubernetes will +allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have +public IP addresses, be aware that non-NLB traffic can also reach all instances +in those modified security groups. + +{{< /note >}} + +### Type ExternalName {#externalname} + +Services of type ExternalName map a service to a DNS name, not to a typical selector such as +`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter. + +This Service definition, for example, maps +the `my-service` Service in the `prod` namespace to `my.database.example.com`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: prod +spec: + type: ExternalName + externalName: my.database.example.com +``` +{{< note >}} +ExternalName accepts an IPv4 address string, but as a DNS names comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName +is intended to specify a canonical DNS name. To hardcode an IP address, consider using +[headless services](#headless-services). 
+{{< /note >}} + +When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service +will return a `CNAME` record with the value `my.database.example.com`. Accessing +`my-service` works in the same way as other Services but with the crucial +difference that redirection happens at the DNS level rather than via proxying or +forwarding. Should you later decide to move your database into your cluster, you +can start its pods, add appropriate selectors or endpoints, and change the +Service's `type`. + + +{{< note >}} +This section is indebted to the [Kubernetes Tips - Part +1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/). +{{< /note >}} + +### External IPs + +If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those +`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port, +will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility +of the cluster administrator. + +In the Service spec, `externalIPs` can be specified along with any of the `ServiceTypes`. +In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`) + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + externalIPs: + - 80.11.12.10 +``` + +## Shortcomings + +Using the userspace proxy for VIPs will work at small to medium scale, but will +not scale to very large clusters with thousands of Services. The [original +design proposal for portals](http://issue.k8s.io/1107) has more details on +this. + +Using the userspace proxy obscures the source IP address of a packet accessing +a Service. +This makes some kinds of network filtering (firewalling) impossible. The iptables +proxy mode does not +obscure in-cluster source IPs, but it does still impact clients coming through +a load balancer or node-port. + +The `Type` field is designed as nested functionality - each level adds to the +previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does +not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does) +but the current API requires it. + +## Virtual IP implementation {#the-gory-details-of-virtual-ips} + +The previous information should be sufficient for many people who just want to +use Services. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that you should not be +exposed to situations that could cause your actions to fail through no fault +of your own. For the design of the Service resource, this means not making +you choose your own port number for a if that choice might collide with +someone else's choice. That is an isolation failure. + +In order to allow you to choose a port number for your Services, we must +ensure that no two Services can collide. Kubernetes does that by allocating each +Service its own IP address. + +To ensure each service receives a unique IP, an internal allocator atomically +updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}} +prior to creating each Service. 
The map object must exist in the registry for
+Services to get IP address assignments, otherwise creations will
+fail with a message indicating an IP address could not be allocated.
+
+In the control plane, a background controller is responsible for creating that
+map (needed to support migrating from older versions of Kubernetes that used
+in-memory locking). Kubernetes also uses controllers to check for invalid
+assignments (for example, due to administrator intervention) and for cleaning up
+allocated IP addresses that are no longer used by any Services.
+
+### Service IP addresses {#ips-and-vips}
+
+Unlike Pod IP addresses, which actually route to a fixed destination,
+Service IPs are not actually answered by a single host. Instead, kube-proxy
+uses iptables (packet processing logic in Linux) to define _virtual_ IP addresses
+which are transparently redirected as needed. When clients connect to the
+VIP, their traffic is automatically transported to an appropriate endpoint.
+The environment variables and DNS for Services are actually populated in
+terms of the Service's virtual IP address (and port).
+
+kube-proxy supports three proxy modes (userspace, iptables and IPVS) which
+each operate slightly differently.
+
+#### Userspace
+
+As an example, consider the image processing application described above.
+When the backend Service is created, the Kubernetes master assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it opens a new random port, establishes an
+iptables redirect from the virtual IP address to this new port, and starts accepting
+connections on it.
+
+When a client connects to the Service's virtual IP address, the iptables
+rule kicks in, and redirects the packets to the proxy's own port.
+The “Service proxy” chooses a backend, and starts proxying traffic from the client to the backend.
+
+This means that Service owners can choose any port they want without risk of
+collision. Clients can simply connect to an IP and port, without being aware
+of which Pods they are actually accessing.
+
+#### iptables
+
+Again, consider the image processing application described above.
+When the backend Service is created, the Kubernetes control plane assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it installs a series of iptables rules which
+redirect from the virtual IP address to per-Service rules. The per-Service
+rules link to per-Endpoint rules which redirect traffic (using destination NAT)
+to the backends.
+
+When a client connects to the Service's virtual IP address, the iptables rule kicks in.
+A backend is chosen (either based on session affinity or randomly) and packets are
+redirected to the backend. Unlike the userspace proxy, packets are never
+copied to userspace, the kube-proxy does not have to be running for the virtual
+IP address to work, and Nodes see traffic arriving from the unaltered client IP
+address.
+
+This same basic flow executes when traffic comes in through a node-port or
+through a load-balancer, though in those cases the client IP does get altered.
+
+#### IPVS
+
+iptables operations slow down dramatically in large clusters, for example those
+with 10,000 Services. IPVS is designed for load balancing and is based on
+in-kernel hash tables. As a result, an IPVS-based kube-proxy provides more
+consistent performance even with a large number of Services. An IPVS-based
+kube-proxy also supports more sophisticated load balancing algorithms
+(least connections, locality-based, weighted, persistence).
+
+## API Object
+
+Service is a top-level resource in the Kubernetes REST API. You can find more details
+about the API object at: [Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core).
+
+## Supported protocols {#protocol-support}
+
+### TCP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use TCP for any kind of service, and it's the default network protocol.
+
+### UDP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use UDP for most services. For type=LoadBalancer services, UDP support
+depends on the cloud provider offering this facility.
+
+### HTTP
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it, you can use a Service in LoadBalancer mode
+to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints
+of the Service.
+
+{{< note >}}
+You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service
+to expose HTTP / HTTPS services.
+{{< /note >}}
+
+### PROXY protocol
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it (for example, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)),
+you can use a Service in LoadBalancer mode to configure a load balancer outside
+of Kubernetes itself, which will forward connections prefixed with
+[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt).
+
+The load balancer will send an initial series of octets describing the
+incoming connection, similar to this example:
+
+```
+PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n
+```
+followed by the data from the client.
+
+### SCTP
+
+{{< feature-state for_k8s_version="v1.12" state="alpha" >}}
+
+Kubernetes supports SCTP as a `protocol` value in Service, Endpoint, NetworkPolicy and Pod definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `--feature-gates=SCTPSupport=true,…`.
+
+When the feature gate is enabled, you can set the `protocol` field of a Service, Endpoint, NetworkPolicy or Pod to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections.
+
+#### Warnings {#caveat-sctp-overview}
+
+##### Support for multihomed SCTP associations {#caveat-sctp-multihomed}
+
+{{< warning >}}
+The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod.
+
+NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules.
+{{< /warning >}}
+
+##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type}
+
+{{< warning >}}
+You can only create a Service with `type` LoadBalancer plus `protocol` SCTP if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise, the Service creation request is rejected. The current set of cloud load balancer providers (Azure, AWS, CloudStack, GCE, OpenStack) all lack support for SCTP.
+{{< /warning >}}
+
+##### Windows {#caveat-sctp-windows-os}
+
+{{< warning >}}
+SCTP is not supported on Windows-based nodes.
+{{< /warning >}} + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +{{< warning >}} +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. +{{< /warning >}} + +## Future work + +In the future, the proxy policy for Services can become more nuanced than +simple round-robin balancing, for example master-elected or sharded. We also +envision that some Services will have "real" load balancers, in which case the +virtual IP address will simply transport the packets there. + +The Kubernetes project intends to improve support for L7 (HTTP) Services. + +The Kubernetes project intends to have more flexible ingress modes for Services +which encompass the current ClusterIP, NodePort, and LoadBalancer modes and more. + + +{{% /capture %}} + +{{% capture whatsnext %}} + +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) +* Read about [Ingress](/docs/concepts/services-networking/ingress/) + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_BACKUP_80035.md b/content/en/docs/concepts/services-networking/service_BACKUP_80035.md new file mode 100644 index 0000000000000..c46d4478a793b --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_BACKUP_80035.md @@ -0,0 +1,1147 @@ +--- +reviewers: +- bprashanth +title: Service +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +{{< glossary_definition term_id="service" length="short" >}} + +No need to modify your application to use an unfamiliar service discovery mechanism. +Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, +and can load-balance across them. + +{{% /capture %}} + +{{% capture body %}} + +## Motivation + +Kubernetes {{< glossary_tooltip term_id="pod" text="Pods" >}} are mortal. +They are born and when they die, they are not resurrected. +If you use a {{< glossary_tooltip term_id="deployment" >}} to run your app, +it can create and destroy Pods dynamically (e.g. when scaling out or in). + +Each Pod gets its own IP address, however the set of Pods +for a Deployment running in one moment in time could be different from +the set of Pods running that application a moment later. + +This leads to a problem: if some set of Pods (call them “backends”) provides +functionality to other Pods (call them “frontends”) inside your cluster, +how do those frontends find out and keep track of which IP address to connect +to, so that the frontend can use the backend part of the workload? + +Enter _Services_. + +## Service resources {#service-resource} + +In Kubernetes, a Service is an abstraction which defines a logical set of Pods +and a policy by which to access them (you'll sometimes see this pattern called +a micro-service). The set of Pods targeted by a Service is usually determined +by a {{< glossary_tooltip text="selector" term_id="selector" >}} +(see [below](#services-without-selectors) for why you might want a Service +_without_ a selector). + +For example: consider a stateless image-processing backend which is running with +3 replicas. Those replicas are fungible—frontends do not care which backend +they use. 
While the actual Pods that compose the backend set may change, the +frontend clients should not need to be aware of that, nor should they need to keep +track of the set of backends themselves. + +The Service abstraction enables this decoupling. + +### Cloud-native service discovery + +If you're able to use Kubernetes APIs for service discovery in your application, +you can query the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +for Endpoints, that will be updated whenever the set of Pods in a Service changes. + +For non-native applications, Kubernetes offers ways to place a network port or load +balancer in between your application and the backend Pods. + +## Defining a service + +A Service in Kubernetes is a REST object, similar to a Pod. Like all of the +REST objects, you can `POST` a Service definition to the API server to create +a new instance. + +For example, suppose you have a set of Pods that each listen on TCP port 9376 +and carry a label `"app=MyApp"`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new Service object named “my-service” which +targets TCP port 9376 on any Pod with the `"app=MyApp"` label. + +This Service will also be assigned an IP address (sometimes called the "cluster IP"), +which is used by the service proxies +(see [Virtual IPs and service proxies](#virtual-ips-and-service-proxies) below). + +The controller for the Service selector will continuously scan for Pods that +match its selector, and will then POST any updates to an Endpoint object +also named “my-service”. + +{{< note >}} +A Service can map _any_ incoming `port` to a `targetPort`. By default, and +for convenience, the `targetPort` will be set to the same value as the `port` +field. +{{< /note >}} + +Port definitions in Pods have names, and you can reference these names in the +targetPort attribute of a Service. This will work even if there are a mixture +of Pods in the Service, with the same network protocol available via different +port numbers but a single configured name. +This offers a lot of flexibility for deploying and evolving your Services. +For example, you can change the port number that pods expose in the next +version of your backend software, without breaking clients. + +The default protocol for services is TCP; you can also use any other +[supported protocol](#protocol-support). + +As many Services need to expose more than one port, Kubernetes supports multiple +port definitions on a Service object. +Each port definition can have the same `protocol`, or a different one. + +### Services without selectors + +Services most commonly abstract access to Kubernetes Pods, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in your + test environment you use your own databases. + * You want to point your service to a service in a different + {{< glossary_tooltip term_id="namespace" >}} or on another cluster. + * You are migrating a workload to Kubernetes. Whilst evaluating the approach, + you run only a proportion of your backends in Kubernetes. + +In any of these scenarios you can define a service _without_ a Pod selector. 
+For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding Endpoint object will *not* be +created automatically. You can manually map the service to the network address and port +where it's running, by adding an Endpoint object manually: + +```yaml +apiVersion: v1 +kind: Endpoints +metadata: + name: my-service +subsets: + - addresses: + - ip: 192.0.2.42 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs _must not_ be: loopback (127.0.0.0/8 for IPv4, ::1/128 for IPv6), or +link-local (169.254.0.0/16 and 224.0.0.0/24 for IPv4, fe80::/64 for IPv6). + +Endpoint IP addresses also cannot be the cluster IPs of other Kubernetes services, +because {{< glossary_tooltip term_id="kube-proxy" >}} doesn't support virtual IPs +as a destination. +{{< /note >}} + +Accessing a Service without a selector works the same as if it had a selector. +In the example above, traffic will be routed to the single endpoint defined in +the YAML: `192.0.2.42:9376` (TCP). + +An ExternalName Service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +### Why not use round-robin DNS? + +A question that pops up every now and then is why Kubernetes relies on +proxying to forward inbound traffic to backends. What about other +approaches? For example, would it be possible to configure DNS records that +have multiple A values (or AAAA for IPv6), and rely on round-robin name +resolution? + +There are a few reasons for using proxying for Services: + + * There is a long history of DNS implementations not respecting record TTLs, + and caching the results of name lookups after they should have expired. + * Some apps do DNS lookups only once and cache the results indefinitely. + * Even if apps and libraries did proper re-resolution, the low or zero TTLs + on the DNS records could impose a high load on DNS that then becomes + difficult to manage. + +### Version compatibility + +Since Kubernetes v1.0 you have been able to use the +[userspace proxy mode](#proxy-mode-userspace). +Kubernetes v1.1 added iptables mode proxying, and in Kubernetes v1.2 the +iptables mode for kube-proxy became the default. +Kubernetes v1.8 added ipvs proxy mode. + +### User space proxy mode {#proxy-mode-userspace} + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of Service and Endpoint objects. For each Service it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the Service's backend Pods (as reported via +Endpoints). kube-proxy takes the `SessionAffinity` setting of the Service into +account when deciding which backend Pod to use. + +Lastly, the user-space proxy installs iptables rules which capture traffic to +the Service's `clusterIP` (which is virtual) and `port`. The rules +redirect that traffic to the proxy port which proxies the backend Pod. + +By default, kube-proxy in userspace mode chooses a backend via a round-robin algorithm. 
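+
+If you need to run kube-proxy in this older userspace mode (for example, to
+compare its behavior against the newer modes), you can select the mode
+explicitly. The following is a minimal sketch, assuming kube-proxy is started
+with a `--config` file in the `kubeproxy.config.k8s.io/v1alpha1` format; how
+that file is managed depends on how your cluster tooling deploys kube-proxy:
+
+```yaml
+# Sketch only: select the userspace proxy mode via the kube-proxy
+# configuration file. Verify the field names against your kube-proxy version.
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+mode: "userspace"
+```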
+ +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### `iptables` proxy mode {#proxy-mode-iptables} + +In this mode, kube-proxy watches the Kubernetes control plane for the addition and +removal of Service and Endpoint objects. For each Service, it installs +iptables rules which capture traffic to the Service's `clusterIP` (which is +virtual) and `port` and redirects that traffic to one of the Service's +backend sets. For each Endpoint object, it installs iptables rules which +select a backend Pod. + +By default, kube-proxy in iptables mode chooses a backend at random. + +Using iptables to handle traffic has a lower system overhead, because traffic +is handled by Linux netfilter without the need switch between userspace and the +kernel space. This approach is also likely to be more reliable. + +If kube-proxy is running in iptables mode and the first Pod that's selected +does not respond, the connection will fail. This is different from userspace +mode: in that scenario, kube-proxy would detect that the connection to the first +Pod had failed and would automatically retry with a different backend Pod. + +You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes) +to verify that backend Pods are working OK, so that kube-proxy in iptables mode +only sees backends that test out as healthy. Doing this means you avoid +having traffic sent via kube-proxy to a Pod that's known to have failed. + +![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg) + +### IPVS proxy mode {#proxy-mode-ipvs} + +{{< feature-state for_k8s_version="v1.11" state="stable" >}} + +In `ipvs` mode, kube-proxy watches Kubernetes Services and Endpoints, +calls `netlink` interface to create IPVS rules accordingly and synchronizes +IPVS rules with Kubernetes Services and Endpoints periodically. +This control loop ensures that IPVS status matches the desired +state. +When accessing a Service, IPVS will direct traffic to one of the backend Pods. + +The IPVS proxy mode is based on netfilter hook function that is similar to +iptables mode, but uses hash table as the underlying data structure and works +in the kernel space. +That means kube-proxy in IPVS mode redirects traffic with a lower latency than +kube-proxy in iptables mode, with much better performance when synchronising +proxy rules. Compared to the other proxy modes, IPVS mode also supports a +higher throughput of network traffic. + +IPVS provides more options for balancing traffic to backend Pods; +these are: + +- `rr`: round-robin +- `lc`: least connection (smallest number of open connections) +- `dh`: destination hashing +- `sh`: source hashing +- `sed`: shortest expected delay +- `nq`: never queue + +{{< note >}} +To run kube-proxy in IPVS mode, you must make the IPVS Linux available on +the node before you starting kube-proxy. + +When kube-proxy starts in IPVS proxy mode, it will verify whether IPVS +kernel modules are available, and if those are not detected then kube-proxy +fall back to running in iptables proxy mode. +{{< /note >}} + +![Services overview diagram for IPVS proxy](/images/docs/services-ipvs-overview.svg) + +In any of these proxy models, any traffic bound for the Service’s IP:Port is +proxied to an appropriate backend without the clients knowing anything +about Kubernetes or Services or Pods. 
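+
+As an illustrative sketch, the proxy mode and (for IPVS) the load balancing
+algorithm can be chosen in the kube-proxy configuration file. This assumes the
+same `kubeproxy.config.k8s.io/v1alpha1` configuration format as the sketch
+earlier in this section; check the available fields and scheduler names against
+your kube-proxy version:
+
+```yaml
+# Sketch only: run kube-proxy in IPVS mode with the "least connection"
+# scheduler from the list above.
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+mode: "ipvs"
+ipvs:
+  scheduler: "lc"
+```
+
+As noted above, if the IPVS kernel modules are not detected on the node,
+kube-proxy falls back to the iptables mode.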
+ +If you want to make sure that connections from a particular client +are passed to the same Pod each time, you can select session affinity based +the on client's IP addresses by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"). +You can then also set the maximum session sticky time by setting +`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` appropriately. +(the default value is 10800, which works out to be 3 hours). + +## Multi-Port Services + +For some Services, you need to expose more than one port. +Kubernetes lets you configure multiple port definitions on a Service object. +When using multiple ports for a Service, you must give all of your ports names +so that these are unambiguous. For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +{{< note >}} +As with Kubernetes {{< glossary_tooltip term_id="name" text="names">}} in general, names for ports +must only contain lowercase alphanumeric characters and `-`. Port names must +also start and end with an alphanumeric character. + +For example, the names `123-abc` and `web` are valid, but `123_abc` and `-web` are not. +{{< /note >}} + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. For example, if you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and difficult to re-configure. + +The IP address that you choose must be a valid IPv4 or IPv6 address from within the +`service-cluster-ip-range` CIDR range that is configured for the API server. +If you try to create a Service with an invalid clusterIP address value, the API +server will returns a 422 HTTP status code to indicate that there's a problem. + +## Discovering services + +Kubernetes supports 2 primary modes of finding a Service - environment +variables and DNS. + +### Environment variables + +When a Pod is run on a Node, the kubelet adds a set of environment variables +for each active Service. It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. + +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +{{< note >}} +When you have a Pod that might need to acccess a Service, and you are using +the environment variable method to publish the port and cluster IP to the client +Pods, you must create the Service *before* the client Pods come into existence. +Otherwise, those client Pods won't have their environment variables populated. 
+ +If you only use DNS to discover the cluster IP for a Service, you don't need to +worry about this ordering issue. +{{< /note >}} + +### DNS + +You can (and almost always should) set up a DNS service for your Kubernetes +cluster using an [add-on](/docs/concepts/cluster-administration/addons/). + +A cluster-aware DNS server, such as CoreDNS, watches the Kubernetes API for new +Services and creates a set of DNS records for each one. If DNS has been enabled +throughout your cluster then all Pods should automatically be able to resolve +Services by their DNS name. + +For example, if you have a Service called `"my-service"` in a Kubernetes +Namespace `"my-ns"`, the control plane and the DNS service acting together will +create a DNS record for `"my-service.my-ns"`. Pods in the `"my-ns"` Namespace +should be able to find it by simply doing a name lookup for `my-service` +(`"my-service.my-ns"` would also work). + +Pods in other Namespaces must qualify the name as `my-service.my-ns`. These names +will resolve to the cluster IP assigned for the Service. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` Service has a port named `"http"` with protocol set to +`TCP`, you can do a DNS SRV query for `_http._tcp.my-service.my-ns` to discover +the port number for `"http"`, as well as the IP address. + +The Kubernetes DNS server is the only way to access `ExternalName` Services. +You can find more information about `ExternalName` resolution in +[DNS Pods and Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create what are termed “headless” Services, by explicitly +specifying `"None"` for the cluster IP (`.spec.clusterIP`). + +You can use a headless Service to interface with other service discovery mechanisms, +without being tied to Kubernetes' implementation. For example, you could implement +a custom [Operator]( +be built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined. + +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. + +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. However, the DNS system looks for and configures +either: + + * CNAME records for [`ExternalName`](#externalname)-type services. + * A records for any `Endpoints` that share a name with the service, for all + other types. + +## Publishing services (ServiceTypes) {#publishing-services-service-types} + +For some parts of your application (e.g. frontends) you may want to expose a +Service onto an external IP address, one that's outside of your cluster. + +Kubernetes `ServiceTypes` allow you to specify what kind of service you want. +The default is `ClusterIP`. + +`Type` values and their behaviors are: + + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the + default `ServiceType`. 
+ * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port + (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will + route, is automatically created. You'll be able to contact the `NodePort` service, + from outside the cluster, + by requesting `:`. + * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud + provider's load balancer. `NodePort` and `ClusterIP` services, to which the external + load balancer will route, are automatically created. + * [`ExternalName`](#externalname): Maps the service to the contents of the + `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record + with its value. No proxying of any kind is set up. + +{{< note >}} + +You need CoreDNS version 1.7 or higher to use the `ExternalName` type. + +{{< /note >}} + +### Type NodePort {#nodeport} + +If you set the `type` field to `NodePort`, the Kubernetes control plane will +allocate a port from a range specified by `--service-node-port-range` flag (default: 30000-32767). +Each node will proxy that port each (the same port number on every Node) into your Service. +Your service will report that allocated port in its `.spec.ports[*].nodePort` field. + + +If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s); this is supported since Kubernetes v1.10. +This flag takes a comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 192.0.2.0/25) to specify IP address ranges that kube-proxy should consider as local to this node. + +For example, if you start kube-proxy with flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The default for `--nodeport-addresses` is an empty list, and means that kube-proxy should consider all available network interfaces for NodePort. (That's also compatible with earlier Kubernetes releases). + +If you want a specific port number, you can specify a value in the `nodePort` +field. The control plane will either allocate you that port or report that +the API transaction failed. +This means that you need to take care about possible port collisions yourself). +You also have to use a valid port number, one that's inside the range configured +for NodePort use. + +Using a NodePort gives you the freedom to set up your own load balancing solution, +to configure environments that are not fully supported by Kubernetes, or even +to just expose one or more nodes' IPs directly. + +Note that this Service will be visible as both `:spec.ports[*].nodePort` +and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, would be filtered NodeIP(s).) + +### Type LoadBalancer {#loadbalancer} + +On cloud providers which support external load balancers, setting the `type` +field to `LoadBalancer` will provision a load balancer for your Service. +The actual creation of the load balancer happens asynchronously, and +information about the provisioned balancer will be published in the Service's +`.status.loadBalancer` field. 
For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + clusterIP: 10.0.171.239 + loadBalancerIP: 78.11.24.19 + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 146.148.47.155 +``` + +Traffic from the external load balancer will be directed at the backend Pods, +though exactly how that works depends on the cloud provider. + +Some cloud providers allow you to specify the `loadBalancerIP`. In those cases, the load-balancer will be created +with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, +the loadBalancer will be set up with an ephemeral IP address. If you specify a `loadBalancerIP` +but your cloud provider does not support the feature, the `loadbalancerIP` field that you +set will be ignored. + +{{< note >}} +If you're using SCTP, see the [caveat](#caveat-sctp-loadbalancer-service-type) below about the +`LoadBalancer` Service type. +{{< /note >}} + +{{< note >}} + +On **Azure**, if you want to use a user-specified public type `loadBalancerIP`, you first need +to create a static type public IP address resource. This public IP address resource should +be in the same resource group of the other automatically created resources of the cluster. +For example, `MC_myResourceGroup_myAKSCluster_eastus`. + +Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357). + +{{< /note >}} + +#### Internal load balancer +In a mixed environment it is sometimes necessary to route traffic from services inside the same +(virtual) network address block. + +In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints. + +You can achieve this by adding one the following annotations to a Service. +The annotation to add depends on the cloud service provider you're using. + +{{< tabs name="service_tabs" >}} +{{% tab name="Default" %}} +Select one of the tabs. +{{% /tab %}} +{{% tab name="GCP" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + cloud.google.com/load-balancer-type: "Internal" +[...] +``` +Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3. +For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing). +{{% /tab %}} +{{% tab name="AWS" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +[...] +``` +{{% /tab %}} +{{% tab name="Azure" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" +[...] +``` +{{% /tab %}} +{{% tab name="OpenStack" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" +[...] +``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] 
+``` +{{% /tab %}} +{{< /tabs >}} + + +#### TLS support on AWS {#ssl-support-on-aws} + +For partial TLS / SSL support on clusters running on AWS, you can add three +annotations to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a Pod speaks. For HTTPS and +SSL, the ELB will expect the Pod to authenticate itself over the encrypted +connection, using a certificate. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (Pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +you can use the following annotations: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +From Kubernetes v1.9 onwrds you can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) with HTTPS or SSL listeners for your Services. +To see which policies are available for use, you can the `aws` command line tool: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +You can then specify any one of those policies using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation; for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0, the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60 minutes. 
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. 
Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to the ELB +``` + +#### Network Load Balancer support on AWS + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + +To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +{{< note >}} +NLB only works with certain instance classes; see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +on Elastic Load Balancing for a list of supported instance types. +{{< /note >}} + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP address through to the node. If a service's `.spec.externalTrafficPolicy` +is set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) +to not locate on the same node. + +You can also use NLB Services with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. + +In order for client traffic to reach instances behind an NLB, the Node security +groups are modified with the following IP rules: + +| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | +|------|----------|---------|------------|---------------------| +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | +| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | + +In order to limit which client IP's can access the Network Load Balancer, +specify `loadBalancerSourceRanges`. + +```yaml +spec: + loadBalancerSourceRanges: + - "143.231.0.0/16" +``` + +{{< note >}} +If `.spec.loadBalancerSourceRanges` is not set, Kubernetes will +allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have +public IP addresses, be aware that non-NLB traffic can also reach all instances +in those modified security groups. + +{{< /note >}} + +### Type ExternalName {#externalname} + +Services of type ExternalName map a service to a DNS name, not to a typical selector such as +`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter. 
+ +This Service definition, for example, maps +the `my-service` Service in the `prod` namespace to `my.database.example.com`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: prod +spec: + type: ExternalName + externalName: my.database.example.com +``` +{{< note >}} +ExternalName accepts an IPv4 address string, but as a DNS names comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName +is intended to specify a canonical DNS name. To hardcode an IP address, consider using +[headless services](#headless-services). +{{< /note >}} + +When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service +will return a `CNAME` record with the value `my.database.example.com`. Accessing +`my-service` works in the same way as other Services but with the crucial +difference that redirection happens at the DNS level rather than via proxying or +forwarding. Should you later decide to move your database into your cluster, you +can start its pods, add appropriate selectors or endpoints, and change the +Service's `type`. + + +{{< note >}} +This section is indebted to the [Kubernetes Tips - Part +1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/). +{{< /note >}} + +### External IPs + +If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those +`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port, +will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility +of the cluster administrator. + +In the Service spec, `externalIPs` can be specified along with any of the `ServiceTypes`. +In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`) + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + externalIPs: + - 80.11.12.10 +``` + +## Shortcomings + +Using the userspace proxy for VIPs will work at small to medium scale, but will +not scale to very large clusters with thousands of Services. The [original +design proposal for portals](http://issue.k8s.io/1107) has more details on +this. + +Using the userspace proxy obscures the source IP address of a packet accessing +a Service. +This makes some kinds of network filtering (firewalling) impossible. The iptables +proxy mode does not +obscure in-cluster source IPs, but it does still impact clients coming through +a load balancer or node-port. + +The `Type` field is designed as nested functionality - each level adds to the +previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does +not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does) +but the current API requires it. + +## Virtual IP implementation {#the-gory-details-of-virtual-ips} + +The previous information should be sufficient for many people who just want to +use Services. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that you should not be +exposed to situations that could cause your actions to fail through no fault +of your own. 
For the design of the Service resource, this means not making +you choose your own port number for a if that choice might collide with +someone else's choice. That is an isolation failure. + +In order to allow you to choose a port number for your Services, we must +ensure that no two Services can collide. Kubernetes does that by allocating each +Service its own IP address. + +To ensure each service receives a unique IP, an internal allocator atomically +updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}} +prior to creating each Service. The map object must exist in the registry for +Services to get IP address assignments, otherwise creations will +fail with a message indicating an IP address could not be allocated. + +In the control plane, a background controller is responsible for creating that +map (needed to support migrating from older versions of Kubernetes that used +in-memory locking). Kubernetes also uses controllers to checking for invalid +assignments (eg due to administrator intervention) and for cleaning up allocated +IP addresses that are no longer used by any Services. + +### Service IP addresses {#ips-and-vips} + +Unlike Pod IP addresses, which actually route to a fixed destination, +Service IPs are not actually answered by a single host. Instead, kube-proxy +uses iptables (packet processing logic in Linux) to define _virtual_ IP addresses +which are transparently redirected as needed. When clients connect to the +VIP, their traffic is automatically transported to an appropriate endpoint. +The environment variables and DNS for Services are actually populated in +terms of the Service's virtual IP address (and port). + +kube-proxy supports three proxy modes—userspace, iptables and IPVS—which +each operate slightly differently. + +#### Userspace + +As an example, consider the image processing application described above. +When the backend Service is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the Service port is 1234, the +Service is observed by all of the kube-proxy instances in the cluster. +When a proxy sees a new Service, it opens a new random port, establishes an +iptables redirect from the virtual IP address to this new port, and starts accepting +connections on it. + +When a client connects to the Service's virtual IP address, the iptables +rule kicks in, and redirects the packets to the proxy's own port. +The “Service proxy” chooses a backend, and starts proxying traffic from the client to the backend. + +This means that Service owners can choose any port they want without risk of +collision. Clients can simply connect to an IP and port, without being aware +of which Pods they are actually accessing. + +#### iptables + +Again, consider the image processing application described above. +When the backend Service is created, the Kubernetes control plane assigns a virtual +IP address, for example 10.0.0.1. Assuming the Service port is 1234, the +Service is observed by all of the kube-proxy instances in the cluster. +When a proxy sees a new Service, it installs a series of iptables rules which +redirect from the virtual IP address to per-Service rules. The per-Service +rules link to per-Endpoint rules which redirect traffic (using destination NAT) +to the backends. + +When a client connects to the Service's virtual IP address the iptables rule kicks in. +A backend is chosen (either based on session affinity or randomly) and packets are +redirected to the backend. 
Unlike the userspace proxy, packets are never +copied to userspace, the kube-proxy does not have to be running for the virtual +IP address to work, and Nodes see traffic arriving from the unaltered client IP +address. + +This same basic flow executes when traffic comes in through a node-port or +through a load-balancer, though in those cases the client IP does get altered. + +#### IPVS + +iptables operations slow down dramatically in large scale cluster e.g 10,000 Services. +IPVS is designed for load balancing and based on in-kernel hash tables. So you can achieve performance consistency in large number of services from IPVS-based kube-proxy. Meanwhile, IPVS-based kube-proxy has more sophisticated load balancing algorithms (least conns, locality, weighted, persistence). + +## API Object + +Service is a top-level resource in the Kubernetes REST API. You can find more details +about the API object at: [Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core). + +## Supported protocols {#protocol-support} + +### TCP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use TCP for any kind of service, and it's the default network protocol. + +### UDP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use UDP for most services. For type=LoadBalancer services, UDP support +depends on the cloud provider offering this facility. + +### HTTP + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it, you can use a Service in LoadBalancer mode +to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints +of the Service. + +{{< note >}} +You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service +to expose HTTP / HTTPS services. +{{< /note >}} + +### PROXY protocol + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it (eg, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +you can use a Service in LoadBalancer mode to configure a load balancer outside +of Kubernetes itself, that will forward connections prefixed with +[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). + +The load balancer will send an initial series of octets describing the +incoming connection, similar to this example + +``` +PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n +``` +followed by the data from the client. + +### SCTP + +{{< feature-state for_k8s_version="v1.12" state="alpha" >}} + +Kubernetes supports SCTP as a `protocol` value in Service, Endpoint, NetworkPolicy and Pod definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `--feature-gates=SCTPSupport=true,…`. + +When the feature gate is enabled, you can set the `protocol` field of a Service, Endpoint, NetworkPolicy or Pod to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections. + +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +{{< warning >}} +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. 
+{{< /warning >}} + +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +{{< warning >}} +You can only create a Service with `type` LoadBalancer plus `protocol` SCTP if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise, the Service creation request is rejected. The current set of cloud load balancer providers (Azure, AWS, CloudStack, GCE, OpenStack) all lack support for SCTP. +{{< /warning >}} + +##### Windows {#caveat-sctp-windows-os} + +{{< warning >}} +SCTP is not supported on Windows based nodes. +{{< /warning >}} + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +{{< warning >}} +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. +{{< /warning >}} + +## Future work + +In the future, the proxy policy for Services can become more nuanced than +simple round-robin balancing, for example master-elected or sharded. We also +envision that some Services will have "real" load balancers, in which case the +virtual IP address will simply transport the packets there. + +The Kubernetes project intends to improve support for L7 (HTTP) Services. + +The Kubernetes project intends to have more flexible ingress modes for Services +which encompass the current ClusterIP, NodePort, and LoadBalancer modes and more. + + +{{% /capture %}} + +{{% capture whatsnext %}} + +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) +* Read about [Ingress](/docs/concepts/services-networking/ingress/) + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_BASE_16221.md b/content/en/docs/concepts/services-networking/service_BASE_16221.md new file mode 100644 index 0000000000000..a6bc859b79cab --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_BASE_16221.md @@ -0,0 +1,1021 @@ +--- +reviewers: +- bprashanth +title: Services +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives containers their own IP addresses and a single DNS name for a set of containers, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +Kubernetes [`Pods`](/docs/concepts/workloads/pods/pod/) are mortal. They are born and when they die, they +are not resurrected. [`ReplicaSets`](/docs/concepts/workloads/controllers/replicaset/) in +particular create and destroy `Pods` dynamically (e.g. when scaling out or in). While each `Pod` gets its own IP address, even +those IP addresses cannot be relied upon to be stable over time. This leads to +a problem: if some set of `Pods` (let's call them backends) provides +functionality to other `Pods` (let's call them frontends) inside the Kubernetes +cluster, how do those frontends find out and keep track of which backends are +in that set? + +Enter `Services`. + +A Kubernetes `Service` is an abstraction which defines a logical set of `Pods` +and a policy by which to access them - sometimes called a micro-service. The +set of `Pods` targeted by a `Service` is (usually) determined by a [`Label +Selector`](/docs/concepts/overview/working-with-objects/labels/#label-selectors) (see below for why you might want a +`Service` without a selector). + +As an example, consider an image-processing backend which is running with 3 +replicas. 
Those replicas are fungible - frontends do not care which backend +they use. While the actual `Pods` that compose the backend set may change, the +frontend clients should not need to be aware of that or keep track of the list +of backends themselves. The `Service` abstraction enables this decoupling. + +For Kubernetes-native applications, Kubernetes offers a simple `Endpoints` API +that is updated whenever the set of `Pods` in a `Service` changes. For +non-native applications, Kubernetes offers a virtual-IP-based bridge to Services +which redirects to the backend `Pods`. + +{{% /capture %}} + +{{% capture body %}} + +## Defining a service + +A `Service` in Kubernetes is a REST object, similar to a `Pod`. Like all of the +REST objects, a `Service` definition can be POSTed to the apiserver to create a +new instance. For example, suppose you have a set of `Pods` that each expose +port 9376 and carry a label `"app=MyApp"`. + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new `Service` object named "my-service" which +targets TCP port 9376 on any `Pod` with the `"app=MyApp"` label. This `Service` +will also be assigned an IP address (sometimes called the "cluster IP"), which +is used by the service proxies (see below). The `Service`'s selector will be +evaluated continuously and the results will be POSTed to an `Endpoints` object +also named "my-service". + +Note that a `Service` can map an incoming port to any `targetPort`. By default +the `targetPort` will be set to the same value as the `port` field. Perhaps +more interesting is that `targetPort` can be a string, referring to the name of +a port in the backend `Pods`. The actual port number assigned to that name can +be different in each backend `Pod`. This offers a lot of flexibility for +deploying and evolving your `Services`. For example, you can change the port +number that pods expose in the next version of your backend software, without +breaking clients. + +`TCP` is the default protocol for services, and you can also use any other +[supported protocol](#protocol-support). At the moment, you can only set a +single `port` and `protocol` for a Service. + +### Services without selectors + +Services generally abstract access to Kubernetes `Pods`, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in test + you use your own databases. + * You want to point your service to a service in another + [`Namespace`](/docs/concepts/overview/working-with-objects/namespaces/) or on another cluster. + * You are migrating your workload to Kubernetes and some of your backends run + outside of Kubernetes. + +In any of these scenarios you can define a service without a selector: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding `Endpoints` object will not be +created. You can manually map the service to your own specific endpoints: + +```yaml +kind: Endpoints +apiVersion: v1 +metadata: + name: my-service +subsets: + - addresses: + - ip: 1.2.3.4 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs may not be loopback (127.0.0.0/8), link-local +(169.254.0.0/16), or link-local multicast (224.0.0.0/24). 
They cannot be the +cluster IPs of other Kubernetes services either because the `kube-proxy` +component doesn't support virtual IPs as destination yet. +{{< /note >}} + +Accessing a `Service` without a selector works the same as if it had a selector. +The traffic will be routed to endpoints defined by the user (`1.2.3.4:9376` in +this example). + +An ExternalName service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +In Kubernetes v1.0, `Services` are a "layer 4" (TCP/UDP over IP) construct, the +proxy was purely in userspace. In Kubernetes v1.1, the `Ingress` API was added +(beta) to represent "layer 7"(HTTP) services, iptables proxy was added too, +and became the default operating mode since Kubernetes v1.2. In Kubernetes v1.8.0-beta.0, +ipvs proxy was added. + +### Proxy-mode: userspace + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service` it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the `Service`'s backend `Pods` (as reported in +`Endpoints`). Which backend `Pod` to use is decided based on the +`SessionAffinity` of the `Service`. Lastly, it installs iptables rules which +capture traffic to the `Service`'s `clusterIP` (which is virtual) and `Port` +and redirects that traffic to the proxy port which proxies the backend `Pod`. +By default, the choice of backend is round robin. + +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### Proxy-mode: iptables + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service`, it installs +iptables rules which capture traffic to the `Service`'s `clusterIP` (which is +virtual) and `Port` and redirects that traffic to one of the `Service`'s +backend sets. For each `Endpoints` object, it installs iptables rules which +select a backend `Pod`. By default, the choice of backend is random. + +Obviously, iptables need not switch back between userspace and kernelspace, it should be +faster and more reliable than the userspace proxy. However, unlike the +userspace proxier, the iptables proxier cannot automatically retry another +`Pod` if the one it initially selects does not respond, so it depends on +having working [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#defining-readiness-probes). + +![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg) + +### Proxy-mode: ipvs + +{{< feature-state for_k8s_version="v1.9" state="beta" >}} + +In this mode, kube-proxy watches Kubernetes Services and Endpoints, +calls `netlink` interface to create ipvs rules accordingly and syncs ipvs rules with Kubernetes +Services and Endpoints periodically, to make sure ipvs status is +consistent with the expectation. When Service is accessed, traffic will +be redirected to one of the backend Pods. + +Similar to iptables, Ipvs is based on netfilter hook function, but uses hash +table as the underlying data structure and works in the kernel space. 
+That means ipvs redirects traffic much faster, and has much +better performance when syncing proxy rules. Furthermore, ipvs provides more +options for load balancing algorithm, such as: + +- `rr`: round-robin +- `lc`: least connection +- `dh`: destination hashing +- `sh`: source hashing +- `sed`: shortest expected delay +- `nq`: never queue + +{{< note >}} +ipvs mode assumes IPVS kernel modules are installed on the node +before running kube-proxy. When kube-proxy starts with ipvs proxy mode, +kube-proxy would validate if IPVS modules are installed on the node, if +it's not installed kube-proxy will fall back to iptables proxy mode. +{{< /note >}} + +![Services overview diagram for ipvs proxy](/images/docs/services-ipvs-overview.svg) + +In any of these proxy model, any traffic bound for the Service’s IP:Port is +proxied to an appropriate backend without the clients knowing anything +about Kubernetes or Services or Pods. Client-IP based session affinity +can be selected by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"), and you can set the max session sticky time by +setting the field `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` +if you have already set `service.spec.sessionAffinity` to "ClientIP" +(the default is “10800”). + +## Multi-Port Services + +Many `Services` need to expose more than one port. For this case, Kubernetes +supports multiple port definitions on a `Service` object. When using multiple +ports you must give all of your ports names, so that endpoints can be +disambiguated. For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +Note that the port names must only contain lowercase alphanumeric characters and `-`, and must begin & end with an alphanumeric character. `123-abc` and `web` are valid, but `123_abc` and `-web` are not valid names. + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. For example, if you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and difficult to re-configure. +The IP address that a user chooses must be a valid IP address and within the +`service-cluster-ip-range` CIDR range that is specified by flag to the API +server. If the IP address value is invalid, the apiserver returns a 422 HTTP +status code to indicate that the value is invalid. + +### Why not use round-robin DNS? + +A question that pops up every now and then is why we do all this stuff with +virtual IPs rather than just use standard round-robin DNS. There are a few +reasons: + + * There is a long history of DNS libraries not respecting DNS TTLs and + caching the results of name lookups. + * Many apps do DNS lookups once and cache the results. + * Even if apps and libraries did proper re-resolution, the load of every + client re-resolving DNS over and over would be difficult to manage. + +We try to discourage users from doing things that hurt themselves. That said, +if enough people ask for this, we may implement it as an alternative. + +## Discovering services + +Kubernetes supports 2 primary modes of finding a `Service` - environment +variables and DNS. 
+ +### Environment variables + +When a `Pod` is run on a `Node`, the kubelet adds a set of environment variables +for each active `Service`. It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. + +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +*This does imply an ordering requirement* - any `Service` that a `Pod` wants to +access must be created before the `Pod` itself, or else the environment +variables will not be populated. DNS does not have this restriction. + +### DNS + +An optional (though strongly recommended) [cluster +add-on](/docs/concepts/cluster-administration/addons/) is a DNS server. The +DNS server watches the Kubernetes API for new `Services` and creates a set of +DNS records for each. If DNS has been enabled throughout the cluster then all +`Pods` should be able to do name resolution of `Services` automatically. + +For example, if you have a `Service` called `"my-service"` in a Kubernetes +`Namespace` called `"my-ns"`, a DNS record for `"my-service.my-ns"` is created. `Pods` +which exist in the `"my-ns"` `Namespace` should be able to find it by simply doing +a name lookup for `"my-service"`. `Pods` which exist in other `Namespaces` must +qualify the name as `"my-service.my-ns"`. The result of these name lookups is the +cluster IP. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` `Service` has a port named `"http"` with protocol `TCP`, you +can do a DNS SRV query for `"_http._tcp.my-service.my-ns"` to discover the port +number for `"http"`. + +The Kubernetes DNS server is the only way to access services of type +`ExternalName`. More information is available in the [DNS Pods and +Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create "headless" services by specifying `"None"` for the +cluster IP (`.spec.clusterIP`). + +This option allows developers to reduce coupling to the Kubernetes system by +allowing them freedom to do discovery their own way. Applications can still use +a self-registration pattern and adapters for other discovery systems could easily +be built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined. + +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. 
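+
+As a minimal sketch (the Service name below is an illustrative assumption), such a headless `Service` is declared like any other `Service`, with `.spec.clusterIP` explicitly set to `None`:
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: my-headless-service
+spec:
+  clusterIP: None  # headless: no cluster IP is allocated; DNS returns the backing Pods' A records
+  selector:
+    app: MyApp
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 9376
+```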
+ +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. However, the DNS system looks for and configures +either: + + * CNAME records for [`ExternalName`](#externalname)-type services. + * A records for any `Endpoints` that share a name with the service, for all + other types. + +## Publishing services - service types + +For some parts of your application (e.g. frontends) you may want to expose a +Service onto an external (outside of your cluster) IP address. + + +Kubernetes `ServiceTypes` allow you to specify what kind of service you want. +The default is `ClusterIP`. + +`Type` values and their behaviors are: + + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the + default `ServiceType`. + * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port + (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will + route, is automatically created. You'll be able to contact the `NodePort` service, + from outside the cluster, + by requesting `:`. + * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud + provider's load balancer. `NodePort` and `ClusterIP` services, to which the external + load balancer will route, are automatically created. + * [`ExternalName`](#externalname): Maps the service to the contents of the + `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record + with its value. No proxying of any kind is set up. This requires version 1.7 or + higher of `kube-dns`. + +### Type NodePort {#nodeport} + +If you set the `type` field to `NodePort`, the Kubernetes master will +allocate a port from a range specified by `--service-node-port-range` flag (default: 30000-32767), and each +Node will proxy that port (the same port number on every Node) into your `Service`. +That port will be reported in your `Service`'s `.spec.ports[*].nodePort` field. + +If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s) (which is supported since Kubernetes v1.10). A comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 1.2.3.4/32) is used to filter addresses local to this node. For example, if you start kube-proxy with flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The `--nodeport-addresses` is defaulted to empty (`[]`), which means select all available interfaces and is in compliance with current NodePort behaviors. + +If you want a specific port number, you can specify a value in the `nodePort` +field, and the system will allocate you that port or else the API transaction +will fail (i.e. you need to take care about possible port collisions yourself). +The value you specify must be in the configured range for node ports. + +This gives developers the freedom to set up their own load balancers, to +configure environments that are not fully supported by Kubernetes, or +even to just expose one or more nodes' IPs directly. + +Note that this Service will be visible as both `:spec.ports[*].nodePort` +and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, would be filtered NodeIP(s).) 
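+
+For example, the following sketch requests a specific node port (the value `30007` is an arbitrary illustration and must lie within the configured node-port range):
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: my-service
+spec:
+  type: NodePort
+  selector:
+    app: MyApp
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 9376
+    nodePort: 30007  # optional; must be within --service-node-port-range (default 30000-32767)
+```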
+ +### Type LoadBalancer {#loadbalancer} + +On cloud providers which support external load balancers, setting the `type` +field to `LoadBalancer` will provision a load balancer for your `Service`. +The actual creation of the load balancer happens asynchronously, and +information about the provisioned balancer will be published in the `Service`'s +`.status.loadBalancer` field. For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + clusterIP: 10.0.171.239 + loadBalancerIP: 78.11.24.19 + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 146.148.47.155 +``` + +Traffic from the external load balancer will be directed at the backend `Pods`, +though exactly how that works depends on the cloud provider. Some cloud providers allow +the `loadBalancerIP` to be specified. In those cases, the load-balancer will be created +with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, +an ephemeral IP will be assigned to the loadBalancer. If the `loadBalancerIP` is specified, but the +cloud provider does not support the feature, the field will be ignored. + +**Special notes for Azure**: To use user-specified public type `loadBalancerIP`, a static type +public IP address resource needs to be created first, and it should be in the same resource +group of the other automatically created resources of the cluster. For example, `MC_myResourceGroup_myAKSCluster_eastus`. Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357). + +{{< note >}} +The support of SCTP in the cloud provider's load balancer is up to the cloud provider's +load balancer implementation. If SCTP is not supported by the cloud provider's load balancer the +Service creation request is accepted but the creation of the load balancer fails. +{{< /note >}} + +#### Internal load balancer +In a mixed environment it is sometimes necessary to route traffic from services inside the same VPC. + +In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints. + +This can be achieved by adding the following annotations to the service based on cloud provider. + +{{< tabs name="service_tabs" >}} +{{% tab name="Default" %}} +Select one of the tabs. +{{% /tab %}} +{{% tab name="GCP" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + cloud.google.com/load-balancer-type: "Internal" +[...] +``` +Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3. +For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing). +{{% /tab %}} +{{% tab name="AWS" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +[...] +``` +{{% /tab %}} +{{% tab name="Azure" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" +[...] 
+``` +{{% /tab %}} +{{% tab name="OpenStack" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" +[...] +``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] +``` +{{% /tab %}} +{{< /tabs >}} + + +#### SSL support on AWS +For partial SSL support on clusters running on AWS, starting with 1.3 three +annotations can be added to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a pod speaks. For HTTPS and +SSL, the ELB will expect the pod to authenticate itself over the encrypted +connection. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +the following annotations may be used: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +Beginning in 1.9, services can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) +for any HTTPS or SSL listeners. To see which policies are available for use, run +the awscli command: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +Any one of those policies can then be specified using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation, for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0 the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. 
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. 
Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to ELB +``` + +#### Network Load Balancer support on AWS [alpha] + +{{< warning >}} +This is an alpha feature and not recommended for production clusters yet. +{{< /warning >}} + +Starting in version 1.9.0, Kubernetes supports Network Load Balancer (NLB). To +use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` +with the value set to `nlb`. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP through to the node. If a service's `.spec.externalTrafficPolicy` is +set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) +to not locate pods on the same node. + +NLB can also be used with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. + +In order for client traffic to reach instances behind an NLB, the Node security +groups are modified with the following IP rules: + +| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | +|------|----------|---------|------------|---------------------| +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | +| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | + +Be aware that if `.spec.loadBalancerSourceRanges` is not set, Kubernetes will +allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have +public IP addresses, be aware that non-NLB traffic can also reach all instances +in those modified security groups. + +In order to limit which client IP's can access the Network Load Balancer, +specify `loadBalancerSourceRanges`. + +```yaml +spec: + loadBalancerSourceRanges: + - "143.231.0.0/16" +``` + +{{< note >}} +NLB only works with certain instance classes, see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +for supported instance types. 
+{{< /note >}} + +### Type ExternalName {#externalname} + +Services of type ExternalName map a service to a DNS name, not to a typical selector such as +`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter. + +This Service definition, for example, maps +the `my-service` Service in the `prod` namespace to `my.database.example.com`: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service + namespace: prod +spec: + type: ExternalName + externalName: my.database.example.com +``` +{{< note >}} +ExternalName accepts an IPv4 address string, but as a DNS name comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName +is intended to specify a canonical DNS name. To hardcode an IP address, consider headless services. +{{< /note >}} + +When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service +will return a `CNAME` record with the value `my.database.example.com`. Accessing +`my-service` works in the same way as other Services but with the crucial +difference that redirection happens at the DNS level rather than via proxying or +forwarding. Should you later decide to move your database into your cluster, you +can start its pods, add appropriate selectors or endpoints, and change the +service's `type`. + +{{< note >}} +This section is indebted to the [Kubernetes Tips - Part +1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/). +{{< /note >}} + +### External IPs + +If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those +`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port, +will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility +of the cluster administrator. + +In the `ServiceSpec`, `externalIPs` can be specified along with any of the `ServiceTypes`. +In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`"" (`externalIP:port`) + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + externalIPs: + - 80.11.12.10 +``` + +## Shortcomings + +Using the userspace proxy for VIPs will work at small to medium scale, but will +not scale to very large clusters with thousands of Services. See [the original +design proposal for portals](http://issue.k8s.io/1107) for more details. + +Using the userspace proxy obscures the source-IP of a packet accessing a `Service`. +This makes some kinds of firewalling impossible. The iptables proxier does not +obscure in-cluster source IPs, but it does still impact clients coming through +a load-balancer or node-port. + +The `Type` field is designed as nested functionality - each level adds to the +previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does +not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does) +but the current API requires it. + +## Future work + +In the future we envision that the proxy policy can become more nuanced than +simple round robin balancing, for example master-elected or sharded. We also +envision that some `Services` will have "real" load balancers, in which case the +VIP will simply transport the packets there. 
+ +We intend to improve our support for L7 (HTTP) `Services`. + +We intend to have more flexible ingress modes for `Services` which encompass +the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more. + +## The gory details of virtual IPs + +The previous information should be sufficient for many people who just want to +use `Services`. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that users should not be +exposed to situations that could cause their actions to fail through no fault +of their own. In this situation, we are looking at network ports - users +should not have to choose a port number if that choice might collide with +another user. That is an isolation failure. + +In order to allow users to choose a port number for their `Services`, we must +ensure that no two `Services` can collide. We do that by allocating each +`Service` its own IP address. + +To ensure each service receives a unique IP, an internal allocator atomically +updates a global allocation map in etcd prior to creating each service. The map object +must exist in the registry for services to get IPs, otherwise creations will +fail with a message indicating an IP could not be allocated. A background +controller is responsible for creating that map (to migrate from older versions +of Kubernetes that used in memory locking) as well as checking for invalid +assignments due to administrator intervention and cleaning up any IPs +that were allocated but which no service currently uses. + +### IPs and VIPs + +Unlike `Pod` IP addresses, which actually route to a fixed destination, +`Service` IPs are not actually answered by a single host. Instead, we use +`iptables` (packet processing logic in Linux) to define virtual IP addresses +which are transparently redirected as needed. When clients connect to the +VIP, their traffic is automatically transported to an appropriate endpoint. +The environment variables and DNS for `Services` are actually populated in +terms of the `Service`'s VIP and port. + +We support three proxy modes - userspace, iptables and ipvs which operate +slightly differently. + +#### Userspace + +As an example, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it opens a new random port, establishes an +iptables redirect from the VIP to this new port, and starts accepting +connections on it. + +When a client connects to the VIP the iptables rule kicks in, and redirects +the packets to the `Service proxy`'s own port. The `Service proxy` chooses a +backend, and starts proxying traffic from the client to the backend. + +This means that `Service` owners can choose any port they want without risk of +collision. Clients can simply connect to an IP and port, without being aware +of which `Pods` they are actually accessing. + +#### Iptables + +Again, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. 
+When a proxy sees a new `Service`, it installs a series of iptables rules which +redirect from the VIP to per-`Service` rules. The per-`Service` rules link to +per-`Endpoint` rules which redirect (Destination NAT) to the backends. + +When a client connects to the VIP the iptables rule kicks in. A backend is +chosen (either based on session affinity or randomly) and packets are +redirected to the backend. Unlike the userspace proxy, packets are never +copied to userspace, the kube-proxy does not have to be running for the VIP to +work, and the client IP is not altered. + +This same basic flow executes when traffic comes in through a node-port or +through a load-balancer, though in those cases the client IP does get altered. + +#### Ipvs + +Iptables operations slow down dramatically in large scale cluster e.g 10,000 Services. IPVS is designed for load balancing and based on in-kernel hash tables. So we can achieve performance consistency in large number of services from IPVS-based kube-proxy. Meanwhile, IPVS-based kube-proxy has more sophisticated load balancing algorithms (least conns, locality, weighted, persistence). + +## API Object + +Service is a top-level resource in the Kubernetes REST API. More details about the +API object can be found at: +[Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core). + +## Supported protocols {#protocol-support} + +### TCP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use TCP for any kind of service, and it's the default network protocol. + +### UDP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use UDP for most services. For type=LoadBalancer services, UDP support +depends on the cloud provider offering this facility. + +### HTTP + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it, you can use a Service in LoadBalancer mode +to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints +of the Service. + +{{< note >}} +You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service +to expose HTTP / HTTPS services. +{{< /note >}} + +### PROXY protocol + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it (eg, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +you can use a Service in LoadBalancer mode to configure a load balancer outside +of Kubernetes itself, that will forward connections prefixed with +[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). + +The load balancer will send an initial series of octets describing the +incoming connection, similar to this example + +``` +PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n +``` +followed by the data from the client. + +### SCTP + +{{< feature-state for_k8s_version="v1.12" state="alpha" >}} + +Kubernetes supports SCTP as a `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `“--feature-gates=SCTPSupport=true,...”`. When the feature gate is enabled, users can set the `protocol` field of a `Service`, `Endpoint`, `NetworkPolicy` and `Pod` to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections. 
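+
+As an illustrative sketch only (the name, label, and port number are assumptions, and the `SCTPSupport` feature gate must be enabled on the apiserver), a `Service` using SCTP could be defined as:
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: my-sctp-service
+spec:
+  selector:
+    app: MySctpApp
+  ports:
+  - protocol: SCTP  # only honored when the SCTPSupport feature gate is enabled
+    port: 7777
+    targetPort: 7777
+```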
+ +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a `Pod`. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. + +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +A `Service` with `type` LoadBalancer and `protocol` SCTP can be created only if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise the `Service` creation request is rejected. The current set of cloud load balancer providers (`Azure`, `AWS`, `CloudStack`, `GCE`, `OpenStack`) do not support SCTP. + +##### Windows {#caveat-sctp-windows-os} + +SCTP is not supported on Windows based nodes. + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. + +{{% /capture %}} + +{{% capture whatsnext %}} + +Read [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/). + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_BASE_80035.md b/content/en/docs/concepts/services-networking/service_BASE_80035.md new file mode 100644 index 0000000000000..a6bc859b79cab --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_BASE_80035.md @@ -0,0 +1,1021 @@ +--- +reviewers: +- bprashanth +title: Services +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives containers their own IP addresses and a single DNS name for a set of containers, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +Kubernetes [`Pods`](/docs/concepts/workloads/pods/pod/) are mortal. They are born and when they die, they +are not resurrected. [`ReplicaSets`](/docs/concepts/workloads/controllers/replicaset/) in +particular create and destroy `Pods` dynamically (e.g. when scaling out or in). While each `Pod` gets its own IP address, even +those IP addresses cannot be relied upon to be stable over time. This leads to +a problem: if some set of `Pods` (let's call them backends) provides +functionality to other `Pods` (let's call them frontends) inside the Kubernetes +cluster, how do those frontends find out and keep track of which backends are +in that set? + +Enter `Services`. + +A Kubernetes `Service` is an abstraction which defines a logical set of `Pods` +and a policy by which to access them - sometimes called a micro-service. The +set of `Pods` targeted by a `Service` is (usually) determined by a [`Label +Selector`](/docs/concepts/overview/working-with-objects/labels/#label-selectors) (see below for why you might want a +`Service` without a selector). + +As an example, consider an image-processing backend which is running with 3 +replicas. Those replicas are fungible - frontends do not care which backend +they use. While the actual `Pods` that compose the backend set may change, the +frontend clients should not need to be aware of that or keep track of the list +of backends themselves. The `Service` abstraction enables this decoupling. 
+ +For Kubernetes-native applications, Kubernetes offers a simple `Endpoints` API +that is updated whenever the set of `Pods` in a `Service` changes. For +non-native applications, Kubernetes offers a virtual-IP-based bridge to Services +which redirects to the backend `Pods`. + +{{% /capture %}} + +{{% capture body %}} + +## Defining a service + +A `Service` in Kubernetes is a REST object, similar to a `Pod`. Like all of the +REST objects, a `Service` definition can be POSTed to the apiserver to create a +new instance. For example, suppose you have a set of `Pods` that each expose +port 9376 and carry a label `"app=MyApp"`. + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new `Service` object named "my-service" which +targets TCP port 9376 on any `Pod` with the `"app=MyApp"` label. This `Service` +will also be assigned an IP address (sometimes called the "cluster IP"), which +is used by the service proxies (see below). The `Service`'s selector will be +evaluated continuously and the results will be POSTed to an `Endpoints` object +also named "my-service". + +Note that a `Service` can map an incoming port to any `targetPort`. By default +the `targetPort` will be set to the same value as the `port` field. Perhaps +more interesting is that `targetPort` can be a string, referring to the name of +a port in the backend `Pods`. The actual port number assigned to that name can +be different in each backend `Pod`. This offers a lot of flexibility for +deploying and evolving your `Services`. For example, you can change the port +number that pods expose in the next version of your backend software, without +breaking clients. + +`TCP` is the default protocol for services, and you can also use any other +[supported protocol](#protocol-support). At the moment, you can only set a +single `port` and `protocol` for a Service. + +### Services without selectors + +Services generally abstract access to Kubernetes `Pods`, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in test + you use your own databases. + * You want to point your service to a service in another + [`Namespace`](/docs/concepts/overview/working-with-objects/namespaces/) or on another cluster. + * You are migrating your workload to Kubernetes and some of your backends run + outside of Kubernetes. + +In any of these scenarios you can define a service without a selector: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding `Endpoints` object will not be +created. You can manually map the service to your own specific endpoints: + +```yaml +kind: Endpoints +apiVersion: v1 +metadata: + name: my-service +subsets: + - addresses: + - ip: 1.2.3.4 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs may not be loopback (127.0.0.0/8), link-local +(169.254.0.0/16), or link-local multicast (224.0.0.0/24). They cannot be the +cluster IPs of other Kubernetes services either because the `kube-proxy` +component doesn't support virtual IPs as destination yet. +{{< /note >}} + +Accessing a `Service` without a selector works the same as if it had a selector. +The traffic will be routed to endpoints defined by the user (`1.2.3.4:9376` in +this example). 
+ +An ExternalName service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +In Kubernetes v1.0, `Services` are a "layer 4" (TCP/UDP over IP) construct, the +proxy was purely in userspace. In Kubernetes v1.1, the `Ingress` API was added +(beta) to represent "layer 7"(HTTP) services, iptables proxy was added too, +and became the default operating mode since Kubernetes v1.2. In Kubernetes v1.8.0-beta.0, +ipvs proxy was added. + +### Proxy-mode: userspace + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service` it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the `Service`'s backend `Pods` (as reported in +`Endpoints`). Which backend `Pod` to use is decided based on the +`SessionAffinity` of the `Service`. Lastly, it installs iptables rules which +capture traffic to the `Service`'s `clusterIP` (which is virtual) and `Port` +and redirects that traffic to the proxy port which proxies the backend `Pod`. +By default, the choice of backend is round robin. + +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### Proxy-mode: iptables + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service`, it installs +iptables rules which capture traffic to the `Service`'s `clusterIP` (which is +virtual) and `Port` and redirects that traffic to one of the `Service`'s +backend sets. For each `Endpoints` object, it installs iptables rules which +select a backend `Pod`. By default, the choice of backend is random. + +Obviously, iptables need not switch back between userspace and kernelspace, it should be +faster and more reliable than the userspace proxy. However, unlike the +userspace proxier, the iptables proxier cannot automatically retry another +`Pod` if the one it initially selects does not respond, so it depends on +having working [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#defining-readiness-probes). + +![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg) + +### Proxy-mode: ipvs + +{{< feature-state for_k8s_version="v1.9" state="beta" >}} + +In this mode, kube-proxy watches Kubernetes Services and Endpoints, +calls `netlink` interface to create ipvs rules accordingly and syncs ipvs rules with Kubernetes +Services and Endpoints periodically, to make sure ipvs status is +consistent with the expectation. When Service is accessed, traffic will +be redirected to one of the backend Pods. + +Similar to iptables, Ipvs is based on netfilter hook function, but uses hash +table as the underlying data structure and works in the kernel space. +That means ipvs redirects traffic much faster, and has much +better performance when syncing proxy rules. 
Furthermore, ipvs provides more +options for load balancing algorithm, such as: + +- `rr`: round-robin +- `lc`: least connection +- `dh`: destination hashing +- `sh`: source hashing +- `sed`: shortest expected delay +- `nq`: never queue + +{{< note >}} +ipvs mode assumes IPVS kernel modules are installed on the node +before running kube-proxy. When kube-proxy starts with ipvs proxy mode, +kube-proxy would validate if IPVS modules are installed on the node, if +it's not installed kube-proxy will fall back to iptables proxy mode. +{{< /note >}} + +![Services overview diagram for ipvs proxy](/images/docs/services-ipvs-overview.svg) + +In any of these proxy model, any traffic bound for the Service’s IP:Port is +proxied to an appropriate backend without the clients knowing anything +about Kubernetes or Services or Pods. Client-IP based session affinity +can be selected by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"), and you can set the max session sticky time by +setting the field `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` +if you have already set `service.spec.sessionAffinity` to "ClientIP" +(the default is “10800”). + +## Multi-Port Services + +Many `Services` need to expose more than one port. For this case, Kubernetes +supports multiple port definitions on a `Service` object. When using multiple +ports you must give all of your ports names, so that endpoints can be +disambiguated. For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +Note that the port names must only contain lowercase alphanumeric characters and `-`, and must begin & end with an alphanumeric character. `123-abc` and `web` are valid, but `123_abc` and `-web` are not valid names. + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. For example, if you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and difficult to re-configure. +The IP address that a user chooses must be a valid IP address and within the +`service-cluster-ip-range` CIDR range that is specified by flag to the API +server. If the IP address value is invalid, the apiserver returns a 422 HTTP +status code to indicate that the value is invalid. + +### Why not use round-robin DNS? + +A question that pops up every now and then is why we do all this stuff with +virtual IPs rather than just use standard round-robin DNS. There are a few +reasons: + + * There is a long history of DNS libraries not respecting DNS TTLs and + caching the results of name lookups. + * Many apps do DNS lookups once and cache the results. + * Even if apps and libraries did proper re-resolution, the load of every + client re-resolving DNS over and over would be difficult to manage. + +We try to discourage users from doing things that hurt themselves. That said, +if enough people ask for this, we may implement it as an alternative. + +## Discovering services + +Kubernetes supports 2 primary modes of finding a `Service` - environment +variables and DNS. + +### Environment variables + +When a `Pod` is run on a `Node`, the kubelet adds a set of environment variables +for each active `Service`. 
It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. + +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +*This does imply an ordering requirement* - any `Service` that a `Pod` wants to +access must be created before the `Pod` itself, or else the environment +variables will not be populated. DNS does not have this restriction. + +### DNS + +An optional (though strongly recommended) [cluster +add-on](/docs/concepts/cluster-administration/addons/) is a DNS server. The +DNS server watches the Kubernetes API for new `Services` and creates a set of +DNS records for each. If DNS has been enabled throughout the cluster then all +`Pods` should be able to do name resolution of `Services` automatically. + +For example, if you have a `Service` called `"my-service"` in a Kubernetes +`Namespace` called `"my-ns"`, a DNS record for `"my-service.my-ns"` is created. `Pods` +which exist in the `"my-ns"` `Namespace` should be able to find it by simply doing +a name lookup for `"my-service"`. `Pods` which exist in other `Namespaces` must +qualify the name as `"my-service.my-ns"`. The result of these name lookups is the +cluster IP. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` `Service` has a port named `"http"` with protocol `TCP`, you +can do a DNS SRV query for `"_http._tcp.my-service.my-ns"` to discover the port +number for `"http"`. + +The Kubernetes DNS server is the only way to access services of type +`ExternalName`. More information is available in the [DNS Pods and +Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create "headless" services by specifying `"None"` for the +cluster IP (`.spec.clusterIP`). + +This option allows developers to reduce coupling to the Kubernetes system by +allowing them freedom to do discovery their own way. Applications can still use +a self-registration pattern and adapters for other discovery systems could easily +be built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined. + +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. + +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. 
### With selectors

For headless services that define selectors, the endpoints controller creates `Endpoints` records in the API, and modifies the DNS configuration to return A records (addresses) that point directly to the `Pods` backing the `Service`.

### Without selectors

For headless services that do not define selectors, the endpoints controller does not create `Endpoints` records. However, the DNS system looks for and configures either:

 * CNAME records for [`ExternalName`](#externalname)-type services.
 * A records for any `Endpoints` that share a name with the service, for all other types.

## Publishing services - service types

For some parts of your application (e.g. frontends) you may want to expose a Service onto an external (outside of your cluster) IP address.

Kubernetes `ServiceTypes` allow you to specify what kind of service you want. The default is `ClusterIP`.

`Type` values and their behaviors are:

 * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`.
 * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You'll be able to contact the `NodePort` service, from outside the cluster, by requesting `<NodeIP>:<NodePort>`.
 * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud provider's load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created.
 * [`ExternalName`](#externalname): Maps the service to the contents of the `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record with its value. No proxying of any kind is set up. This requires version 1.7 or higher of `kube-dns`.

### Type NodePort {#nodeport}

If you set the `type` field to `NodePort`, the Kubernetes master will allocate a port from a range specified by the `--service-node-port-range` flag (default: 30000-32767), and each Node will proxy that port (the same port number on every Node) into your `Service`. That port will be reported in your `Service`'s `.spec.ports[*].nodePort` field.

If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s) (supported since Kubernetes v1.10). A comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 1.2.3.4/32) is used to filter addresses local to this node. For example, if you start kube-proxy with the flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy selects only the loopback interface for NodePort Services. The `--nodeport-addresses` flag defaults to empty (`[]`), which means all available interfaces are selected, in compliance with the existing NodePort behavior.

If you want a specific port number, you can specify a value in the `nodePort` field, and the system will allocate you that port or else the API transaction will fail (i.e. you need to take care of possible port collisions yourself). The value you specify must be in the configured range for node ports.

This gives developers the freedom to set up their own load balancers, to configure environments that are not fully supported by Kubernetes, or even to just expose one or more nodes' IPs directly.

Note that this Service will be visible as both `<NodeIP>:spec.ports[*].nodePort` and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, `<NodeIP>` would be one of the filtered node IPs.)
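For illustration, a minimal sketch of a `NodePort` Service (the port numbers are placeholders) looks like the following; omit `nodePort` to let the system pick a port from the configured range:

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  type: NodePort
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
    nodePort: 30007    # placeholder; must be inside --service-node-port-range
```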
### Type LoadBalancer {#loadbalancer}

On cloud providers which support external load balancers, setting the `type` field to `LoadBalancer` will provision a load balancer for your `Service`. The actual creation of the load balancer happens asynchronously, and information about the provisioned balancer will be published in the `Service`'s `.status.loadBalancer` field. For example:

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
  clusterIP: 10.0.171.239
  loadBalancerIP: 78.11.24.19
  type: LoadBalancer
status:
  loadBalancer:
    ingress:
    - ip: 146.148.47.155
```

Traffic from the external load balancer will be directed at the backend `Pods`, though exactly how that works depends on the cloud provider. Some cloud providers allow the `loadBalancerIP` to be specified. In those cases, the load balancer will be created with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, an ephemeral IP will be assigned to the load balancer. If the `loadBalancerIP` is specified but the cloud provider does not support the feature, the field will be ignored.

**Special notes for Azure**: To use a user-specified public `loadBalancerIP`, a static public IP address resource needs to be created first, and it should be in the same resource group as the other automatically created resources of the cluster, for example `MC_myResourceGroup_myAKSCluster_eastus`. Specify the assigned IP address as `loadBalancerIP`. Ensure that you have updated the `securityGroupName` in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues, see [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357).

{{< note >}}
Support for SCTP in the cloud provider's load balancer is up to the cloud provider's load balancer implementation. If SCTP is not supported by the cloud provider's load balancer, the Service creation request is accepted but the creation of the load balancer fails.
{{< /note >}}

#### Internal load balancer

In a mixed environment it is sometimes necessary to route traffic from services inside the same VPC.

In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints.

This can be achieved by adding the following annotations to the service, depending on your cloud provider.

{{< tabs name="service_tabs" >}}
{{% tab name="Default" %}}
Select one of the tabs.
{{% /tab %}}
{{% tab name="GCP" %}}
```yaml
[...]
metadata:
  name: my-service
  annotations:
    cloud.google.com/load-balancer-type: "Internal"
[...]
```
Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3.
For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing).
{{% /tab %}}
{{% tab name="AWS" %}}
```yaml
[...]
metadata:
  name: my-service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
[...]
```
{{% /tab %}}
{{% tab name="Azure" %}}
```yaml
[...]
metadata:
  name: my-service
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
[...]
```
{{% /tab %}}
{{% tab name="OpenStack" %}}
```yaml
[...]
metadata:
  name: my-service
  annotations:
    service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
[...]
+``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] +``` +{{% /tab %}} +{{< /tabs >}} + + +#### SSL support on AWS +For partial SSL support on clusters running on AWS, starting with 1.3 three +annotations can be added to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a pod speaks. For HTTPS and +SSL, the ELB will expect the pod to authenticate itself over the encrypted +connection. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +the following annotations may be used: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +Beginning in 1.9, services can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) +for any HTTPS or SSL listeners. To see which policies are available for use, run +the awscli command: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +Any one of those policies can then be specified using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation, for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0 the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. 
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. 
Defaults to 10, must be between 5 and 300
    service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5"
    # The amount of time, in seconds, during which no response means a failed
    # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval
    # value. Defaults to 5, must be between 2 and 60

    service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e"
    # A list of additional security groups to be added to the ELB
```

#### Network Load Balancer support on AWS

{{< feature-state for_k8s_version="v1.15" state="beta" >}}

To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`.

```yaml
  metadata:
    name: my-service
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
```

Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the client's IP through to the node. If a service's `.spec.externalTrafficPolicy` is set to `Cluster`, the client's IP address will not be propagated to the end pods.

By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be propagated to the end pods, but this could result in uneven distribution of traffic. Nodes without any pods for a particular LoadBalancer service will fail the NLB Target Group's health check on the auto-assigned `.spec.healthCheckNodePort` and not receive any traffic.

In order to achieve even traffic, either use a DaemonSet, or specify a [pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) to not locate pods on the same node.

NLB can also be used with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) annotation.

In order for client traffic to reach instances behind an NLB, the Node security groups are modified with the following IP rules:

| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description |
|------|----------|---------|------------|---------------------|
| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\<loadBalancerName\> |
| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\<loadBalancerName\> |
| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\<loadBalancerName\> |

Be aware that if `.spec.loadBalancerSourceRanges` is not set, Kubernetes will allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have public IP addresses, be aware that non-NLB traffic can also reach all instances in those modified security groups.

In order to limit which client IPs can access the Network Load Balancer, specify `loadBalancerSourceRanges`.

```yaml
spec:
  loadBalancerSourceRanges:
  - "143.231.0.0/16"
```

{{< note >}}
NLB only works with certain instance classes; see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) for supported instance types.
{{< /note >}}

### Type ExternalName {#externalname}

Services of type ExternalName map a service to a DNS name, not to a typical selector such as `my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter.

This Service definition, for example, maps the `my-service` Service in the `prod` namespace to `my.database.example.com`:

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
  namespace: prod
spec:
  type: ExternalName
  externalName: my.database.example.com
```

{{< note >}}
ExternalName accepts an IPv4 address string, but it is treated as a DNS name made up of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx, because ExternalName is intended to specify a canonical DNS name. To hardcode an IP address, consider using headless services.
{{< /note >}}

When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service will return a `CNAME` record with the value `my.database.example.com`. Accessing `my-service` works in the same way as other Services, but with the crucial difference that redirection happens at the DNS level rather than via proxying or forwarding. Should you later decide to move your database into your cluster, you can start its pods, add appropriate selectors or endpoints, and change the service's `type`.

{{< note >}}
This section is indebted to the [Kubernetes Tips - Part 1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/).
{{< /note >}}

### External IPs

If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those `externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port, will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility of the cluster administrator.

In the `ServiceSpec`, `externalIPs` can be specified along with any of the `ServiceTypes`. In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`).

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  selector:
    app: MyApp
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 9376
  externalIPs:
  - 80.11.12.10
```

## Shortcomings

Using the userspace proxy for VIPs will work at small to medium scale, but will not scale to very large clusters with thousands of Services. See [the original design proposal for portals](http://issue.k8s.io/1107) for more details.

Using the userspace proxy obscures the source IP of a packet accessing a `Service`. This makes some kinds of firewalling impossible. The iptables proxier does not obscure in-cluster source IPs, but it does still impact clients coming through a load balancer or node port.

The `Type` field is designed as nested functionality - each level adds to the previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does), but the current API requires it.

## Future work

In the future we envision that the proxy policy can become more nuanced than simple round-robin balancing, for example master-elected or sharded. We also envision that some `Services` will have "real" load balancers, in which case the VIP will simply transport the packets there.
+ +We intend to improve our support for L7 (HTTP) `Services`. + +We intend to have more flexible ingress modes for `Services` which encompass +the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more. + +## The gory details of virtual IPs + +The previous information should be sufficient for many people who just want to +use `Services`. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that users should not be +exposed to situations that could cause their actions to fail through no fault +of their own. In this situation, we are looking at network ports - users +should not have to choose a port number if that choice might collide with +another user. That is an isolation failure. + +In order to allow users to choose a port number for their `Services`, we must +ensure that no two `Services` can collide. We do that by allocating each +`Service` its own IP address. + +To ensure each service receives a unique IP, an internal allocator atomically +updates a global allocation map in etcd prior to creating each service. The map object +must exist in the registry for services to get IPs, otherwise creations will +fail with a message indicating an IP could not be allocated. A background +controller is responsible for creating that map (to migrate from older versions +of Kubernetes that used in memory locking) as well as checking for invalid +assignments due to administrator intervention and cleaning up any IPs +that were allocated but which no service currently uses. + +### IPs and VIPs + +Unlike `Pod` IP addresses, which actually route to a fixed destination, +`Service` IPs are not actually answered by a single host. Instead, we use +`iptables` (packet processing logic in Linux) to define virtual IP addresses +which are transparently redirected as needed. When clients connect to the +VIP, their traffic is automatically transported to an appropriate endpoint. +The environment variables and DNS for `Services` are actually populated in +terms of the `Service`'s VIP and port. + +We support three proxy modes - userspace, iptables and ipvs which operate +slightly differently. + +#### Userspace + +As an example, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it opens a new random port, establishes an +iptables redirect from the VIP to this new port, and starts accepting +connections on it. + +When a client connects to the VIP the iptables rule kicks in, and redirects +the packets to the `Service proxy`'s own port. The `Service proxy` chooses a +backend, and starts proxying traffic from the client to the backend. + +This means that `Service` owners can choose any port they want without risk of +collision. Clients can simply connect to an IP and port, without being aware +of which `Pods` they are actually accessing. + +#### Iptables + +Again, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. 
When a proxy sees a new `Service`, it installs a series of iptables rules which redirect from the VIP to per-`Service` rules. The per-`Service` rules link to per-`Endpoint` rules which redirect (Destination NAT) to the backends.

When a client connects to the VIP, the iptables rule kicks in. A backend is chosen (either based on session affinity or randomly) and packets are redirected to the backend. Unlike the userspace proxy, packets are never copied to userspace, the kube-proxy does not have to be running for the VIP to work, and the client IP is not altered.

This same basic flow executes when traffic comes in through a node-port or through a load-balancer, though in those cases the client IP does get altered.

#### Ipvs

Iptables operations slow down dramatically in large-scale clusters, e.g. with 10,000 Services. IPVS is designed for load balancing and is based on in-kernel hash tables, so an IPVS-based kube-proxy delivers consistent performance even with a very large number of Services. In addition, the IPVS-based kube-proxy offers more sophisticated load balancing algorithms (least connections, locality, weighted, persistence).

## API Object

Service is a top-level resource in the Kubernetes REST API. More details about the API object can be found at: [Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core).

## Supported protocols {#protocol-support}

### TCP

{{< feature-state for_k8s_version="v1.0" state="stable" >}}

You can use TCP for any kind of service, and it's the default network protocol.

### UDP

{{< feature-state for_k8s_version="v1.0" state="stable" >}}

You can use UDP for most services. For type=LoadBalancer services, UDP support depends on the cloud provider offering this facility.

### HTTP

{{< feature-state for_k8s_version="v1.1" state="stable" >}}

If your cloud provider supports it, you can use a Service in LoadBalancer mode to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints of the Service.

{{< note >}}
You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service to expose HTTP / HTTPS services.
{{< /note >}}

### PROXY protocol

{{< feature-state for_k8s_version="v1.1" state="stable" >}}

If your cloud provider supports it (e.g. [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), you can use a Service in LoadBalancer mode to configure a load balancer outside of Kubernetes itself, that will forward connections prefixed with the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt).

The load balancer will send an initial series of octets describing the incoming connection, similar to this example:

```
PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n
```

followed by the data from the client.

### SCTP

{{< feature-state for_k8s_version="v1.12" state="alpha" >}}

Kubernetes supports SCTP as a `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example `--feature-gates=SCTPSupport=true,...`. When the feature gate is enabled, users can set the `protocol` field of a `Service`, `Endpoint`, `NetworkPolicy` or `Pod` to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections.
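For illustration, a minimal sketch of a Service that uses SCTP (the name and port are placeholders, and the `SCTPSupport` feature gate must already be enabled) differs only in its `protocol` field:

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-sctp-service
spec:
  selector:
    app: MySctpApp
  ports:
  - protocol: SCTP    # requires the SCTPSupport feature gate on the apiserver
    port: 9999
    targetPort: 9999
```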
+ +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a `Pod`. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. + +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +A `Service` with `type` LoadBalancer and `protocol` SCTP can be created only if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise the `Service` creation request is rejected. The current set of cloud load balancer providers (`Azure`, `AWS`, `CloudStack`, `GCE`, `OpenStack`) do not support SCTP. + +##### Windows {#caveat-sctp-windows-os} + +SCTP is not supported on Windows based nodes. + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. + +{{% /capture %}} + +{{% capture whatsnext %}} + +Read [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/). + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_LOCAL_16221.md b/content/en/docs/concepts/services-networking/service_LOCAL_16221.md new file mode 100644 index 0000000000000..ab58196c67a15 --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_LOCAL_16221.md @@ -0,0 +1,1017 @@ +--- +reviewers: +- bprashanth +title: Services +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives containers their own IP addresses and a single DNS name for a set of containers, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +Kubernetes [`Pods`](/docs/concepts/workloads/pods/pod/) are mortal. They are born and when they die, they +are not resurrected. [`ReplicaSets`](/docs/concepts/workloads/controllers/replicaset/) in +particular create and destroy `Pods` dynamically (e.g. when scaling out or in). While each `Pod` gets its own IP address, even +those IP addresses cannot be relied upon to be stable over time. This leads to +a problem: if some set of `Pods` (let's call them backends) provides +functionality to other `Pods` (let's call them frontends) inside the Kubernetes +cluster, how do those frontends find out and keep track of which backends are +in that set? + +Enter `Services`. + +A Kubernetes `Service` is an abstraction which defines a logical set of `Pods` +and a policy by which to access them - sometimes called a micro-service. The +set of `Pods` targeted by a `Service` is (usually) determined by a [`Label +Selector`](/docs/concepts/overview/working-with-objects/labels/#label-selectors) (see below for why you might want a +`Service` without a selector). + +As an example, consider an image-processing backend which is running with 3 +replicas. Those replicas are fungible - frontends do not care which backend +they use. While the actual `Pods` that compose the backend set may change, the +frontend clients should not need to be aware of that or keep track of the list +of backends themselves. The `Service` abstraction enables this decoupling. 
+ +For Kubernetes-native applications, Kubernetes offers a simple `Endpoints` API +that is updated whenever the set of `Pods` in a `Service` changes. For +non-native applications, Kubernetes offers a virtual-IP-based bridge to Services +which redirects to the backend `Pods`. + +{{% /capture %}} + +{{% capture body %}} + +## Defining a service + +A `Service` in Kubernetes is a REST object, similar to a `Pod`. Like all of the +REST objects, a `Service` definition can be POSTed to the apiserver to create a +new instance. For example, suppose you have a set of `Pods` that each expose +port 9376 and carry a label `"app=MyApp"`. + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new `Service` object named "my-service" which +targets TCP port 9376 on any `Pod` with the `"app=MyApp"` label. This `Service` +will also be assigned an IP address (sometimes called the "cluster IP"), which +is used by the service proxies (see below). The `Service`'s selector will be +evaluated continuously and the results will be POSTed to an `Endpoints` object +also named "my-service". + +Note that a `Service` can map an incoming port to any `targetPort`. By default +the `targetPort` will be set to the same value as the `port` field. Perhaps +more interesting is that `targetPort` can be a string, referring to the name of +a port in the backend `Pods`. The actual port number assigned to that name can +be different in each backend `Pod`. This offers a lot of flexibility for +deploying and evolving your `Services`. For example, you can change the port +number that pods expose in the next version of your backend software, without +breaking clients. + +`TCP` is the default protocol for services, and you can also use any other +[supported protocol](#protocol-support). At the moment, you can only set a +single `port` and `protocol` for a Service. + +### Services without selectors + +Services generally abstract access to Kubernetes `Pods`, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in test + you use your own databases. + * You want to point your service to a service in another + [`Namespace`](/docs/concepts/overview/working-with-objects/namespaces/) or on another cluster. + * You are migrating your workload to Kubernetes and some of your backends run + outside of Kubernetes. + +In any of these scenarios you can define a service without a selector: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding `Endpoints` object will not be +created. You can manually map the service to your own specific endpoints: + +```yaml +kind: Endpoints +apiVersion: v1 +metadata: + name: my-service +subsets: + - addresses: + - ip: 1.2.3.4 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs may not be loopback (127.0.0.0/8), link-local +(169.254.0.0/16), or link-local multicast (224.0.0.0/24). They cannot be the +cluster IPs of other Kubernetes services either because the `kube-proxy` +component doesn't support virtual IPs as destination yet. +{{< /note >}} + +Accessing a `Service` without a selector works the same as if it had a selector. +The traffic will be routed to endpoints defined by the user (`1.2.3.4:9376` in +this example). 
+ +An ExternalName service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +In Kubernetes v1.0, `Services` are a "layer 4" (TCP/UDP over IP) construct, the +proxy was purely in userspace. In Kubernetes v1.1, the `Ingress` API was added +(beta) to represent "layer 7"(HTTP) services, iptables proxy was added too, +and became the default operating mode since Kubernetes v1.2. In Kubernetes v1.8.0-beta.0, +ipvs proxy was added. + +### Proxy-mode: userspace + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service` it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the `Service`'s backend `Pods` (as reported in +`Endpoints`). Which backend `Pod` to use is decided based on the +`SessionAffinity` of the `Service`. Lastly, it installs iptables rules which +capture traffic to the `Service`'s `clusterIP` (which is virtual) and `Port` +and redirects that traffic to the proxy port which proxies the backend `Pod`. +By default, the choice of backend is round robin. + +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### Proxy-mode: iptables + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service`, it installs +iptables rules which capture traffic to the `Service`'s `clusterIP` (which is +virtual) and `Port` and redirects that traffic to one of the `Service`'s +backend sets. For each `Endpoints` object, it installs iptables rules which +select a backend `Pod`. By default, the choice of backend is random. + +Obviously, iptables need not switch back between userspace and kernelspace, it should be +faster and more reliable than the userspace proxy. However, unlike the +userspace proxier, the iptables proxier cannot automatically retry another +`Pod` if the one it initially selects does not respond, so it depends on +having working [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#defining-readiness-probes). + +![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg) + +### Proxy-mode: ipvs + +{{< feature-state for_k8s_version="v1.9" state="beta" >}} + +In this mode, kube-proxy watches Kubernetes Services and Endpoints, +calls `netlink` interface to create ipvs rules accordingly and syncs ipvs rules with Kubernetes +Services and Endpoints periodically, to make sure ipvs status is +consistent with the expectation. When Service is accessed, traffic will +be redirected to one of the backend Pods. + +Similar to iptables, Ipvs is based on netfilter hook function, but uses hash +table as the underlying data structure and works in the kernel space. +That means ipvs redirects traffic much faster, and has much +better performance when syncing proxy rules. 
Furthermore, ipvs provides more +options for load balancing algorithm, such as: + +- `rr`: round-robin +- `lc`: least connection +- `dh`: destination hashing +- `sh`: source hashing +- `sed`: shortest expected delay +- `nq`: never queue + +{{< note >}} +ipvs mode assumes IPVS kernel modules are installed on the node +before running kube-proxy. When kube-proxy starts with ipvs proxy mode, +kube-proxy would validate if IPVS modules are installed on the node, if +it's not installed kube-proxy will fall back to iptables proxy mode. +{{< /note >}} + +![Services overview diagram for ipvs proxy](/images/docs/services-ipvs-overview.svg) + +In any of these proxy model, any traffic bound for the Service’s IP:Port is +proxied to an appropriate backend without the clients knowing anything +about Kubernetes or Services or Pods. Client-IP based session affinity +can be selected by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"), and you can set the max session sticky time by +setting the field `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` +if you have already set `service.spec.sessionAffinity` to "ClientIP" +(the default is “10800”). + +## Multi-Port Services + +Many `Services` need to expose more than one port. For this case, Kubernetes +supports multiple port definitions on a `Service` object. When using multiple +ports you must give all of your ports names, so that endpoints can be +disambiguated. For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +Note that the port names must only contain lowercase alphanumeric characters and `-`, and must begin & end with an alphanumeric character. `123-abc` and `web` are valid, but `123_abc` and `-web` are not valid names. + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. For example, if you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and difficult to re-configure. +The IP address that a user chooses must be a valid IP address and within the +`service-cluster-ip-range` CIDR range that is specified by flag to the API +server. If the IP address value is invalid, the apiserver returns a 422 HTTP +status code to indicate that the value is invalid. + +### Why not use round-robin DNS? + +A question that pops up every now and then is why we do all this stuff with +virtual IPs rather than just use standard round-robin DNS. There are a few +reasons: + + * There is a long history of DNS libraries not respecting DNS TTLs and + caching the results of name lookups. + * Many apps do DNS lookups once and cache the results. + * Even if apps and libraries did proper re-resolution, the load of every + client re-resolving DNS over and over would be difficult to manage. + +We try to discourage users from doing things that hurt themselves. That said, +if enough people ask for this, we may implement it as an alternative. + +## Discovering services + +Kubernetes supports 2 primary modes of finding a `Service` - environment +variables and DNS. + +### Environment variables + +When a `Pod` is run on a `Node`, the kubelet adds a set of environment variables +for each active `Service`. 
It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. + +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +*This does imply an ordering requirement* - any `Service` that a `Pod` wants to +access must be created before the `Pod` itself, or else the environment +variables will not be populated. DNS does not have this restriction. + +### DNS + +An optional (though strongly recommended) [cluster +add-on](/docs/concepts/cluster-administration/addons/) is a DNS server. The +DNS server watches the Kubernetes API for new `Services` and creates a set of +DNS records for each. If DNS has been enabled throughout the cluster then all +`Pods` should be able to do name resolution of `Services` automatically. + +For example, if you have a `Service` called `"my-service"` in a Kubernetes +`Namespace` called `"my-ns"`, a DNS record for `"my-service.my-ns"` is created. `Pods` +which exist in the `"my-ns"` `Namespace` should be able to find it by simply doing +a name lookup for `"my-service"`. `Pods` which exist in other `Namespaces` must +qualify the name as `"my-service.my-ns"`. The result of these name lookups is the +cluster IP. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` `Service` has a port named `"http"` with protocol `TCP`, you +can do a DNS SRV query for `"_http._tcp.my-service.my-ns"` to discover the port +number for `"http"`. + +The Kubernetes DNS server is the only way to access services of type +`ExternalName`. More information is available in the [DNS Pods and +Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create "headless" services by specifying `"None"` for the +cluster IP (`.spec.clusterIP`). + +This option allows developers to reduce coupling to the Kubernetes system by +allowing them freedom to do discovery their own way. Applications can still use +a self-registration pattern and adapters for other discovery systems could easily +be built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined. + +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. + +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. 
However, the DNS system looks for and configures +either: + + * CNAME records for [`ExternalName`](#externalname)-type services. + * A records for any `Endpoints` that share a name with the service, for all + other types. + +## Publishing services - service types + +For some parts of your application (e.g. frontends) you may want to expose a +Service onto an external (outside of your cluster) IP address. + + +Kubernetes `ServiceTypes` allow you to specify what kind of service you want. +The default is `ClusterIP`. + +`Type` values and their behaviors are: + + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the + default `ServiceType`. + * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port + (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will + route, is automatically created. You'll be able to contact the `NodePort` service, + from outside the cluster, + by requesting `:`. + * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud + provider's load balancer. `NodePort` and `ClusterIP` services, to which the external + load balancer will route, are automatically created. + * [`ExternalName`](#externalname): Maps the service to the contents of the + `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record + with its value. No proxying of any kind is set up. This requires version 1.7 or + higher of `kube-dns`. + +### Type NodePort {#nodeport} + +If you set the `type` field to `NodePort`, the Kubernetes master will +allocate a port from a range specified by `--service-node-port-range` flag (default: 30000-32767), and each +Node will proxy that port (the same port number on every Node) into your `Service`. +That port will be reported in your `Service`'s `.spec.ports[*].nodePort` field. + +If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s) (which is supported since Kubernetes v1.10). A comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 1.2.3.4/32) is used to filter addresses local to this node. For example, if you start kube-proxy with flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The `--nodeport-addresses` is defaulted to empty (`[]`), which means select all available interfaces and is in compliance with current NodePort behaviors. + +If you want a specific port number, you can specify a value in the `nodePort` +field, and the system will allocate you that port or else the API transaction +will fail (i.e. you need to take care about possible port collisions yourself). +The value you specify must be in the configured range for node ports. + +This gives developers the freedom to set up their own load balancers, to +configure environments that are not fully supported by Kubernetes, or +even to just expose one or more nodes' IPs directly. + +Note that this Service will be visible as both `:spec.ports[*].nodePort` +and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, would be filtered NodeIP(s).) + +### Type LoadBalancer {#loadbalancer} + +On cloud providers which support external load balancers, setting the `type` +field to `LoadBalancer` will provision a load balancer for your `Service`. 
+The actual creation of the load balancer happens asynchronously, and +information about the provisioned balancer will be published in the `Service`'s +`.status.loadBalancer` field. For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + clusterIP: 10.0.171.239 + loadBalancerIP: 78.11.24.19 + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 146.148.47.155 +``` + +Traffic from the external load balancer will be directed at the backend `Pods`, +though exactly how that works depends on the cloud provider. Some cloud providers allow +the `loadBalancerIP` to be specified. In those cases, the load-balancer will be created +with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, +an ephemeral IP will be assigned to the loadBalancer. If the `loadBalancerIP` is specified, but the +cloud provider does not support the feature, the field will be ignored. + +**Special notes for Azure**: To use user-specified public type `loadBalancerIP`, a static type +public IP address resource needs to be created first, and it should be in the same resource +group of the other automatically created resources of the cluster. For example, `MC_myResourceGroup_myAKSCluster_eastus`. Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357). + +{{< note >}} +The support of SCTP in the cloud provider's load balancer is up to the cloud provider's +load balancer implementation. If SCTP is not supported by the cloud provider's load balancer the +Service creation request is accepted but the creation of the load balancer fails. +{{< /note >}} + +#### Internal load balancer +In a mixed environment it is sometimes necessary to route traffic from services inside the same VPC. + +In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints. + +This can be achieved by adding the following annotations to the service based on cloud provider. + +{{< tabs name="service_tabs" >}} +{{% tab name="Default" %}} +Select one of the tabs. +{{% /tab %}} +{{% tab name="GCP" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + cloud.google.com/load-balancer-type: "Internal" +[...] +``` +Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3. +For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing). +{{% /tab %}} +{{% tab name="AWS" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +[...] +``` +{{% /tab %}} +{{% tab name="Azure" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" +[...] +``` +{{% /tab %}} +{{% tab name="OpenStack" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" +[...] 
+``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] +``` +{{% /tab %}} +{{< /tabs >}} + + +#### SSL support on AWS +For partial SSL support on clusters running on AWS, starting with 1.3 three +annotations can be added to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a pod speaks. For HTTPS and +SSL, the ELB will expect the pod to authenticate itself over the encrypted +connection. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +the following annotations may be used: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +Beginning in 1.9, services can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) +for any HTTPS or SSL listeners. To see which policies are available for use, run +the awscli command: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +Any one of those policies can then be specified using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation, for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0 the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. 
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. 
Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to ELB +``` + +#### Network Load Balancer support on AWS + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + +To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP through to the node. If a service's `.spec.externalTrafficPolicy` is +set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) +to not locate pods on the same node. + +NLB can also be used with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. + +In order for client traffic to reach instances behind an NLB, the Node security +groups are modified with the following IP rules: + +| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | +|------|----------|---------|------------|---------------------| +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | +| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | + +Be aware that if `.spec.loadBalancerSourceRanges` is not set, Kubernetes will +allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have +public IP addresses, be aware that non-NLB traffic can also reach all instances +in those modified security groups. + +In order to limit which client IP's can access the Network Load Balancer, +specify `loadBalancerSourceRanges`. + +```yaml +spec: + loadBalancerSourceRanges: + - "143.231.0.0/16" +``` + +{{< note >}} +NLB only works with certain instance classes, see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +for supported instance types. +{{< /note >}} + +### Type ExternalName {#externalname} + +Services of type ExternalName map a service to a DNS name, not to a typical selector such as +`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter. 
+
+This Service definition, for example, maps
+the `my-service` Service in the `prod` namespace to `my.database.example.com`:
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: my-service
+  namespace: prod
+spec:
+  type: ExternalName
+  externalName: my.database.example.com
+```
+{{< note >}}
+ExternalName accepts an IPv4 address string, but treats it as a DNS name made up of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx, because ExternalName
+is intended to specify a canonical DNS name. To hardcode an IP address, consider using headless services.
+{{< /note >}}
+
+When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service
+will return a `CNAME` record with the value `my.database.example.com`. Accessing
+`my-service` works in the same way as other Services but with the crucial
+difference that redirection happens at the DNS level rather than via proxying or
+forwarding. Should you later decide to move your database into your cluster, you
+can start its pods, add appropriate selectors or endpoints, and change the
+service's `type`.
+
+{{< note >}}
+This section is indebted to the [Kubernetes Tips - Part
+1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/).
+{{< /note >}}
+
+### External IPs
+
+If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those
+`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port,
+will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility
+of the cluster administrator.
+
+In the `ServiceSpec`, `externalIPs` can be specified along with any of the `ServiceTypes`.
+In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`):
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+  - name: http
+    protocol: TCP
+    port: 80
+    targetPort: 9376
+  externalIPs:
+  - 80.11.12.10
+```
+
+## Shortcomings
+
+Using the userspace proxy for VIPs will work at small to medium scale, but will
+not scale to very large clusters with thousands of Services. See [the original
+design proposal for portals](http://issue.k8s.io/1107) for more details.
+
+Using the userspace proxy obscures the source-IP of a packet accessing a `Service`.
+This makes some kinds of firewalling impossible. The iptables proxier does not
+obscure in-cluster source IPs, but it does still impact clients coming through
+a load-balancer or node-port.
+
+The `Type` field is designed as nested functionality - each level adds to the
+previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does
+not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does)
+but the current API requires it.
+
+## Future work
+
+In the future we envision that the proxy policy can become more nuanced than
+simple round robin balancing, for example master-elected or sharded. We also
+envision that some `Services` will have "real" load balancers, in which case the
+VIP will simply transport the packets there.
+
+We intend to improve our support for L7 (HTTP) `Services`.
+
+We intend to have more flexible ingress modes for `Services` which encompass
+the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more.
+ +## The gory details of virtual IPs + +The previous information should be sufficient for many people who just want to +use `Services`. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that users should not be +exposed to situations that could cause their actions to fail through no fault +of their own. In this situation, we are looking at network ports - users +should not have to choose a port number if that choice might collide with +another user. That is an isolation failure. + +In order to allow users to choose a port number for their `Services`, we must +ensure that no two `Services` can collide. We do that by allocating each +`Service` its own IP address. + +To ensure each service receives a unique IP, an internal allocator atomically +updates a global allocation map in etcd prior to creating each service. The map object +must exist in the registry for services to get IPs, otherwise creations will +fail with a message indicating an IP could not be allocated. A background +controller is responsible for creating that map (to migrate from older versions +of Kubernetes that used in memory locking) as well as checking for invalid +assignments due to administrator intervention and cleaning up any IPs +that were allocated but which no service currently uses. + +### IPs and VIPs + +Unlike `Pod` IP addresses, which actually route to a fixed destination, +`Service` IPs are not actually answered by a single host. Instead, we use +`iptables` (packet processing logic in Linux) to define virtual IP addresses +which are transparently redirected as needed. When clients connect to the +VIP, their traffic is automatically transported to an appropriate endpoint. +The environment variables and DNS for `Services` are actually populated in +terms of the `Service`'s VIP and port. + +We support three proxy modes - userspace, iptables and ipvs which operate +slightly differently. + +#### Userspace + +As an example, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it opens a new random port, establishes an +iptables redirect from the VIP to this new port, and starts accepting +connections on it. + +When a client connects to the VIP the iptables rule kicks in, and redirects +the packets to the `Service proxy`'s own port. The `Service proxy` chooses a +backend, and starts proxying traffic from the client to the backend. + +This means that `Service` owners can choose any port they want without risk of +collision. Clients can simply connect to an IP and port, without being aware +of which `Pods` they are actually accessing. + +#### Iptables + +Again, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it installs a series of iptables rules which +redirect from the VIP to per-`Service` rules. The per-`Service` rules link to +per-`Endpoint` rules which redirect (Destination NAT) to the backends. + +When a client connects to the VIP the iptables rule kicks in. 
A backend is
+chosen (either based on session affinity or randomly) and packets are
+redirected to the backend. Unlike the userspace proxy, packets are never
+copied to userspace, the kube-proxy does not have to be running for the VIP to
+work, and the client IP is not altered.
+
+This same basic flow executes when traffic comes in through a node-port or
+through a load-balancer, though in those cases the client IP does get altered.
+
+#### Ipvs
+
+Iptables operations slow down dramatically in large-scale clusters, for example those with 10,000 Services. IPVS is designed for load balancing and is based on in-kernel hash tables, so an IPVS-based kube-proxy provides consistent performance even with a large number of Services. In addition, an IPVS-based kube-proxy supports more sophisticated load balancing algorithms (least connections, locality, weighted, persistence).
+
+## API Object
+
+Service is a top-level resource in the Kubernetes REST API. More details about the
+API object can be found at:
+[Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core).
+
+## Supported protocols {#protocol-support}
+
+### TCP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use TCP for any kind of service, and it's the default network protocol.
+
+### UDP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use UDP for most services. For type=LoadBalancer services, UDP support
+depends on the cloud provider offering this facility.
+
+### HTTP
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it, you can use a Service in LoadBalancer mode
+to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints
+of the Service.
+
+{{< note >}}
+You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service
+to expose HTTP / HTTPS services.
+{{< /note >}}
+
+### PROXY protocol
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it (e.g. [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)),
+you can use a Service in LoadBalancer mode to configure a load balancer outside
+of Kubernetes itself, which will forward connections prefixed with the
+[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt).
+
+The load balancer will send an initial series of octets describing the
+incoming connection, similar to this example:
+
+```
+PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n
+```
+followed by the data from the client.
+
+### SCTP
+
+{{< feature-state for_k8s_version="v1.12" state="alpha" >}}
+
+Kubernetes supports SCTP as a `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `--feature-gates=SCTPSupport=true,...`. When the feature gate is enabled, users can set the `protocol` field of a `Service`, `Endpoint`, `NetworkPolicy` and `Pod` to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections (a minimal example appears below).
+
+#### Warnings {#caveat-sctp-overview}
+
+##### Support for multihomed SCTP associations {#caveat-sctp-multihomed}
+
+The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a `Pod`.
+
+NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules.
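+
+The following is a minimal, hypothetical sketch of a Service that uses SCTP, as described
+above. The Service name, selector label, and port numbers are illustrative only, and the
+sketch assumes a cluster where the `SCTPSupport` feature gate has been enabled:
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: my-sctp-service   # hypothetical name, for illustration only
+spec:
+  selector:
+    app: MyApp
+  ports:
+  # SCTP is only accepted here when the SCTPSupport feature gate is enabled on the apiserver.
+  - protocol: SCTP
+    port: 7777
+    targetPort: 7777
+```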
+ +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +A `Service` with `type` LoadBalancer and `protocol` SCTP can be created only if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise the `Service` creation request is rejected. The current set of cloud load balancer providers (`Azure`, `AWS`, `CloudStack`, `GCE`, `OpenStack`) do not support SCTP. + +##### Windows {#caveat-sctp-windows-os} + +SCTP is not supported on Windows based nodes. + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. + +{{% /capture %}} + +{{% capture whatsnext %}} + +Read [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/). + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_LOCAL_80035.md b/content/en/docs/concepts/services-networking/service_LOCAL_80035.md new file mode 100644 index 0000000000000..ab58196c67a15 --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_LOCAL_80035.md @@ -0,0 +1,1017 @@ +--- +reviewers: +- bprashanth +title: Services +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives containers their own IP addresses and a single DNS name for a set of containers, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +Kubernetes [`Pods`](/docs/concepts/workloads/pods/pod/) are mortal. They are born and when they die, they +are not resurrected. [`ReplicaSets`](/docs/concepts/workloads/controllers/replicaset/) in +particular create and destroy `Pods` dynamically (e.g. when scaling out or in). While each `Pod` gets its own IP address, even +those IP addresses cannot be relied upon to be stable over time. This leads to +a problem: if some set of `Pods` (let's call them backends) provides +functionality to other `Pods` (let's call them frontends) inside the Kubernetes +cluster, how do those frontends find out and keep track of which backends are +in that set? + +Enter `Services`. + +A Kubernetes `Service` is an abstraction which defines a logical set of `Pods` +and a policy by which to access them - sometimes called a micro-service. The +set of `Pods` targeted by a `Service` is (usually) determined by a [`Label +Selector`](/docs/concepts/overview/working-with-objects/labels/#label-selectors) (see below for why you might want a +`Service` without a selector). + +As an example, consider an image-processing backend which is running with 3 +replicas. Those replicas are fungible - frontends do not care which backend +they use. While the actual `Pods` that compose the backend set may change, the +frontend clients should not need to be aware of that or keep track of the list +of backends themselves. The `Service` abstraction enables this decoupling. + +For Kubernetes-native applications, Kubernetes offers a simple `Endpoints` API +that is updated whenever the set of `Pods` in a `Service` changes. For +non-native applications, Kubernetes offers a virtual-IP-based bridge to Services +which redirects to the backend `Pods`. + +{{% /capture %}} + +{{% capture body %}} + +## Defining a service + +A `Service` in Kubernetes is a REST object, similar to a `Pod`. 
Like all of the +REST objects, a `Service` definition can be POSTed to the apiserver to create a +new instance. For example, suppose you have a set of `Pods` that each expose +port 9376 and carry a label `"app=MyApp"`. + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new `Service` object named "my-service" which +targets TCP port 9376 on any `Pod` with the `"app=MyApp"` label. This `Service` +will also be assigned an IP address (sometimes called the "cluster IP"), which +is used by the service proxies (see below). The `Service`'s selector will be +evaluated continuously and the results will be POSTed to an `Endpoints` object +also named "my-service". + +Note that a `Service` can map an incoming port to any `targetPort`. By default +the `targetPort` will be set to the same value as the `port` field. Perhaps +more interesting is that `targetPort` can be a string, referring to the name of +a port in the backend `Pods`. The actual port number assigned to that name can +be different in each backend `Pod`. This offers a lot of flexibility for +deploying and evolving your `Services`. For example, you can change the port +number that pods expose in the next version of your backend software, without +breaking clients. + +`TCP` is the default protocol for services, and you can also use any other +[supported protocol](#protocol-support). At the moment, you can only set a +single `port` and `protocol` for a Service. + +### Services without selectors + +Services generally abstract access to Kubernetes `Pods`, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in test + you use your own databases. + * You want to point your service to a service in another + [`Namespace`](/docs/concepts/overview/working-with-objects/namespaces/) or on another cluster. + * You are migrating your workload to Kubernetes and some of your backends run + outside of Kubernetes. + +In any of these scenarios you can define a service without a selector: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding `Endpoints` object will not be +created. You can manually map the service to your own specific endpoints: + +```yaml +kind: Endpoints +apiVersion: v1 +metadata: + name: my-service +subsets: + - addresses: + - ip: 1.2.3.4 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs may not be loopback (127.0.0.0/8), link-local +(169.254.0.0/16), or link-local multicast (224.0.0.0/24). They cannot be the +cluster IPs of other Kubernetes services either because the `kube-proxy` +component doesn't support virtual IPs as destination yet. +{{< /note >}} + +Accessing a `Service` without a selector works the same as if it had a selector. +The traffic will be routed to endpoints defined by the user (`1.2.3.4:9376` in +this example). + +An ExternalName service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. 
`kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +In Kubernetes v1.0, `Services` are a "layer 4" (TCP/UDP over IP) construct, the +proxy was purely in userspace. In Kubernetes v1.1, the `Ingress` API was added +(beta) to represent "layer 7"(HTTP) services, iptables proxy was added too, +and became the default operating mode since Kubernetes v1.2. In Kubernetes v1.8.0-beta.0, +ipvs proxy was added. + +### Proxy-mode: userspace + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service` it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the `Service`'s backend `Pods` (as reported in +`Endpoints`). Which backend `Pod` to use is decided based on the +`SessionAffinity` of the `Service`. Lastly, it installs iptables rules which +capture traffic to the `Service`'s `clusterIP` (which is virtual) and `Port` +and redirects that traffic to the proxy port which proxies the backend `Pod`. +By default, the choice of backend is round robin. + +![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) + +### Proxy-mode: iptables + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of `Service` and `Endpoints` objects. For each `Service`, it installs +iptables rules which capture traffic to the `Service`'s `clusterIP` (which is +virtual) and `Port` and redirects that traffic to one of the `Service`'s +backend sets. For each `Endpoints` object, it installs iptables rules which +select a backend `Pod`. By default, the choice of backend is random. + +Obviously, iptables need not switch back between userspace and kernelspace, it should be +faster and more reliable than the userspace proxy. However, unlike the +userspace proxier, the iptables proxier cannot automatically retry another +`Pod` if the one it initially selects does not respond, so it depends on +having working [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#defining-readiness-probes). + +![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg) + +### Proxy-mode: ipvs + +{{< feature-state for_k8s_version="v1.9" state="beta" >}} + +In this mode, kube-proxy watches Kubernetes Services and Endpoints, +calls `netlink` interface to create ipvs rules accordingly and syncs ipvs rules with Kubernetes +Services and Endpoints periodically, to make sure ipvs status is +consistent with the expectation. When Service is accessed, traffic will +be redirected to one of the backend Pods. + +Similar to iptables, Ipvs is based on netfilter hook function, but uses hash +table as the underlying data structure and works in the kernel space. +That means ipvs redirects traffic much faster, and has much +better performance when syncing proxy rules. Furthermore, ipvs provides more +options for load balancing algorithm, such as: + +- `rr`: round-robin +- `lc`: least connection +- `dh`: destination hashing +- `sh`: source hashing +- `sed`: shortest expected delay +- `nq`: never queue + +{{< note >}} +ipvs mode assumes IPVS kernel modules are installed on the node +before running kube-proxy. When kube-proxy starts with ipvs proxy mode, +kube-proxy would validate if IPVS modules are installed on the node, if +it's not installed kube-proxy will fall back to iptables proxy mode. 
+{{< /note >}} + +![Services overview diagram for ipvs proxy](/images/docs/services-ipvs-overview.svg) + +In any of these proxy model, any traffic bound for the Service’s IP:Port is +proxied to an appropriate backend without the clients knowing anything +about Kubernetes or Services or Pods. Client-IP based session affinity +can be selected by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"), and you can set the max session sticky time by +setting the field `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` +if you have already set `service.spec.sessionAffinity` to "ClientIP" +(the default is “10800”). + +## Multi-Port Services + +Many `Services` need to expose more than one port. For this case, Kubernetes +supports multiple port definitions on a `Service` object. When using multiple +ports you must give all of your ports names, so that endpoints can be +disambiguated. For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +Note that the port names must only contain lowercase alphanumeric characters and `-`, and must begin & end with an alphanumeric character. `123-abc` and `web` are valid, but `123_abc` and `-web` are not valid names. + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. For example, if you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and difficult to re-configure. +The IP address that a user chooses must be a valid IP address and within the +`service-cluster-ip-range` CIDR range that is specified by flag to the API +server. If the IP address value is invalid, the apiserver returns a 422 HTTP +status code to indicate that the value is invalid. + +### Why not use round-robin DNS? + +A question that pops up every now and then is why we do all this stuff with +virtual IPs rather than just use standard round-robin DNS. There are a few +reasons: + + * There is a long history of DNS libraries not respecting DNS TTLs and + caching the results of name lookups. + * Many apps do DNS lookups once and cache the results. + * Even if apps and libraries did proper re-resolution, the load of every + client re-resolving DNS over and over would be difficult to manage. + +We try to discourage users from doing things that hurt themselves. That said, +if enough people ask for this, we may implement it as an alternative. + +## Discovering services + +Kubernetes supports 2 primary modes of finding a `Service` - environment +variables and DNS. + +### Environment variables + +When a `Pod` is run on a `Node`, the kubelet adds a set of environment variables +for each active `Service`. It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. 
+ +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +*This does imply an ordering requirement* - any `Service` that a `Pod` wants to +access must be created before the `Pod` itself, or else the environment +variables will not be populated. DNS does not have this restriction. + +### DNS + +An optional (though strongly recommended) [cluster +add-on](/docs/concepts/cluster-administration/addons/) is a DNS server. The +DNS server watches the Kubernetes API for new `Services` and creates a set of +DNS records for each. If DNS has been enabled throughout the cluster then all +`Pods` should be able to do name resolution of `Services` automatically. + +For example, if you have a `Service` called `"my-service"` in a Kubernetes +`Namespace` called `"my-ns"`, a DNS record for `"my-service.my-ns"` is created. `Pods` +which exist in the `"my-ns"` `Namespace` should be able to find it by simply doing +a name lookup for `"my-service"`. `Pods` which exist in other `Namespaces` must +qualify the name as `"my-service.my-ns"`. The result of these name lookups is the +cluster IP. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` `Service` has a port named `"http"` with protocol `TCP`, you +can do a DNS SRV query for `"_http._tcp.my-service.my-ns"` to discover the port +number for `"http"`. + +The Kubernetes DNS server is the only way to access services of type +`ExternalName`. More information is available in the [DNS Pods and +Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create "headless" services by specifying `"None"` for the +cluster IP (`.spec.clusterIP`). + +This option allows developers to reduce coupling to the Kubernetes system by +allowing them freedom to do discovery their own way. Applications can still use +a self-registration pattern and adapters for other discovery systems could easily +be built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined. + +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. + +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. However, the DNS system looks for and configures +either: + + * CNAME records for [`ExternalName`](#externalname)-type services. + * A records for any `Endpoints` that share a name with the service, for all + other types. + +## Publishing services - service types + +For some parts of your application (e.g. frontends) you may want to expose a +Service onto an external (outside of your cluster) IP address. 
+ + +Kubernetes `ServiceTypes` allow you to specify what kind of service you want. +The default is `ClusterIP`. + +`Type` values and their behaviors are: + + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the + default `ServiceType`. + * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port + (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will + route, is automatically created. You'll be able to contact the `NodePort` service, + from outside the cluster, + by requesting `:`. + * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud + provider's load balancer. `NodePort` and `ClusterIP` services, to which the external + load balancer will route, are automatically created. + * [`ExternalName`](#externalname): Maps the service to the contents of the + `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record + with its value. No proxying of any kind is set up. This requires version 1.7 or + higher of `kube-dns`. + +### Type NodePort {#nodeport} + +If you set the `type` field to `NodePort`, the Kubernetes master will +allocate a port from a range specified by `--service-node-port-range` flag (default: 30000-32767), and each +Node will proxy that port (the same port number on every Node) into your `Service`. +That port will be reported in your `Service`'s `.spec.ports[*].nodePort` field. + +If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s) (which is supported since Kubernetes v1.10). A comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 1.2.3.4/32) is used to filter addresses local to this node. For example, if you start kube-proxy with flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The `--nodeport-addresses` is defaulted to empty (`[]`), which means select all available interfaces and is in compliance with current NodePort behaviors. + +If you want a specific port number, you can specify a value in the `nodePort` +field, and the system will allocate you that port or else the API transaction +will fail (i.e. you need to take care about possible port collisions yourself). +The value you specify must be in the configured range for node ports. + +This gives developers the freedom to set up their own load balancers, to +configure environments that are not fully supported by Kubernetes, or +even to just expose one or more nodes' IPs directly. + +Note that this Service will be visible as both `:spec.ports[*].nodePort` +and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, would be filtered NodeIP(s).) + +### Type LoadBalancer {#loadbalancer} + +On cloud providers which support external load balancers, setting the `type` +field to `LoadBalancer` will provision a load balancer for your `Service`. +The actual creation of the load balancer happens asynchronously, and +information about the provisioned balancer will be published in the `Service`'s +`.status.loadBalancer` field. 
For example: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + clusterIP: 10.0.171.239 + loadBalancerIP: 78.11.24.19 + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 146.148.47.155 +``` + +Traffic from the external load balancer will be directed at the backend `Pods`, +though exactly how that works depends on the cloud provider. Some cloud providers allow +the `loadBalancerIP` to be specified. In those cases, the load-balancer will be created +with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, +an ephemeral IP will be assigned to the loadBalancer. If the `loadBalancerIP` is specified, but the +cloud provider does not support the feature, the field will be ignored. + +**Special notes for Azure**: To use user-specified public type `loadBalancerIP`, a static type +public IP address resource needs to be created first, and it should be in the same resource +group of the other automatically created resources of the cluster. For example, `MC_myResourceGroup_myAKSCluster_eastus`. Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357). + +{{< note >}} +The support of SCTP in the cloud provider's load balancer is up to the cloud provider's +load balancer implementation. If SCTP is not supported by the cloud provider's load balancer the +Service creation request is accepted but the creation of the load balancer fails. +{{< /note >}} + +#### Internal load balancer +In a mixed environment it is sometimes necessary to route traffic from services inside the same VPC. + +In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints. + +This can be achieved by adding the following annotations to the service based on cloud provider. + +{{< tabs name="service_tabs" >}} +{{% tab name="Default" %}} +Select one of the tabs. +{{% /tab %}} +{{% tab name="GCP" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + cloud.google.com/load-balancer-type: "Internal" +[...] +``` +Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3. +For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing). +{{% /tab %}} +{{% tab name="AWS" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +[...] +``` +{{% /tab %}} +{{% tab name="Azure" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" +[...] +``` +{{% /tab %}} +{{% tab name="OpenStack" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" +[...] +``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] 
+``` +{{% /tab %}} +{{< /tabs >}} + + +#### SSL support on AWS +For partial SSL support on clusters running on AWS, starting with 1.3 three +annotations can be added to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a pod speaks. For HTTPS and +SSL, the ELB will expect the pod to authenticate itself over the encrypted +connection. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +the following annotations may be used: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +Beginning in 1.9, services can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) +for any HTTPS or SSL listeners. To see which policies are available for use, run +the awscli command: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +Any one of those policies can then be specified using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation, for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0 the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60. 
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. 
Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to ELB +``` + +#### Network Load Balancer support on AWS + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + +To use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` with the value set to `nlb`. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP through to the node. If a service's `.spec.externalTrafficPolicy` is +set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) +to not locate pods on the same node. + +NLB can also be used with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. + +In order for client traffic to reach instances behind an NLB, the Node security +groups are modified with the following IP rules: + +| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | +|------|----------|---------|------------|---------------------| +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | +| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | + +Be aware that if `.spec.loadBalancerSourceRanges` is not set, Kubernetes will +allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have +public IP addresses, be aware that non-NLB traffic can also reach all instances +in those modified security groups. + +In order to limit which client IP's can access the Network Load Balancer, +specify `loadBalancerSourceRanges`. + +```yaml +spec: + loadBalancerSourceRanges: + - "143.231.0.0/16" +``` + +{{< note >}} +NLB only works with certain instance classes, see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +for supported instance types. +{{< /note >}} + +### Type ExternalName {#externalname} + +Services of type ExternalName map a service to a DNS name, not to a typical selector such as +`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter. 
+ +This Service definition, for example, maps +the `my-service` Service in the `prod` namespace to `my.database.example.com`: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service + namespace: prod +spec: + type: ExternalName + externalName: my.database.example.com +``` +{{< note >}} +ExternalName accepts an IPv4 address string, but as a DNS name comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName +is intended to specify a canonical DNS name. To hardcode an IP address, consider headless services. +{{< /note >}} + +When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service +will return a `CNAME` record with the value `my.database.example.com`. Accessing +`my-service` works in the same way as other Services but with the crucial +difference that redirection happens at the DNS level rather than via proxying or +forwarding. Should you later decide to move your database into your cluster, you +can start its pods, add appropriate selectors or endpoints, and change the +service's `type`. + +{{< note >}} +This section is indebted to the [Kubernetes Tips - Part +1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/). +{{< /note >}} + +### External IPs + +If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those +`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port, +will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility +of the cluster administrator. + +In the `ServiceSpec`, `externalIPs` can be specified along with any of the `ServiceTypes`. +In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`"" (`externalIP:port`) + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + externalIPs: + - 80.11.12.10 +``` + +## Shortcomings + +Using the userspace proxy for VIPs will work at small to medium scale, but will +not scale to very large clusters with thousands of Services. See [the original +design proposal for portals](http://issue.k8s.io/1107) for more details. + +Using the userspace proxy obscures the source-IP of a packet accessing a `Service`. +This makes some kinds of firewalling impossible. The iptables proxier does not +obscure in-cluster source IPs, but it does still impact clients coming through +a load-balancer or node-port. + +The `Type` field is designed as nested functionality - each level adds to the +previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does +not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does) +but the current API requires it. + +## Future work + +In the future we envision that the proxy policy can become more nuanced than +simple round robin balancing, for example master-elected or sharded. We also +envision that some `Services` will have "real" load balancers, in which case the +VIP will simply transport the packets there. + +We intend to improve our support for L7 (HTTP) `Services`. + +We intend to have more flexible ingress modes for `Services` which encompass +the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more. 
+ +## The gory details of virtual IPs + +The previous information should be sufficient for many people who just want to +use `Services`. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that users should not be +exposed to situations that could cause their actions to fail through no fault +of their own. In this situation, we are looking at network ports - users +should not have to choose a port number if that choice might collide with +another user. That is an isolation failure. + +In order to allow users to choose a port number for their `Services`, we must +ensure that no two `Services` can collide. We do that by allocating each +`Service` its own IP address. + +To ensure each service receives a unique IP, an internal allocator atomically +updates a global allocation map in etcd prior to creating each service. The map object +must exist in the registry for services to get IPs, otherwise creations will +fail with a message indicating an IP could not be allocated. A background +controller is responsible for creating that map (to migrate from older versions +of Kubernetes that used in memory locking) as well as checking for invalid +assignments due to administrator intervention and cleaning up any IPs +that were allocated but which no service currently uses. + +### IPs and VIPs + +Unlike `Pod` IP addresses, which actually route to a fixed destination, +`Service` IPs are not actually answered by a single host. Instead, we use +`iptables` (packet processing logic in Linux) to define virtual IP addresses +which are transparently redirected as needed. When clients connect to the +VIP, their traffic is automatically transported to an appropriate endpoint. +The environment variables and DNS for `Services` are actually populated in +terms of the `Service`'s VIP and port. + +We support three proxy modes - userspace, iptables and ipvs which operate +slightly differently. + +#### Userspace + +As an example, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it opens a new random port, establishes an +iptables redirect from the VIP to this new port, and starts accepting +connections on it. + +When a client connects to the VIP the iptables rule kicks in, and redirects +the packets to the `Service proxy`'s own port. The `Service proxy` chooses a +backend, and starts proxying traffic from the client to the backend. + +This means that `Service` owners can choose any port they want without risk of +collision. Clients can simply connect to an IP and port, without being aware +of which `Pods` they are actually accessing. + +#### Iptables + +Again, consider the image processing application described above. +When the backend `Service` is created, the Kubernetes master assigns a virtual +IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it installs a series of iptables rules which +redirect from the VIP to per-`Service` rules. The per-`Service` rules link to +per-`Endpoint` rules which redirect (Destination NAT) to the backends. + +When a client connects to the VIP the iptables rule kicks in. 
A backend is +chosen (either based on session affinity or randomly) and packets are +redirected to the backend. Unlike the userspace proxy, packets are never +copied to userspace, the kube-proxy does not have to be running for the VIP to +work, and the client IP is not altered. + +This same basic flow executes when traffic comes in through a node-port or +through a load-balancer, though in those cases the client IP does get altered. + +#### Ipvs + +Iptables operations slow down dramatically in large scale cluster e.g 10,000 Services. IPVS is designed for load balancing and based on in-kernel hash tables. So we can achieve performance consistency in large number of services from IPVS-based kube-proxy. Meanwhile, IPVS-based kube-proxy has more sophisticated load balancing algorithms (least conns, locality, weighted, persistence). + +## API Object + +Service is a top-level resource in the Kubernetes REST API. More details about the +API object can be found at: +[Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core). + +## Supported protocols {#protocol-support} + +### TCP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use TCP for any kind of service, and it's the default network protocol. + +### UDP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use UDP for most services. For type=LoadBalancer services, UDP support +depends on the cloud provider offering this facility. + +### HTTP + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it, you can use a Service in LoadBalancer mode +to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints +of the Service. + +{{< note >}} +You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service +to expose HTTP / HTTPS services. +{{< /note >}} + +### PROXY protocol + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it (eg, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +you can use a Service in LoadBalancer mode to configure a load balancer outside +of Kubernetes itself, that will forward connections prefixed with +[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). + +The load balancer will send an initial series of octets describing the +incoming connection, similar to this example + +``` +PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n +``` +followed by the data from the client. + +### SCTP + +{{< feature-state for_k8s_version="v1.12" state="alpha" >}} + +Kubernetes supports SCTP as a `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `“--feature-gates=SCTPSupport=true,...”`. When the feature gate is enabled, users can set the `protocol` field of a `Service`, `Endpoint`, `NetworkPolicy` and `Pod` to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections. + +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a `Pod`. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. 
+ +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +A `Service` with `type` LoadBalancer and `protocol` SCTP can be created only if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise the `Service` creation request is rejected. The current set of cloud load balancer providers (`Azure`, `AWS`, `CloudStack`, `GCE`, `OpenStack`) do not support SCTP. + +##### Windows {#caveat-sctp-windows-os} + +SCTP is not supported on Windows based nodes. + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. + +{{% /capture %}} + +{{% capture whatsnext %}} + +Read [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/). + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_REMOTE_16221.md b/content/en/docs/concepts/services-networking/service_REMOTE_16221.md new file mode 100644 index 0000000000000..ad3344beffa27 --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_REMOTE_16221.md @@ -0,0 +1,1151 @@ +--- +reviewers: +- bprashanth +title: Service +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +{{< glossary_definition term_id="service" length="short" >}} + +No need to modify your application to use an unfamiliar service discovery mechanism. +Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, +and can load-balance across them. + +{{% /capture %}} + +{{% capture body %}} + +## Motivation + +Kubernetes {{< glossary_tooltip term_id="pod" text="Pods" >}} are mortal. +They are born and when they die, they are not resurrected. +If you use a {{< glossary_tooltip term_id="deployment" >}} to run your app, +it can create and destroy Pods dynamically (e.g. when scaling out or in). + +Each Pod gets its own IP address, however the set of Pods +for a Deployment running in one moment in time could be different from +the set of Pods running that application a moment later. + +This leads to a problem: if some set of Pods (call them “backends”) provides +functionality to other Pods (call them “frontends”) inside your cluster, +how do those frontends find out and keep track of which IP address to connect +to, so that the frontend can use the backend part of the workload? + +Enter _Services_. + +## Service resources {#service-resource} + +In Kubernetes, a Service is an abstraction which defines a logical set of Pods +and a policy by which to access them (you'll sometimes see this pattern called +a micro-service). The set of Pods targeted by a Service is usually determined +by a {{< glossary_tooltip text="selector" term_id="selector" >}} +(see [below](#services-without-selectors) for why you might want a Service +_without_ a selector). + +For example: consider a stateless image-processing backend which is running with +3 replicas. Those replicas are fungible—frontends do not care which backend +they use. 
While the actual Pods that compose the backend set may change, the +frontend clients should not need to be aware of that, nor should they need to keep +track of the set of backends themselves. + +The Service abstraction enables this decoupling. + +### Cloud-native service discovery + +If you're able to use Kubernetes APIs for service discovery in your application, +you can query the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +for Endpoints, that will be updated whenever the set of Pods in a Service changes. + +For non-native applications, Kubernetes offers ways to place a network port or load +balancer in between your application and the backend Pods. + +## Defining a service + +A Service in Kubernetes is a REST object, similar to a Pod. Like all of the +REST objects, you can `POST` a Service definition to the API server to create +a new instance. + +For example, suppose you have a set of Pods that each listen on TCP port 9376 +and carry a label `"app=MyApp"`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new Service object named “my-service” which +targets TCP port 9376 on any Pod with the `"app=MyApp"` label. + +This Service will also be assigned an IP address (sometimes called the "cluster IP"), +which is used by the service proxies +(see [Virtual IPs and service proxies](#virtual-ips-and-service-proxies) below). + +The controller for the Service selector will continuously scan for Pods that +match its selector, and will then POST any updates to an Endpoint object +also named “my-service”. + +{{< note >}} +A Service can map _any_ incoming `port` to a `targetPort`. By default, and +for convenience, the `targetPort` will be set to the same value as the `port` +field. +{{< /note >}} + +Port definitions in Pods have names, and you can reference these names in the +targetPort attribute of a Service. This will work even if there are a mixture +of Pods in the Service, with the same network protocol available via different +port numbers but a single configured name. +This offers a lot of flexibility for deploying and evolving your Services. +For example, you can change the port number that pods expose in the next +version of your backend software, without breaking clients. + +The default protocol for services is TCP; you can also use any other +[supported protocol](#protocol-support). + +As many Services need to expose more than one port, Kubernetes supports multiple +port definitions on a Service object. +Each port definition can have the same `protocol`, or a different one. + +### Services without selectors + +Services most commonly abstract access to Kubernetes Pods, but they can also +abstract other kinds of backends. For example: + + * You want to have an external database cluster in production, but in your + test environment you use your own databases. + * You want to point your service to a service in a different + {{< glossary_tooltip term_id="namespace" >}} or on another cluster. + * You are migrating a workload to Kubernetes. Whilst evaluating the approach, + you run only a proportion of your backends in Kubernetes. + +In any of these scenarios you can define a service _without_ a Pod selector. 
+For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding Endpoint object will *not* be +created automatically. You can manually map the service to the network address and port +where it's running, by adding an Endpoint object manually: + +```yaml +apiVersion: v1 +kind: Endpoints +metadata: + name: my-service +subsets: + - addresses: + - ip: 192.0.2.42 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs _must not_ be: loopback (127.0.0.0/8 for IPv4, ::1/128 for IPv6), or +link-local (169.254.0.0/16 and 224.0.0.0/24 for IPv4, fe80::/64 for IPv6). + +Endpoint IP addresses also cannot be the cluster IPs of other Kubernetes services, +because {{< glossary_tooltip term_id="kube-proxy" >}} doesn't support virtual IPs +as a destination. +{{< /note >}} + +Accessing a Service without a selector works the same as if it had a selector. +In the example above, traffic will be routed to the single endpoint defined in +the YAML: `192.0.2.42:9376` (TCP). + +An ExternalName Service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +### Why not use round-robin DNS? + +A question that pops up every now and then is why Kubernetes relies on +proxying to forward inbound traffic to backends. What about other +approaches? For example, would it be possible to configure DNS records that +have multiple A values (or AAAA for IPv6), and rely on round-robin name +resolution? + +There are a few reasons for using proxying for Services: + + * There is a long history of DNS implementations not respecting record TTLs, + and caching the results of name lookups after they should have expired. + * Some apps do DNS lookups only once and cache the results indefinitely. + * Even if apps and libraries did proper re-resolution, the low or zero TTLs + on the DNS records could impose a high load on DNS that then becomes + difficult to manage. + +### Version compatibility + +Since Kubernetes v1.0 you have been able to use the +[userspace proxy mode](#proxy-mode-userspace). +Kubernetes v1.1 added iptables mode proxying, and in Kubernetes v1.2 the +iptables mode for kube-proxy became the default. +Kubernetes v1.8 added ipvs proxy mode. + +### User space proxy mode {#proxy-mode-userspace} + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of Service and Endpoint objects. For each Service it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the Service's backend Pods (as reported via +Endpoints). kube-proxy takes the `SessionAffinity` setting of the Service into +account when deciding which backend Pod to use. + +Lastly, the user-space proxy installs iptables rules which capture traffic to +the Service's `clusterIP` (which is virtual) and `port`. The rules +redirect that traffic to the proxy port which proxies the backend Pod. + +By default, kube-proxy in userspace mode chooses a backend via a round-robin algorithm. 
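+
+The `SessionAffinity` setting mentioned above is configured on the Service itself.
+As a minimal sketch only (the selector, port numbers, and the timeout value below
+are purely illustrative; session affinity is described in more detail further down
+this page), a Service that asks the proxy to keep each client on the same backend
+Pod might look like this:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-sticky-service
+spec:
+  selector:
+    app: MyApp
+  sessionAffinity: ClientIP          # the default is "None"
+  sessionAffinityConfig:
+    clientIP:
+      timeoutSeconds: 600            # illustrative value; see the affinity notes later on this page
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 9376
+```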
+
+![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg)
+
+### `iptables` proxy mode {#proxy-mode-iptables}
+
+In this mode, kube-proxy watches the Kubernetes control plane for the addition and
+removal of Service and Endpoint objects. For each Service, it installs
+iptables rules which capture traffic to the Service's `clusterIP` (which is
+virtual) and `port` and redirects that traffic to one of the Service's
+backend sets. For each Endpoint object, it installs iptables rules which
+select a backend Pod.
+
+By default, kube-proxy in iptables mode chooses a backend at random.
+
+Using iptables to handle traffic has a lower system overhead, because traffic
+is handled by Linux netfilter without the need to switch between userspace and
+kernel space. This approach is also likely to be more reliable.
+
+If kube-proxy is running in iptables mode and the first Pod that's selected
+does not respond, the connection will fail. This is different from userspace
+mode: in that scenario, kube-proxy would detect that the connection to the first
+Pod had failed and would automatically retry with a different backend Pod.
+
+You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
+to verify that backend Pods are working OK, so that kube-proxy in iptables mode
+only sees backends that test out as healthy. Doing this means you avoid
+having traffic sent via kube-proxy to a Pod that's known to have failed.
+
+![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg)
+
+### IPVS proxy mode {#proxy-mode-ipvs}
+
+{{< feature-state for_k8s_version="v1.11" state="stable" >}}
+
+In `ipvs` mode, kube-proxy watches Kubernetes Services and Endpoints,
+calls the `netlink` interface to create IPVS rules accordingly, and synchronizes
+IPVS rules with Kubernetes Services and Endpoints periodically.
+This control loop ensures that the IPVS status matches the desired state.
+When accessing a Service, IPVS directs traffic to one of the backend Pods.
+
+The IPVS proxy mode is based on netfilter hook functions, similar to the
+iptables mode, but uses a hash table as the underlying data structure and works
+in the kernel space.
+That means kube-proxy in IPVS mode redirects traffic with a lower latency than
+kube-proxy in iptables mode, with much better performance when synchronising
+proxy rules. Compared to the other proxy modes, IPVS mode also supports a
+higher throughput of network traffic.
+
+IPVS provides more options for balancing traffic to backend Pods;
+these are:
+
+- `rr`: round-robin
+- `lc`: least connection (smallest number of open connections)
+- `dh`: destination hashing
+- `sh`: source hashing
+- `sed`: shortest expected delay
+- `nq`: never queue
+
+{{< note >}}
+To run kube-proxy in IPVS mode, you must make the IPVS kernel modules available
+on the node before starting kube-proxy.
+
+When kube-proxy starts in IPVS proxy mode, it verifies whether the IPVS
+kernel modules are available. If they are not detected, kube-proxy
+falls back to running in iptables proxy mode.
+{{< /note >}}
+
+![Services overview diagram for IPVS proxy](/images/docs/services-ipvs-overview.svg)
+
+In any of these proxy modes, any traffic bound for the Service's IP:Port is
+proxied to an appropriate backend without the clients knowing anything
+about Kubernetes or Services or Pods. 
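+
+Which of these modes is used, and (for IPVS) which scheduler is applied, is decided
+when kube-proxy starts rather than per Service. The sketch below is only an
+illustration: it assumes that kube-proxy reads its settings from a configuration
+file passed via its `--config` flag and that the `kubeproxy.config.k8s.io/v1alpha1`
+configuration API is in use; check how kube-proxy is launched in your cluster
+before relying on it.
+
+```yaml
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+# mode selects which proxy mode to use: "userspace", "iptables" or "ipvs"
+mode: "ipvs"
+ipvs:
+  # one of the IPVS schedulers listed above, here least connection
+  scheduler: "lc"
+```
+
+If the IPVS kernel modules are missing on a node, kube-proxy on that node falls
+back to iptables mode as described in the note above, so it is worth verifying
+the modules before switching the configuration.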
+ +If you want to make sure that connections from a particular client +are passed to the same Pod each time, you can select session affinity based +the on client's IP addresses by setting `service.spec.sessionAffinity` to "ClientIP" +(the default is "None"). +You can then also set the maximum session sticky time by setting +`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` appropriately. +(the default value is 10800, which works out to be 3 hours). + +## Multi-Port Services + +For some Services, you need to expose more than one port. +Kubernetes lets you configure multiple port definitions on a Service object. +When using multiple ports for a Service, you must give all of your ports names +so that these are unambiguous. For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + - name: https + protocol: TCP + port: 443 + targetPort: 9377 +``` + +{{< note >}} +As with Kubernetes {{< glossary_tooltip term_id="name" text="names">}} in general, names for ports +must only contain lowercase alphanumeric characters and `-`. Port names must +also start and end with an alphanumeric character. + +For example, the names `123-abc` and `web` are valid, but `123_abc` and `-web` are not. +{{< /note >}} + +## Choosing your own IP address + +You can specify your own cluster IP address as part of a `Service` creation +request. To do this, set the `.spec.clusterIP` field. For example, if you +already have an existing DNS entry that you wish to reuse, or legacy systems +that are configured for a specific IP address and difficult to re-configure. + +The IP address that you choose must be a valid IPv4 or IPv6 address from within the +`service-cluster-ip-range` CIDR range that is configured for the API server. +If you try to create a Service with an invalid clusterIP address value, the API +server will returns a 422 HTTP status code to indicate that there's a problem. + +## Discovering services + +Kubernetes supports 2 primary modes of finding a Service - environment +variables and DNS. + +### Environment variables + +When a Pod is run on a Node, the kubelet adds a set of environment variables +for each active Service. It supports both [Docker links +compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see +[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, +where the Service name is upper-cased and dashes are converted to underscores. + +For example, the Service `"redis-master"` which exposes TCP port 6379 and has been +allocated cluster IP address 10.0.0.11 produces the following environment +variables: + +```shell +REDIS_MASTER_SERVICE_HOST=10.0.0.11 +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_PORT=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 +REDIS_MASTER_PORT_6379_TCP_PROTO=tcp +REDIS_MASTER_PORT_6379_TCP_PORT=6379 +REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11 +``` + +{{< note >}} +When you have a Pod that might need to acccess a Service, and you are using +the environment variable method to publish the port and cluster IP to the client +Pods, you must create the Service *before* the client Pods come into existence. +Otherwise, those client Pods won't have their environment variables populated. 
+ +If you only use DNS to discover the cluster IP for a Service, you don't need to +worry about this ordering issue. +{{< /note >}} + +### DNS + +You can (and almost always should) set up a DNS service for your Kubernetes +cluster using an [add-on](/docs/concepts/cluster-administration/addons/). + +A cluster-aware DNS server, such as CoreDNS, watches the Kubernetes API for new +Services and creates a set of DNS records for each one. If DNS has been enabled +throughout your cluster then all Pods should automatically be able to resolve +Services by their DNS name. + +For example, if you have a Service called `"my-service"` in a Kubernetes +Namespace `"my-ns"`, the control plane and the DNS service acting together will +create a DNS record for `"my-service.my-ns"`. Pods in the `"my-ns"` Namespace +should be able to find it by simply doing a name lookup for `my-service` +(`"my-service.my-ns"` would also work). + +Pods in other Namespaces must qualify the name as `my-service.my-ns`. These names +will resolve to the cluster IP assigned for the Service. + +Kubernetes also supports DNS SRV (service) records for named ports. If the +`"my-service.my-ns"` Service has a port named `"http"` with protocol set to +`TCP`, you can do a DNS SRV query for `_http._tcp.my-service.my-ns` to discover +the port number for `"http"`, as well as the IP address. + +The Kubernetes DNS server is the only way to access `ExternalName` Services. +You can find more information about `ExternalName` resolution in +[DNS Pods and Services](/docs/concepts/services-networking/dns-pod-service/). + +## Headless services + +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create what are termed “headless” Services, by explicitly +specifying `"None"` for the cluster IP (`.spec.clusterIP`). + +You can use a headless Service to interface with other service discovery mechanisms, +without being tied to Kubernetes' implementation. For example, you could implement +a custom [Operator]( +be built upon this API. + +For such `Services`, a cluster IP is not allocated, kube-proxy does not handle +these services, and there is no load balancing or proxying done by the platform +for them. How DNS is automatically configured depends on whether the service has +selectors defined. + +### With selectors + +For headless services that define selectors, the endpoints controller creates +`Endpoints` records in the API, and modifies the DNS configuration to return A +records (addresses) that point directly to the `Pods` backing the `Service`. + +### Without selectors + +For headless services that do not define selectors, the endpoints controller does +not create `Endpoints` records. However, the DNS system looks for and configures +either: + + * CNAME records for [`ExternalName`](#externalname)-type services. + * A records for any `Endpoints` that share a name with the service, for all + other types. + +## Publishing services (ServiceTypes) {#publishing-services-service-types} + +For some parts of your application (e.g. frontends) you may want to expose a +Service onto an external IP address, one that's outside of your cluster. + +Kubernetes `ServiceTypes` allow you to specify what kind of service you want. +The default is `ClusterIP`. + +`Type` values and their behaviors are: + + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the + default `ServiceType`. 
 * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port
+   (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will
+   route, is automatically created. You'll be able to contact the `NodePort` service,
+   from outside the cluster, by requesting `<NodeIP>:<NodePort>`.
+ * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud
+   provider's load balancer. `NodePort` and `ClusterIP` services, to which the external
+   load balancer will route, are automatically created.
+ * [`ExternalName`](#externalname): Maps the service to the contents of the
+   `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record
+   with its value. No proxying of any kind is set up.
+
+{{< note >}}
+
+You need CoreDNS version 1.7 or higher to use the `ExternalName` type.
+
+{{< /note >}}
+
+### Type NodePort {#nodeport}
+
+If you set the `type` field to `NodePort`, the Kubernetes control plane will
+allocate a port from a range specified by the `--service-node-port-range` flag (default: 30000-32767).
+Each node will proxy that port (the same port number on every Node) into your Service.
+Your service will report the allocated port in its `.spec.ports[*].nodePort` field.
+
+If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s); this is supported since Kubernetes v1.10.
+This flag takes a comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 192.0.2.0/25) to specify IP address ranges that kube-proxy should consider as local to this node.
+
+For example, if you start kube-proxy with the flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The default for `--nodeport-addresses` is an empty list, which means that kube-proxy should consider all available network interfaces for NodePort. (That's also compatible with earlier Kubernetes releases.)
+
+If you want a specific port number, you can specify a value in the `nodePort`
+field. The control plane will either allocate that port to you or report that
+the API transaction failed.
+This means that you need to take care of possible port collisions yourself.
+You also have to use a valid port number, one that's inside the range configured
+for NodePort use.
+
+Using a NodePort gives you the freedom to set up your own load balancing solution,
+to configure environments that are not fully supported by Kubernetes, or even
+to just expose one or more nodes' IPs directly.
+
+Note that this Service will be visible as both `<NodeIP>:spec.ports[*].nodePort`
+and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in
+kube-proxy is set, `<NodeIP>` would be filtered to the specified node IP(s).)
+
+### Type LoadBalancer {#loadbalancer}
+
+On cloud providers which support external load balancers, setting the `type`
+field to `LoadBalancer` will provision a load balancer for your Service.
+The actual creation of the load balancer happens asynchronously, and
+information about the provisioned balancer will be published in the Service's
+`.status.loadBalancer` field. 
For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + clusterIP: 10.0.171.239 + loadBalancerIP: 78.11.24.19 + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 146.148.47.155 +``` + +Traffic from the external load balancer will be directed at the backend Pods, +though exactly how that works depends on the cloud provider. + +Some cloud providers allow you to specify the `loadBalancerIP`. In those cases, the load-balancer will be created +with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified, +the loadBalancer will be set up with an ephemeral IP address. If you specify a `loadBalancerIP` +but your cloud provider does not support the feature, the `loadbalancerIP` field that you +set will be ignored. + +{{< note >}} +If you're using SCTP, see the [caveat](#caveat-sctp-loadbalancer-service-type) below about the +`LoadBalancer` Service type. +{{< /note >}} + +{{< note >}} + +On **Azure**, if you want to use a user-specified public type `loadBalancerIP`, you first need +to create a static type public IP address resource. This public IP address resource should +be in the same resource group of the other automatically created resources of the cluster. +For example, `MC_myResourceGroup_myAKSCluster_eastus`. + +Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357). + +{{< /note >}} + +#### Internal load balancer +In a mixed environment it is sometimes necessary to route traffic from services inside the same +(virtual) network address block. + +In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints. + +You can achieve this by adding one the following annotations to a Service. +The annotation to add depends on the cloud service provider you're using. + +{{< tabs name="service_tabs" >}} +{{% tab name="Default" %}} +Select one of the tabs. +{{% /tab %}} +{{% tab name="GCP" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + cloud.google.com/load-balancer-type: "Internal" +[...] +``` +Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3. +For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing). +{{% /tab %}} +{{% tab name="AWS" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +[...] +``` +{{% /tab %}} +{{% tab name="Azure" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" +[...] +``` +{{% /tab %}} +{{% tab name="OpenStack" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" +[...] +``` +{{% /tab %}} +{{% tab name="Baidu Cloud" %}} +```yaml +[...] +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" +[...] 
+``` +{{% /tab %}} +{{< /tabs >}} + + +#### TLS support on AWS {#ssl-support-on-aws} + +For partial TLS / SSL support on clusters running on AWS, you can add three +annotations to a `LoadBalancer` service: + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 +``` + +The first specifies the ARN of the certificate to use. It can be either a +certificate from a third party issuer that was uploaded to IAM or one created +within AWS Certificate Manager. + +```yaml +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp) +``` + +The second annotation specifies which protocol a Pod speaks. For HTTPS and +SSL, the ELB will expect the Pod to authenticate itself over the encrypted +connection, using a certificate. + +HTTP and HTTPS will select layer 7 proxying: the ELB will terminate +the connection with the user, parse headers and inject the `X-Forwarded-For` +header with the user's IP address (Pods will only see the IP address of the +ELB at the other end of its connection) when forwarding requests. + +TCP and SSL will select layer 4 proxying: the ELB will forward traffic without +modifying the headers. + +In a mixed-use environment where some ports are secured and others are left unencrypted, +you can use the following annotations: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443" +``` + +In the above example, if the service contained three ports, `80`, `443`, and +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just +be proxied HTTP. + +From Kubernetes v1.9 onwrds you can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) with HTTPS or SSL listeners for your Services. +To see which policies are available for use, you can the `aws` command line tool: + +```bash +aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName' +``` + +You can then specify any one of those policies using the +"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`" +annotation; for example: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" +``` + +#### PROXY protocol support on AWS + +To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) +support for clusters running on AWS, you can use the following service +annotation: + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" +``` + +Since version 1.3.0, the use of this annotation applies to all ports proxied by the ELB +and cannot be configured otherwise. + +#### ELB Access Logs on AWS + +There are several annotations to manage access logs for ELB services on AWS. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60 minutes. 
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. Defaults to 10, must be between 5 and 300 + service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5" + # The amount of time, in seconds, during which no response means a failed + # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval + # value. 
Defaults to 5, must be between 2 and 60 + + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" + # A list of additional security groups to be added to the ELB +``` + +#### Network Load Balancer support on AWS [alpha] {#aws-nlb-support} + +{{< warning >}} +This is an alpha feature and is not yet recommended for production clusters. +{{< /warning >}} + +Starting from Kubernetes v1.9.0, you can use AWS Network Load Balancer (NLB) with Services. To +use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type` +with the value set to `nlb`. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +{{< note >}} +NLB only works with certain instance classes; see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets) +on Elastic Load Balancing for a list of supported instance types. +{{< /note >}} + +Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the +client's IP address through to the node. If a service's `.spec.externalTrafficPolicy` +is set to `Cluster`, the client's IP address will not be propagated to the end +pods. + +By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be +propagated to the end pods, but this could result in uneven distribution of +traffic. Nodes without any pods for a particular LoadBalancer service will fail +the NLB Target Group's health check on the auto-assigned +`.spec.healthCheckNodePort` and not receive any traffic. + +In order to achieve even traffic, either use a DaemonSet, or specify a +[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) +to not locate on the same node. + +You can also use NLB Services with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer) +annotation. + +In order for client traffic to reach instances behind an NLB, the Node security +groups are modified with the following IP rules: + +| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | +|------|----------|---------|------------|---------------------| +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | +| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | + +In order to limit which client IP's can access the Network Load Balancer, +specify `loadBalancerSourceRanges`. + +```yaml +spec: + loadBalancerSourceRanges: + - "143.231.0.0/16" +``` + +{{< note >}} +If `.spec.loadBalancerSourceRanges` is not set, Kubernetes will +allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have +public IP addresses, be aware that non-NLB traffic can also reach all instances +in those modified security groups. + +{{< /note >}} + +### Type ExternalName {#externalname} + +Services of type ExternalName map a service to a DNS name, not to a typical selector such as +`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter. 
+ +This Service definition, for example, maps +the `my-service` Service in the `prod` namespace to `my.database.example.com`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: prod +spec: + type: ExternalName + externalName: my.database.example.com +``` +{{< note >}} +ExternalName accepts an IPv4 address string, but as a DNS names comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName +is intended to specify a canonical DNS name. To hardcode an IP address, consider using +[headless services](#headless-services). +{{< /note >}} + +When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service +will return a `CNAME` record with the value `my.database.example.com`. Accessing +`my-service` works in the same way as other Services but with the crucial +difference that redirection happens at the DNS level rather than via proxying or +forwarding. Should you later decide to move your database into your cluster, you +can start its pods, add appropriate selectors or endpoints, and change the +Service's `type`. + + +{{< note >}} +This section is indebted to the [Kubernetes Tips - Part +1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/). +{{< /note >}} + +### External IPs + +If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those +`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port, +will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility +of the cluster administrator. + +In the Service spec, `externalIPs` can be specified along with any of the `ServiceTypes`. +In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`) + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9376 + externalIPs: + - 80.11.12.10 +``` + +## Shortcomings + +Using the userspace proxy for VIPs will work at small to medium scale, but will +not scale to very large clusters with thousands of Services. The [original +design proposal for portals](http://issue.k8s.io/1107) has more details on +this. + +Using the userspace proxy obscures the source IP address of a packet accessing +a Service. +This makes some kinds of network filtering (firewalling) impossible. The iptables +proxy mode does not +obscure in-cluster source IPs, but it does still impact clients coming through +a load balancer or node-port. + +The `Type` field is designed as nested functionality - each level adds to the +previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does +not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does) +but the current API requires it. + +## Virtual IP implementation {#the-gory-details-of-virtual-ips} + +The previous information should be sufficient for many people who just want to +use Services. However, there is a lot going on behind the scenes that may be +worth understanding. + +### Avoiding collisions + +One of the primary philosophies of Kubernetes is that you should not be +exposed to situations that could cause your actions to fail through no fault +of your own. 
For the design of the Service resource, this means not making
+you choose your own port number for a Service if that choice might collide with
+someone else's choice. That is an isolation failure.
+
+In order to allow you to choose a port number for your Services, we must
+ensure that no two Services can collide. Kubernetes does that by allocating each
+Service its own IP address.
+
+To ensure each service receives a unique IP, an internal allocator atomically
+updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}}
+prior to creating each Service. The map object must exist in the registry for
+Services to get IP address assignments, otherwise creations will
+fail with a message indicating an IP address could not be allocated.
+
+In the control plane, a background controller is responsible for creating that
+map (needed to support migrating from older versions of Kubernetes that used
+in-memory locking). Kubernetes also uses controllers to check for invalid
+assignments (for example, due to administrator intervention) and to clean up
+allocated IP addresses that are no longer used by any Services.
+
+### Service IP addresses {#ips-and-vips}
+
+Unlike Pod IP addresses, which actually route to a fixed destination,
+Service IPs are not actually answered by a single host. Instead, kube-proxy
+uses iptables (packet processing logic in Linux) to define _virtual_ IP addresses
+which are transparently redirected as needed. When clients connect to the
+VIP, their traffic is automatically transported to an appropriate endpoint.
+The environment variables and DNS for Services are actually populated in
+terms of the Service's virtual IP address (and port).
+
+kube-proxy supports three proxy modes (userspace, iptables and IPVS), which
+each operate slightly differently.
+
+#### Userspace
+
+As an example, consider the image processing application described above.
+When the backend Service is created, the Kubernetes master assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it opens a new random port, establishes an
+iptables redirect from the virtual IP address to this new port, and starts accepting
+connections on it.
+
+When a client connects to the Service's virtual IP address, the iptables
+rule kicks in, and redirects the packets to the proxy's own port.
+The "Service proxy" chooses a backend, and starts proxying traffic from the client to the backend.
+
+This means that Service owners can choose any port they want without risk of
+collision. Clients can simply connect to an IP and port, without being aware
+of which Pods they are actually accessing.
+
+#### iptables
+
+Again, consider the image processing application described above.
+When the backend Service is created, the Kubernetes control plane assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it installs a series of iptables rules which
+redirect from the virtual IP address to per-Service rules. The per-Service
+rules link to per-Endpoint rules which redirect traffic (using destination NAT)
+to the backends.
+
+When a client connects to the Service's virtual IP address the iptables rule kicks in.
+A backend is chosen (either based on session affinity or randomly) and packets are
+redirected to the backend. 
Unlike the userspace proxy, packets are never +copied to userspace, the kube-proxy does not have to be running for the virtual +IP address to work, and Nodes see traffic arriving from the unaltered client IP +address. + +This same basic flow executes when traffic comes in through a node-port or +through a load-balancer, though in those cases the client IP does get altered. + +#### IPVS + +iptables operations slow down dramatically in large scale cluster e.g 10,000 Services. +IPVS is designed for load balancing and based on in-kernel hash tables. So you can achieve performance consistency in large number of services from IPVS-based kube-proxy. Meanwhile, IPVS-based kube-proxy has more sophisticated load balancing algorithms (least conns, locality, weighted, persistence). + +## API Object + +Service is a top-level resource in the Kubernetes REST API. You can find more details +about the API object at: [Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core). + +## Supported protocols {#protocol-support} + +### TCP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use TCP for any kind of service, and it's the default network protocol. + +### UDP + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + +You can use UDP for most services. For type=LoadBalancer services, UDP support +depends on the cloud provider offering this facility. + +### HTTP + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it, you can use a Service in LoadBalancer mode +to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints +of the Service. + +{{< note >}} +You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service +to expose HTTP / HTTPS services. +{{< /note >}} + +### PROXY protocol + +{{< feature-state for_k8s_version="v1.1" state="stable" >}} + +If your cloud provider supports it (eg, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +you can use a Service in LoadBalancer mode to configure a load balancer outside +of Kubernetes itself, that will forward connections prefixed with +[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). + +The load balancer will send an initial series of octets describing the +incoming connection, similar to this example + +``` +PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n +``` +followed by the data from the client. + +### SCTP + +{{< feature-state for_k8s_version="v1.12" state="alpha" >}} + +Kubernetes supports SCTP as a `protocol` value in Service, Endpoint, NetworkPolicy and Pod definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `--feature-gates=SCTPSupport=true,…`. + +When the feature gate is enabled, you can set the `protocol` field of a Service, Endpoint, NetworkPolicy or Pod to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections. + +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +{{< warning >}} +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. 
+{{< /warning >}} + +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +{{< warning >}} +You can only create a Service with `type` LoadBalancer plus `protocol` SCTP if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise, the Service creation request is rejected. The current set of cloud load balancer providers (Azure, AWS, CloudStack, GCE, OpenStack) all lack support for SCTP. +{{< /warning >}} + +##### Windows {#caveat-sctp-windows-os} + +{{< warning >}} +SCTP is not supported on Windows based nodes. +{{< /warning >}} + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +{{< warning >}} +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. +{{< /warning >}} + +## Future work + +In the future, the proxy policy for Services can become more nuanced than +simple round-robin balancing, for example master-elected or sharded. We also +envision that some Services will have "real" load balancers, in which case the +virtual IP address will simply transport the packets there. + +The Kubernetes project intends to improve support for L7 (HTTP) Services. + +The Kubernetes project intends to have more flexible ingress modes for Services +which encompass the current ClusterIP, NodePort, and LoadBalancer modes and more. + + +{{% /capture %}} + +{{% capture whatsnext %}} + +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) +* Read about [Ingress](/docs/concepts/services-networking/ingress/) + +{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/service_REMOTE_80035.md b/content/en/docs/concepts/services-networking/service_REMOTE_80035.md new file mode 100644 index 0000000000000..ad3344beffa27 --- /dev/null +++ b/content/en/docs/concepts/services-networking/service_REMOTE_80035.md @@ -0,0 +1,1151 @@ +--- +reviewers: +- bprashanth +title: Service +feature: + title: Service discovery and load balancing + description: > + No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, and can load-balance across them. + +content_template: templates/concept +weight: 10 +--- + + +{{% capture overview %}} + +{{< glossary_definition term_id="service" length="short" >}} + +No need to modify your application to use an unfamiliar service discovery mechanism. +Kubernetes gives pods their own IP addresses and a single DNS name for a set of pods, +and can load-balance across them. + +{{% /capture %}} + +{{% capture body %}} + +## Motivation + +Kubernetes {{< glossary_tooltip term_id="pod" text="Pods" >}} are mortal. +They are born and when they die, they are not resurrected. +If you use a {{< glossary_tooltip term_id="deployment" >}} to run your app, +it can create and destroy Pods dynamically (e.g. when scaling out or in). + +Each Pod gets its own IP address, however the set of Pods +for a Deployment running in one moment in time could be different from +the set of Pods running that application a moment later. + +This leads to a problem: if some set of Pods (call them “backends”) provides +functionality to other Pods (call them “frontends”) inside your cluster, +how do those frontends find out and keep track of which IP address to connect +to, so that the frontend can use the backend part of the workload? + +Enter _Services_. 
+ +## Service resources {#service-resource} + +In Kubernetes, a Service is an abstraction which defines a logical set of Pods +and a policy by which to access them (you'll sometimes see this pattern called +a micro-service). The set of Pods targeted by a Service is usually determined +by a {{< glossary_tooltip text="selector" term_id="selector" >}} +(see [below](#services-without-selectors) for why you might want a Service +_without_ a selector). + +For example: consider a stateless image-processing backend which is running with +3 replicas. Those replicas are fungible—frontends do not care which backend +they use. While the actual Pods that compose the backend set may change, the +frontend clients should not need to be aware of that, nor should they need to keep +track of the set of backends themselves. + +The Service abstraction enables this decoupling. + +### Cloud-native service discovery + +If you're able to use Kubernetes APIs for service discovery in your application, +you can query the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +for Endpoints, that will be updated whenever the set of Pods in a Service changes. + +For non-native applications, Kubernetes offers ways to place a network port or load +balancer in between your application and the backend Pods. + +## Defining a service + +A Service in Kubernetes is a REST object, similar to a Pod. Like all of the +REST objects, you can `POST` a Service definition to the API server to create +a new instance. + +For example, suppose you have a set of Pods that each listen on TCP port 9376 +and carry a label `"app=MyApp"`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +This specification will create a new Service object named “my-service” which +targets TCP port 9376 on any Pod with the `"app=MyApp"` label. + +This Service will also be assigned an IP address (sometimes called the "cluster IP"), +which is used by the service proxies +(see [Virtual IPs and service proxies](#virtual-ips-and-service-proxies) below). + +The controller for the Service selector will continuously scan for Pods that +match its selector, and will then POST any updates to an Endpoint object +also named “my-service”. + +{{< note >}} +A Service can map _any_ incoming `port` to a `targetPort`. By default, and +for convenience, the `targetPort` will be set to the same value as the `port` +field. +{{< /note >}} + +Port definitions in Pods have names, and you can reference these names in the +targetPort attribute of a Service. This will work even if there are a mixture +of Pods in the Service, with the same network protocol available via different +port numbers but a single configured name. +This offers a lot of flexibility for deploying and evolving your Services. +For example, you can change the port number that pods expose in the next +version of your backend software, without breaking clients. + +The default protocol for services is TCP; you can also use any other +[supported protocol](#protocol-support). + +As many Services need to expose more than one port, Kubernetes supports multiple +port definitions on a Service object. +Each port definition can have the same `protocol`, or a different one. + +### Services without selectors + +Services most commonly abstract access to Kubernetes Pods, but they can also +abstract other kinds of backends. 
For example: + + * You want to have an external database cluster in production, but in your + test environment you use your own databases. + * You want to point your service to a service in a different + {{< glossary_tooltip term_id="namespace" >}} or on another cluster. + * You are migrating a workload to Kubernetes. Whilst evaluating the approach, + you run only a proportion of your backends in Kubernetes. + +In any of these scenarios you can define a service _without_ a Pod selector. +For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + ports: + - protocol: TCP + port: 80 + targetPort: 9376 +``` + +Because this service has no selector, the corresponding Endpoint object will *not* be +created automatically. You can manually map the service to the network address and port +where it's running, by adding an Endpoint object manually: + +```yaml +apiVersion: v1 +kind: Endpoints +metadata: + name: my-service +subsets: + - addresses: + - ip: 192.0.2.42 + ports: + - port: 9376 +``` + +{{< note >}} +The endpoint IPs _must not_ be: loopback (127.0.0.0/8 for IPv4, ::1/128 for IPv6), or +link-local (169.254.0.0/16 and 224.0.0.0/24 for IPv4, fe80::/64 for IPv6). + +Endpoint IP addresses also cannot be the cluster IPs of other Kubernetes services, +because {{< glossary_tooltip term_id="kube-proxy" >}} doesn't support virtual IPs +as a destination. +{{< /note >}} + +Accessing a Service without a selector works the same as if it had a selector. +In the example above, traffic will be routed to the single endpoint defined in +the YAML: `192.0.2.42:9376` (TCP). + +An ExternalName Service is a special case of service that does not have +selectors and uses DNS names instead. For more information, see the +[ExternalName](#externalname) section later in this document. + +## Virtual IPs and service proxies + +Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is +responsible for implementing a form of virtual IP for `Services` of type other +than [`ExternalName`](#externalname). + +### Why not use round-robin DNS? + +A question that pops up every now and then is why Kubernetes relies on +proxying to forward inbound traffic to backends. What about other +approaches? For example, would it be possible to configure DNS records that +have multiple A values (or AAAA for IPv6), and rely on round-robin name +resolution? + +There are a few reasons for using proxying for Services: + + * There is a long history of DNS implementations not respecting record TTLs, + and caching the results of name lookups after they should have expired. + * Some apps do DNS lookups only once and cache the results indefinitely. + * Even if apps and libraries did proper re-resolution, the low or zero TTLs + on the DNS records could impose a high load on DNS that then becomes + difficult to manage. + +### Version compatibility + +Since Kubernetes v1.0 you have been able to use the +[userspace proxy mode](#proxy-mode-userspace). +Kubernetes v1.1 added iptables mode proxying, and in Kubernetes v1.2 the +iptables mode for kube-proxy became the default. +Kubernetes v1.8 added ipvs proxy mode. + +### User space proxy mode {#proxy-mode-userspace} + +In this mode, kube-proxy watches the Kubernetes master for the addition and +removal of Service and Endpoint objects. For each Service it opens a +port (randomly chosen) on the local node. Any connections to this "proxy port" +will be proxied to one of the Service's backend Pods (as reported via +Endpoints). 
kube-proxy takes the `SessionAffinity` setting of the Service into
+account when deciding which backend Pod to use.
+
+Lastly, the user-space proxy installs iptables rules which capture traffic to
+the Service's `clusterIP` (which is virtual) and `port`. The rules
+redirect that traffic to the proxy port, which proxies to the backend Pod.
+
+By default, kube-proxy in userspace mode chooses a backend via a round-robin algorithm.
+
+![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg)
+
+### `iptables` proxy mode {#proxy-mode-iptables}
+
+In this mode, kube-proxy watches the Kubernetes control plane for the addition and
+removal of Service and Endpoint objects. For each Service, it installs
+iptables rules which capture traffic to the Service's `clusterIP` (which is
+virtual) and `port` and redirects that traffic to one of the Service's
+backend sets. For each Endpoint object, it installs iptables rules which
+select a backend Pod.
+
+By default, kube-proxy in iptables mode chooses a backend at random.
+
+Using iptables to handle traffic has a lower system overhead, because traffic
+is handled by Linux netfilter without the need to switch between userspace and
+kernel space. This approach is also likely to be more reliable.
+
+If kube-proxy is running in iptables mode and the first Pod that's selected
+does not respond, the connection will fail. This is different from userspace
+mode: in that scenario, kube-proxy would detect that the connection to the first
+Pod had failed and would automatically retry with a different backend Pod.
+
+You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
+to verify that backend Pods are working OK, so that kube-proxy in iptables mode
+only sees backends that test out as healthy. Doing this means you avoid
+having traffic sent via kube-proxy to a Pod that's known to have failed.
+
+![Services overview diagram for iptables proxy](/images/docs/services-iptables-overview.svg)
+
+### IPVS proxy mode {#proxy-mode-ipvs}
+
+{{< feature-state for_k8s_version="v1.11" state="stable" >}}
+
+In `ipvs` mode, kube-proxy watches Kubernetes Services and Endpoints,
+calls the `netlink` interface to create IPVS rules accordingly, and synchronizes
+IPVS rules with Kubernetes Services and Endpoints periodically.
+This control loop ensures that IPVS status matches the desired
+state.
+When accessing a Service, IPVS will direct traffic to one of the backend Pods.
+
+The IPVS proxy mode is based on netfilter hook functions, similar to the
+iptables mode, but uses a hash table as the underlying data structure and works
+in the kernel space.
+That means kube-proxy in IPVS mode redirects traffic with a lower latency than
+kube-proxy in iptables mode, with much better performance when synchronising
+proxy rules. Compared to the other proxy modes, IPVS mode also supports a
+higher throughput of network traffic.
+
+IPVS provides more options for balancing traffic to backend Pods;
+these are:
+
+- `rr`: round-robin
+- `lc`: least connection (smallest number of open connections)
+- `dh`: destination hashing
+- `sh`: source hashing
+- `sed`: shortest expected delay
+- `nq`: never queue
+
+{{< note >}}
+To run kube-proxy in IPVS mode, you must make IPVS available on
+the node before starting kube-proxy.
+
+When kube-proxy starts in IPVS proxy mode, it verifies whether IPVS
+kernel modules are available. If those are not detected, then kube-proxy
+falls back to running in iptables proxy mode.
+{{< /note >}}
+
+![Services overview diagram for IPVS proxy](/images/docs/services-ipvs-overview.svg)
+
+In any of these proxy models, any traffic bound for the Service’s IP:Port is
+proxied to an appropriate backend without the clients knowing anything
+about Kubernetes or Services or Pods.
+
+If you want to make sure that connections from a particular client
+are passed to the same Pod each time, you can select session affinity based
+on the client's IP address by setting `service.spec.sessionAffinity` to "ClientIP"
+(the default is "None").
+You can then also set the maximum session sticky time by setting
+`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` appropriately
+(the default value is 10800, which works out to be 3 hours).
+
+## Multi-Port Services
+
+For some Services, you need to expose more than one port.
+Kubernetes lets you configure multiple port definitions on a Service object.
+When using multiple ports for a Service, you must give all of your ports names
+so that these are unambiguous. For example:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 9376
+    - name: https
+      protocol: TCP
+      port: 443
+      targetPort: 9377
+```
+
+{{< note >}}
+As with Kubernetes {{< glossary_tooltip term_id="name" text="names">}} in general, names for ports
+must only contain lowercase alphanumeric characters and `-`. Port names must
+also start and end with an alphanumeric character.
+
+For example, the names `123-abc` and `web` are valid, but `123_abc` and `-web` are not.
+{{< /note >}}
+
+## Choosing your own IP address
+
+You can specify your own cluster IP address as part of a `Service` creation
+request. To do this, set the `.spec.clusterIP` field. You might do this, for example,
+if you already have an existing DNS entry that you wish to reuse, or legacy systems
+that are configured for a specific IP address and are difficult to re-configure.
+
+The IP address that you choose must be a valid IPv4 or IPv6 address from within the
+`service-cluster-ip-range` CIDR range that is configured for the API server.
+If you try to create a Service with an invalid clusterIP address value, the API
+server returns a 422 HTTP status code to indicate that there's a problem.
+
+## Discovering services
+
+Kubernetes supports two primary modes of finding a Service: environment
+variables and DNS.
+
+### Environment variables
+
+When a Pod is run on a Node, the kubelet adds a set of environment variables
+for each active Service. It supports both [Docker links
+compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see
+[makeLinkVariables](http://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49))
+and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables,
+where the Service name is upper-cased and dashes are converted to underscores.
+
+For example, the Service `"redis-master"` which exposes TCP port 6379 and has been
+allocated cluster IP address 10.0.0.11 produces the following environment
+variables:
+
+```shell
+REDIS_MASTER_SERVICE_HOST=10.0.0.11
+REDIS_MASTER_SERVICE_PORT=6379
+REDIS_MASTER_PORT=tcp://10.0.0.11:6379
+REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379
+REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
+REDIS_MASTER_PORT_6379_TCP_PORT=6379
+REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11
+```
+
+{{< note >}}
+When you have a Pod that might need to access a Service, and you are using
+the environment variable method to publish the port and cluster IP to the client
+Pods, you must create the Service *before* the client Pods come into existence.
+Otherwise, those client Pods won't have their environment variables populated.
+
+If you only use DNS to discover the cluster IP for a Service, you don't need to
+worry about this ordering issue.
+{{< /note >}}
+
+### DNS
+
+You can (and almost always should) set up a DNS service for your Kubernetes
+cluster using an [add-on](/docs/concepts/cluster-administration/addons/).
+
+A cluster-aware DNS server, such as CoreDNS, watches the Kubernetes API for new
+Services and creates a set of DNS records for each one. If DNS has been enabled
+throughout your cluster then all Pods should automatically be able to resolve
+Services by their DNS name.
+
+For example, if you have a Service called `"my-service"` in a Kubernetes
+Namespace `"my-ns"`, the control plane and the DNS service acting together will
+create a DNS record for `"my-service.my-ns"`. Pods in the `"my-ns"` Namespace
+should be able to find it by simply doing a name lookup for `my-service`
+(`"my-service.my-ns"` would also work).
+
+Pods in other Namespaces must qualify the name as `my-service.my-ns`. These names
+will resolve to the cluster IP assigned for the Service.
+
+Kubernetes also supports DNS SRV (service) records for named ports. If the
+`"my-service.my-ns"` Service has a port named `"http"` with protocol set to
+`TCP`, you can do a DNS SRV query for `_http._tcp.my-service.my-ns` to discover
+the port number for `"http"`, as well as the IP address.
+
+The Kubernetes DNS server is the only way to access `ExternalName` Services.
+You can find more information about `ExternalName` resolution in
+[DNS Pods and Services](/docs/concepts/services-networking/dns-pod-service/).
+
+## Headless services
+
+Sometimes you don't need or want load-balancing and a single service IP. In
+this case, you can create what are termed “headless” Services, by explicitly
+specifying `"None"` for the cluster IP (`.spec.clusterIP`).
+
+You can use a headless Service to interface with other service discovery mechanisms,
+without being tied to Kubernetes' implementation. For example, you could implement
+a custom Operator built upon this API.
+
+For such `Services`, a cluster IP is not allocated, kube-proxy does not handle
+these services, and there is no load balancing or proxying done by the platform
+for them. How DNS is automatically configured depends on whether the service has
+selectors defined.
+
+### With selectors
+
+For headless services that define selectors, the endpoints controller creates
+`Endpoints` records in the API, and modifies the DNS configuration to return A
+records (addresses) that point directly to the `Pods` backing the `Service`.
+
+### Without selectors
+
+For headless services that do not define selectors, the endpoints controller does
+not create `Endpoints` records.
However, the DNS system looks for and configures
+either:
+
+ * CNAME records for [`ExternalName`](#externalname)-type services.
+ * A records for any `Endpoints` that share a name with the service, for all
+   other types.
+
+## Publishing services (ServiceTypes) {#publishing-services-service-types}
+
+For some parts of your application (e.g. frontends) you may want to expose a
+Service onto an external IP address, one that's outside of your cluster.
+
+Kubernetes `ServiceTypes` allow you to specify what kind of service you want.
+The default is `ClusterIP`.
+
+`Type` values and their behaviors are:
+
+ * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value
+   makes the service only reachable from within the cluster. This is the
+   default `ServiceType`.
+ * [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port
+   (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will
+   route, is automatically created. You'll be able to contact the `NodePort` service,
+   from outside the cluster,
+   by requesting `<NodeIP>:<NodePort>`.
+ * [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud
+   provider's load balancer. `NodePort` and `ClusterIP` services, to which the external
+   load balancer will route, are automatically created.
+ * [`ExternalName`](#externalname): Maps the service to the contents of the
+   `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record
+   with its value. No proxying of any kind is set up.
+
+{{< note >}}
+
+You need CoreDNS version 1.7 or higher to use the `ExternalName` type.
+
+{{< /note >}}
+
+### Type NodePort {#nodeport}
+
+If you set the `type` field to `NodePort`, the Kubernetes control plane will
+allocate a port from a range specified by the `--service-node-port-range` flag (default: 30000-32767).
+Each node will proxy that port (the same port number on every Node) into your Service.
+Your service will report that allocated port in its `.spec.ports[*].nodePort` field.
+
+
+If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s); this is supported since Kubernetes v1.10.
+This flag takes a comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 192.0.2.0/25) to specify IP address ranges that kube-proxy should consider as local to this node.
+
+For example, if you start kube-proxy with the flag `--nodeport-addresses=127.0.0.0/8`, kube-proxy will select only the loopback interface for NodePort Services. The default for `--nodeport-addresses` is an empty list, which means that kube-proxy should consider all available network interfaces for NodePort. (That's also compatible with earlier Kubernetes releases.)
+
+If you want a specific port number, you can specify a value in the `nodePort`
+field. The control plane will either allocate you that port or report that
+the API transaction failed.
+This means that you need to take care of possible port collisions yourself.
+You also have to use a valid port number, one that's inside the range configured
+for NodePort use.
+
+Using a NodePort gives you the freedom to set up your own load balancing solution,
+to configure environments that are not fully supported by Kubernetes, or even
+to just expose one or more nodes' IPs directly.
+
+Note that this Service will be visible as both `<NodeIP>:spec.ports[*].nodePort`
+and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, `<NodeIP>` would be the filtered node IP(s).)
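+
+For example, the following manifest is a minimal sketch of a `NodePort` Service;
+the name, label, and `nodePort` value here are illustrative placeholders rather
+than values taken from the examples above:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-nodeport-service
+spec:
+  type: NodePort
+  selector:
+    app: MyApp
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 9376
+      # Optional field: omit it to let the control plane allocate a port
+      # from the configured range (default: 30000-32767).
+      nodePort: 30007
+```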
+
+### Type LoadBalancer {#loadbalancer}
+
+On cloud providers which support external load balancers, setting the `type`
+field to `LoadBalancer` will provision a load balancer for your Service.
+The actual creation of the load balancer happens asynchronously, and
+information about the provisioned balancer will be published in the Service's
+`.status.loadBalancer` field. For example:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 9376
+  clusterIP: 10.0.171.239
+  loadBalancerIP: 78.11.24.19
+  type: LoadBalancer
+status:
+  loadBalancer:
+    ingress:
+      - ip: 146.148.47.155
+```
+
+Traffic from the external load balancer will be directed at the backend Pods,
+though exactly how that works depends on the cloud provider.
+
+Some cloud providers allow you to specify the `loadBalancerIP`. In those cases, the load-balancer will be created
+with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified,
+the loadBalancer will be set up with an ephemeral IP address. If you specify a `loadBalancerIP`
+but your cloud provider does not support the feature, the `loadBalancerIP` field that you
+set will be ignored.
+
+{{< note >}}
+If you're using SCTP, see the [caveat](#caveat-sctp-loadbalancer-service-type) below about the
+`LoadBalancer` Service type.
+{{< /note >}}
+
+{{< note >}}
+
+On **Azure**, if you want to use a user-specified public type `loadBalancerIP`, you first need
+to create a static type public IP address resource. This public IP address resource should
+be in the same resource group as the other automatically created resources of the cluster.
+For example, `MC_myResourceGroup_myAKSCluster_eastus`.
+
+Specify the assigned IP address as `loadBalancerIP`. Ensure that you have updated the `securityGroupName` in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues, see [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357).
+
+{{< /note >}}
+
+#### Internal load balancer
+In a mixed environment it is sometimes necessary to route traffic from services inside the same
+(virtual) network address block.
+
+In a split-horizon DNS environment you would need two services to be able to route both external and internal traffic to your endpoints.
+
+You can achieve this by adding one of the following annotations to a Service.
+The annotation to add depends on the cloud service provider you're using.
+
+{{< tabs name="service_tabs" >}}
+{{% tab name="Default" %}}
+Select one of the tabs.
+{{% /tab %}}
+{{% tab name="GCP" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    cloud.google.com/load-balancer-type: "Internal"
+[...]
+```
+Use `cloud.google.com/load-balancer-type: "internal"` for masters with version 1.7.0 to 1.7.3.
+For more information, see the [docs](https://cloud.google.com/kubernetes-engine/docs/internal-load-balancing).
+{{% /tab %}}
+{{% tab name="AWS" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+[...]
+```
+{{% /tab %}}
+{{% tab name="Azure" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+[...]
+```
+{{% /tab %}}
+{{% tab name="OpenStack" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
+[...]
+```
+{{% /tab %}}
+{{% tab name="Baidu Cloud" %}}
+```yaml
+[...]
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
+[...]
+```
+{{% /tab %}}
+{{< /tabs >}}
+
+
+#### TLS support on AWS {#ssl-support-on-aws}
+
+For partial TLS / SSL support on clusters running on AWS, you can add three
+annotations to a `LoadBalancer` service:
+
+```yaml
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
+```
+
+The first specifies the ARN of the certificate to use. It can be either a
+certificate from a third-party issuer that was uploaded to IAM or one created
+within AWS Certificate Manager.
+
+```yaml
+metadata:
+  name: my-service
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp)
+```
+
+The second annotation specifies which protocol a Pod speaks. For HTTPS and
+SSL, the ELB will expect the Pod to authenticate itself over the encrypted
+connection, using a certificate.
+
+HTTP and HTTPS will select layer 7 proxying: the ELB will terminate
+the connection with the user, parse headers and inject the `X-Forwarded-For`
+header with the user's IP address (Pods will only see the IP address of the
+ELB at the other end of its connection) when forwarding requests.
+
+TCP and SSL will select layer 4 proxying: the ELB will forward traffic without
+modifying the headers.
+
+In a mixed-use environment where some ports are secured and others are left unencrypted,
+you can use the following annotations:
+
+```yaml
+    metadata:
+      name: my-service
+      annotations:
+        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
+        service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443"
+```
+
+In the above example, if the service contained three ports, `80`, `443`, and
+`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just
+be proxied as plain HTTP.
+
+From Kubernetes v1.9 onwards you can use [predefined AWS SSL policies](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) with HTTPS or SSL listeners for your Services.
+To see which policies are available for use, you can use the `aws` command line tool:
+
+```bash
+aws elb describe-load-balancer-policies --query 'PolicyDescriptions[].PolicyName'
+```
+
+You can then specify any one of those policies using the
+"`service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`"
+annotation; for example:
+
+```yaml
+    metadata:
+      name: my-service
+      annotations:
+        service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01"
+```
+
+#### PROXY protocol support on AWS
+
+To enable [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)
+support for clusters running on AWS, you can use the following service
+annotation:
+
+```yaml
+    metadata:
+      name: my-service
+      annotations:
+        service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+```
+
+Since version 1.3.0, the use of this annotation applies to all ports proxied by the ELB
+and cannot be configured otherwise.
+
+#### ELB Access Logs on AWS
+
+There are several annotations to manage access logs for ELB services on AWS.
+ +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` +controls whether access logs are enabled. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval` +controls the interval in minutes for publishing the access logs. You can specify +an interval of either 5 or 60 minutes. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` +controls the name of the Amazon S3 bucket where load balancer access logs are +stored. + +The annotation `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` +specifies the logical hierarchy you created for your Amazon S3 bucket. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" + # Specifies whether access logs are enabled for the load balancer + service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" + # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" + # The name of the Amazon S3 bucket where the access logs are stored + service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" + # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` +``` + +#### Connection Draining on AWS + +Connection draining for Classic ELBs can be managed with the annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled` set +to the value of `"true"`. The annotation +`service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout` can +also be used to set maximum time, in seconds, to keep the existing connections open before deregistering the instances. + + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60" +``` + +#### Other ELB annotations + +There are other annotations to manage Classic Elastic Load Balancers that are described below. + +```yaml + metadata: + name: my-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" + # The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer + + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # Specifies whether cross-zone load balancing is enabled for the load balancer + + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops" + # A comma-separated list of key-value pairs which will be recorded as + # additional tags in the ELB. + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "" + # The number of successive successful health checks required for a backend to + # be considered healthy for traffic. Defaults to 2, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3" + # The number of unsuccessful health checks required for a backend to be + # considered unhealthy for traffic. Defaults to 6, must be between 2 and 10 + + service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20" + # The approximate interval, in seconds, between health checks of an + # individual instance. 
Defaults to 10, must be between 5 and 300
+        service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5"
+        # The amount of time, in seconds, during which no response means a failed
+        # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval
+        # value. Defaults to 5, must be between 2 and 60
+
+        service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e"
+        # A list of additional security groups to be added to the ELB
+```
+
+#### Network Load Balancer support on AWS [alpha] {#aws-nlb-support}
+
+{{< warning >}}
+This is an alpha feature and is not yet recommended for production clusters.
+{{< /warning >}}
+
+Starting from Kubernetes v1.9.0, you can use an AWS Network Load Balancer (NLB) with Services. To
+use a Network Load Balancer on AWS, use the annotation `service.beta.kubernetes.io/aws-load-balancer-type`
+with the value set to `nlb`.
+
+```yaml
+    metadata:
+      name: my-service
+      annotations:
+        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+```
+
+{{< note >}}
+NLB only works with certain instance classes; see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets)
+on Elastic Load Balancing for a list of supported instance types.
+{{< /note >}}
+
+Unlike Classic Elastic Load Balancers, Network Load Balancers (NLBs) forward the
+client's IP address through to the node. If a service's `.spec.externalTrafficPolicy`
+is set to `Cluster`, the client's IP address will not be propagated to the end
+pods.
+
+By setting `.spec.externalTrafficPolicy` to `Local`, client IP addresses will be
+propagated to the end pods, but this could result in uneven distribution of
+traffic. Nodes without any pods for a particular LoadBalancer service will fail
+the NLB Target Group's health check on the auto-assigned
+`.spec.healthCheckNodePort` and not receive any traffic.
+
+In order to achieve even traffic, either use a DaemonSet, or specify a
+[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity)
+so that the backend Pods are not placed on the same node.
+
+You can also use NLB Services with the [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer)
+annotation.
+
+In order for client traffic to reach instances behind an NLB, the Node security
+groups are modified with the following IP rules:
+
+| Rule | Protocol | Port(s) | IpRange(s) | IpRange Description |
+|------|----------|---------|------------|---------------------|
+| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\<loadBalancerName\> |
+| Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\<loadBalancerName\> |
+| MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\<loadBalancerName\> |
+
+In order to limit which client IPs can access the Network Load Balancer,
+specify `loadBalancerSourceRanges`.
+
+```yaml
+spec:
+  loadBalancerSourceRanges:
+    - "143.231.0.0/16"
+```
+
+{{< note >}}
+If `.spec.loadBalancerSourceRanges` is not set, Kubernetes will
+allow traffic from `0.0.0.0/0` to the Node Security Group(s). If nodes have
+public IP addresses, be aware that non-NLB traffic can also reach all instances
+in those modified security groups.
+
+{{< /note >}}
+
+### Type ExternalName {#externalname}
+
+Services of type ExternalName map a service to a DNS name, not to a typical selector such as
+`my-service` or `cassandra`. You specify these services with the `spec.externalName` parameter.
+
+This Service definition, for example, maps
+the `my-service` Service in the `prod` namespace to `my.database.example.com`:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+  namespace: prod
+spec:
+  type: ExternalName
+  externalName: my.database.example.com
+```
+{{< note >}}
+ExternalName accepts an IPv4 address string, but treats it as a DNS name comprised of digits, not as an IP address. ExternalNames that resemble IPv4 addresses are not resolved by CoreDNS or ingress-nginx because ExternalName
+is intended to specify a canonical DNS name. To hardcode an IP address, consider using
+[headless services](#headless-services).
+{{< /note >}}
+
+When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS service
+will return a `CNAME` record with the value `my.database.example.com`. Accessing
+`my-service` works in the same way as other Services but with the crucial
+difference that redirection happens at the DNS level rather than via proxying or
+forwarding. Should you later decide to move your database into your cluster, you
+can start its pods, add appropriate selectors or endpoints, and change the
+Service's `type`.
+
+
+{{< note >}}
+This section is indebted to the [Kubernetes Tips - Part
+1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/).
+{{< /note >}}
+
+### External IPs
+
+If there are external IPs that route to one or more cluster nodes, Kubernetes services can be exposed on those
+`externalIPs`. Traffic that ingresses into the cluster with the external IP (as destination IP), on the service port,
+will be routed to one of the service endpoints. `externalIPs` are not managed by Kubernetes and are the responsibility
+of the cluster administrator.
+
+In the Service spec, `externalIPs` can be specified along with any of the `ServiceTypes`.
+In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`).
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: MyApp
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 9376
+  externalIPs:
+    - 80.11.12.10
+```
+
+## Shortcomings
+
+Using the userspace proxy for VIPs will work at small to medium scale, but will
+not scale to very large clusters with thousands of Services. The [original
+design proposal for portals](http://issue.k8s.io/1107) has more details on
+this.
+
+Using the userspace proxy obscures the source IP address of a packet accessing
+a Service.
+This makes some kinds of network filtering (firewalling) impossible. The iptables
+proxy mode does not
+obscure in-cluster source IPs, but it does still impact clients coming through
+a load balancer or node-port.
+
+The `Type` field is designed as nested functionality - each level adds to the
+previous. This is not strictly required on all cloud providers (e.g. Google Compute Engine does
+not need to allocate a `NodePort` to make `LoadBalancer` work, but AWS does)
+but the current API requires it.
+
+## Virtual IP implementation {#the-gory-details-of-virtual-ips}
+
+The previous information should be sufficient for many people who just want to
+use Services. However, there is a lot going on behind the scenes that may be
+worth understanding.
+
+### Avoiding collisions
+
+One of the primary philosophies of Kubernetes is that you should not be
+exposed to situations that could cause your actions to fail through no fault
+of your own. For the design of the Service resource, this means not making
+you choose your own port number if that choice might collide with
+someone else's choice. That is an isolation failure.
+
+In order to allow you to choose a port number for your Services, we must
+ensure that no two Services can collide. Kubernetes does that by allocating each
+Service its own IP address.
+
+To ensure each Service receives a unique IP, an internal allocator atomically
+updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}}
+prior to creating each Service. The map object must exist in the registry for
+Services to get IP address assignments, otherwise creations will
+fail with a message indicating an IP address could not be allocated.
+
+In the control plane, a background controller is responsible for creating that
+map (needed to support migrating from older versions of Kubernetes that used
+in-memory locking). Kubernetes also uses controllers to check for invalid
+assignments (e.g. due to administrator intervention) and for cleaning up allocated
+IP addresses that are no longer used by any Services.
+
+### Service IP addresses {#ips-and-vips}
+
+Unlike Pod IP addresses, which actually route to a fixed destination,
+Service IPs are not actually answered by a single host. Instead, kube-proxy
+uses iptables (packet processing logic in Linux) to define _virtual_ IP addresses
+which are transparently redirected as needed. When clients connect to the
+VIP, their traffic is automatically transported to an appropriate endpoint.
+The environment variables and DNS for Services are actually populated in
+terms of the Service's virtual IP address (and port).
+
+kube-proxy supports three proxy modes (userspace, iptables and IPVS), which
+each operate slightly differently.
+
+#### Userspace
+
+As an example, consider the image processing application described above.
+When the backend Service is created, the Kubernetes master assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it opens a new random port, establishes an
+iptables redirect from the virtual IP address to this new port, and starts accepting
+connections on it.
+
+When a client connects to the Service's virtual IP address, the iptables
+rule kicks in, and redirects the packets to the proxy's own port.
+The “Service proxy” chooses a backend, and starts proxying traffic from the client to the backend.
+
+This means that Service owners can choose any port they want without risk of
+collision. Clients can simply connect to an IP and port, without being aware
+of which Pods they are actually accessing.
+
+#### iptables
+
+Again, consider the image processing application described above.
+When the backend Service is created, the Kubernetes control plane assigns a virtual
+IP address, for example 10.0.0.1. Assuming the Service port is 1234, the
+Service is observed by all of the kube-proxy instances in the cluster.
+When a proxy sees a new Service, it installs a series of iptables rules which
+redirect from the virtual IP address to per-Service rules. The per-Service
+rules link to per-Endpoint rules which redirect traffic (using destination NAT)
+to the backends.
+
+When a client connects to the Service's virtual IP address, the iptables rule kicks in.
+A backend is chosen (either based on session affinity or randomly) and packets are
+redirected to the backend. Unlike the userspace proxy, packets are never
+copied to userspace, the kube-proxy does not have to be running for the virtual
+IP address to work, and Nodes see traffic arriving from the unaltered client IP
+address.
+
+This same basic flow executes when traffic comes in through a node-port or
+through a load-balancer, though in those cases the client IP does get altered.
+
+#### IPVS
+
+iptables operations slow down dramatically in a large-scale cluster, e.g. one with 10,000 Services.
+IPVS is designed for load balancing and is based on in-kernel hash tables, so you get consistent performance from IPVS-based kube-proxy even with a large number of Services. IPVS-based kube-proxy also supports more sophisticated load balancing algorithms (least connections, locality, weighted, persistence).
+
+## API Object
+
+Service is a top-level resource in the Kubernetes REST API. You can find more details
+about the API object at: [Service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core).
+
+## Supported protocols {#protocol-support}
+
+### TCP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use TCP for any kind of service, and it's the default network protocol.
+
+### UDP
+
+{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+
+You can use UDP for most services. For type=LoadBalancer services, UDP support
+depends on the cloud provider offering this facility.
+
+### HTTP
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it, you can use a Service in LoadBalancer mode
+to set up external HTTP / HTTPS reverse proxying, forwarded to the Endpoints
+of the Service.
+
+{{< note >}}
+You can also use {{< glossary_tooltip term_id="ingress" >}} in place of Service
+to expose HTTP / HTTPS services.
+{{< /note >}}
+
+### PROXY protocol
+
+{{< feature-state for_k8s_version="v1.1" state="stable" >}}
+
+If your cloud provider supports it (e.g. [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)),
+you can use a Service in LoadBalancer mode to configure a load balancer outside
+of Kubernetes itself, that will forward connections prefixed with
+[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt).
+
+The load balancer will send an initial series of octets describing the
+incoming connection, similar to this example:
+
+```
+PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n
+```
+followed by the data from the client.
+
+### SCTP
+
+{{< feature-state for_k8s_version="v1.12" state="alpha" >}}
+
+Kubernetes supports SCTP as a `protocol` value in Service, Endpoint, NetworkPolicy and Pod definitions as an alpha feature. To enable this feature, the cluster administrator needs to enable the `SCTPSupport` feature gate on the apiserver, for example, `--feature-gates=SCTPSupport=true,…`.
+
+When the feature gate is enabled, you can set the `protocol` field of a Service, Endpoint, NetworkPolicy or Pod to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections.
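+
+For illustration, a minimal sketch of a Service that uses SCTP (assuming the
+feature gate is enabled) might look like the following; the name, label, and
+port number are placeholders, not values defined elsewhere on this page:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-sctp-service
+spec:
+  selector:
+    app: MySctpApp
+  ports:
+    - protocol: SCTP
+      port: 9999
+      targetPort: 9999
+```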
+ +#### Warnings {#caveat-sctp-overview} + +##### Support for multihomed SCTP associations {#caveat-sctp-multihomed} + +{{< warning >}} +The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod. + +NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules. +{{< /warning >}} + +##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type} + +{{< warning >}} +You can only create a Service with `type` LoadBalancer plus `protocol` SCTP if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise, the Service creation request is rejected. The current set of cloud load balancer providers (Azure, AWS, CloudStack, GCE, OpenStack) all lack support for SCTP. +{{< /warning >}} + +##### Windows {#caveat-sctp-windows-os} + +{{< warning >}} +SCTP is not supported on Windows based nodes. +{{< /warning >}} + +##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace} + +{{< warning >}} +The kube-proxy does not support the management of SCTP associations when it is in userspace mode. +{{< /warning >}} + +## Future work + +In the future, the proxy policy for Services can become more nuanced than +simple round-robin balancing, for example master-elected or sharded. We also +envision that some Services will have "real" load balancers, in which case the +virtual IP address will simply transport the packets there. + +The Kubernetes project intends to improve support for L7 (HTTP) Services. + +The Kubernetes project intends to have more flexible ingress modes for Services +which encompass the current ClusterIP, NodePort, and LoadBalancer modes and more. + + +{{% /capture %}} + +{{% capture whatsnext %}} + +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) +* Read about [Ingress](/docs/concepts/services-networking/ingress/) + +{{% /capture %}} diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index 17215bcfa14c8..dded59d9dd3e9 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -226,15 +226,9 @@ CSI volume expansion requires enabling `ExpandCSIVolumes` feature gate and also You can only resize volumes containing a file system if the file system is XFS, Ext3, or Ext4. -When a volume contains a file system, the file system is only resized when a new Pod is started using -the `PersistentVolumeClaim` in ReadWrite mode. Therefore, if a pod or deployment is using a volume and -you want to expand it, you need to delete or recreate the pod after the volume has been expanded by the cloud provider in the controller-manager. You can check the status of resize operation by running the `kubectl describe pvc` command: - -``` -kubectl describe pvc -``` - -If the `PersistentVolumeClaim` has the status `FileSystemResizePending`, it is safe to recreate the pod using the PersistentVolumeClaim. +When a volume contains a file system, the file system is only resized when a new Pod is using +the `PersistentVolumeClaim` in ReadWrite mode. File system expansion is either done when Pod is starting up +or is done when Pod is running and underlying file system supports online expansion. FlexVolumes allow resize if the driver is set with the `RequiresFSResize` capability to true. The FlexVolume can be resized on pod restart. 
@@ -243,14 +237,15 @@ The FlexVolume can be resized on pod restart. #### Resizing an in-use PersistentVolumeClaim -Expanding in-use PVCs is an alpha feature. To use it, enable the `ExpandInUsePersistentVolumes` feature gate. +Expanding in-use PVCs is a beta feature and is enabled by default via `ExpandInUsePersistentVolumes` feature gate. In this case, you don't need to delete and recreate a Pod or deployment that is using an existing PVC. Any in-use PVC automatically becomes available to its Pod as soon as its file system has been expanded. This feature has no effect on PVCs that are not in use by a Pod or deployment. You must create a Pod which uses the PVC before the expansion can complete. -Expanding in-use PVCs for FlexVolumes is added in release 1.13. To enable this feature use `ExpandInUsePersistentVolumes` and `ExpandPersistentVolumes` feature gates. The `ExpandPersistentVolumes` feature gate is already enabled by default. If the `ExpandInUsePersistentVolumes` is set, FlexVolume can be resized online without pod restart. - + +Similar to other volume types - FlexVolume volumes can also be expanded when in-use by a pod. + {{< note >}} FlexVolume resize is possible only when the underlying driver supports resize. {{< /note >}} @@ -682,6 +677,33 @@ spec: storage: 10Gi ``` +## Volume Cloning + +{{< feature-state for_k8s_version="v1.15" state="alpha" >}} + +Volume clone feature was added to support CSI Volume Plugins only. For details, see [volume cloning](/docs/concepts/storage/volume-pvc-datasource/). + +To enable support for cloning a volume from a pvc data source, enable the +`VolumePVCDataSource` feature gate on the apiserver and controller-manager. + +### Create Persistent Volume Claim from an existing pvc +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloned-pvc +spec: + storageClassName: my-csi-plugin + dataSource: + name: existing-src-pvc-name + kind: PersistentVolumeClaim + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + ## Writing Portable Configuration If you're writing configuration templates or examples that run on a wide range of clusters diff --git a/content/en/docs/concepts/storage/volume-pvc-datasource.md b/content/en/docs/concepts/storage/volume-pvc-datasource.md new file mode 100644 index 0000000000000..7819b83a2bd7e --- /dev/null +++ b/content/en/docs/concepts/storage/volume-pvc-datasource.md @@ -0,0 +1,68 @@ +--- +reviewers: +- jsafrane +- saad-ali +- thockin +- msau42 +title: CSI Volume Cloning +content_template: templates/concept +weight: 30 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.15" state="alpha" >}} +This document describes the concept of cloning existing CSI Volumes in Kubernetes. Familiarity with [Volumes](/docs/concepts/storage/volumes) is suggested. + +This feature requires VolumePVCDataSource feature gate to be enabled: + +``` +--feature-gates=VolumePVCDataSource=true +``` + + +{{% /capture %}} + + +{{% capture body %}} + +## Introduction + +The {{< glossary_tooltip text="CSI" term_id="csi" >}} Volume Cloning feature adds support for specifying existing {{< glossary_tooltip text="PVC" term_id="persistent-volume-claim" >}}s in the `dataSource` field to indicate a user would like to clone a {{< glossary_tooltip term_id="volume" >}}. + +A Clone is defined as a duplicate of an existing Kubernetes Volume that can be consumed as any standard Volume would be. 
The only difference is that upon provisioning, rather than creating a "new" empty Volume, the back end device creates an exact duplicate of the specified Volume.
+
+The implementation of cloning, from the perspective of the Kubernetes API, simply adds the ability to specify an existing unbound PVC as a `dataSource` during new PVC creation.
+
+Users need to be aware of the following when using this feature:
+
+* Cloning support (`VolumePVCDataSource`) is only available for CSI drivers.
+* Cloning support is only available for dynamic provisioners.
+* CSI drivers may or may not have implemented the volume cloning functionality.
+* You can only clone a PVC when it exists in the same namespace as the destination PVC (source and destination must be in the same namespace).
+
+## Provisioning
+
+Clones are provisioned just like any other PVC with the exception of adding a `dataSource` that references an existing PVC in the same namespace.
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: clone-of-pvc-1
+  namespace: myns
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  dataSource:
+    kind: PersistentVolumeClaim
+    name: pvc-1
+```
+
+The result is a new PVC with the name `clone-of-pvc-1` that has the exact same content as the specified source `pvc-1`.
+
+## Usage
+
+Upon availability of the new PVC, the cloned PVC is consumed in the same way as any other PVC. It's also expected at this point that the newly created PVC is an independent object. It can be consumed, cloned, snapshotted, or deleted independently and without consideration for its original dataSource PVC. This also implies that the source is not linked in any way to the newly created clone; it may be modified or deleted without affecting the newly created clone.
+
+{{% /capture %}}
diff --git a/content/en/docs/concepts/storage/volume-snapshots.md b/content/en/docs/concepts/storage/volume-snapshots.md
index 6b57db96aabdb..cf105c16d815f 100644
--- a/content/en/docs/concepts/storage/volume-snapshots.md
+++ b/content/en/docs/concepts/storage/volume-snapshots.md
@@ -64,6 +64,12 @@ A user creates, or has already created in the case of dynamic provisioning, a `V
 VolumeSnapshots will remain unbound indefinitely if a matching VolumeSnapshotContent does not exist. VolumeSnapshots will be bound as matching VolumeSnapshotContents become available.
 
+### Persistent Volume Claim in Use Protection
+
+The purpose of the Persistent Volume Claim Object in Use Protection feature is to ensure that in-use PVC API objects are not removed from the system (as this may result in data loss).
+
+If a PVC is in active use by a snapshot as a source to create the snapshot, the PVC is in use. If a user deletes a PVC API object in active use as a snapshot source, the PVC object is not removed immediately. Instead, removal of the PVC object is postponed until the PVC is no longer actively used by any snapshots. A PVC is no longer used as a snapshot source when `ReadyToUse` of the snapshot `Status` becomes `true`.
+
 ### Delete
 
 Deletion removes both the `VolumeSnapshotContent` object from the Kubernetes API, as well as the associated storage asset in the external infrastructure.
diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index 6949a89f1406a..851f48515bf8b 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -166,6 +166,17 @@ A `azureDisk` is used to mount a Microsoft Azure [Data Disk](https://azure.micro More details can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_disk/README.md). +#### CSI Migration + +{{< feature-state for_k8s_version="v1.15" state="alpha" >}} + +The CSI Migration feature for azureDisk, when enabled, shims all plugin operations +from the existing in-tree plugin to the `disk.csi.azure.com` Container +Storage Interface (CSI) Driver. In order to use this feature, the [Azure Disk CSI +Driver](https://github.com/kubernetes-sigs/azuredisk-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationAzureDisk` +Alpha features must be enabled. + ### azureFile {#azurefile} A `azureFile` is used to mount a Microsoft Azure File Volume (SMB 2.1 and 3.0) @@ -173,6 +184,17 @@ into a Pod. More details can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_file/README.md). +#### CSI Migration + +{{< feature-state for_k8s_version="v1.15" state="alpha" >}} + +The CSI Migration feature for azureFile, when enabled, shims all plugin operations +from the existing in-tree plugin to the `file.csi.azure.com` Container +Storage Interface (CSI) Driver. In order to use this feature, the [Azure File CSI +Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationAzureFile` +Alpha features must be enabled. + ### cephfs {#cephfs} A `cephfs` volume allows an existing CephFS volume to be @@ -1132,7 +1154,7 @@ spec: ### Using subPath with expanded environment variables -{{< feature-state for_k8s_version="v1.14" state="alpha" >}} +{{< feature-state for_k8s_version="v1.15" state="beta" >}} Use the `subPathExpr` field to construct `subPath` directory names from Downward API environment variables. @@ -1307,7 +1329,7 @@ configuration changes to existing Storage Classes, PVs or PVCs (referring to in-tree plugins) when transitioning to a CSI driver that supersedes an in-tree plugin. In the alpha state, the operations and features that are supported include -provisioning/delete, attach/detach and mount/unmount of volumes with `volumeMode` set to `filesystem` +provisioning/delete, attach/detach, mount/unmount and resizing of volumes. In-tree plugins that support CSI Migration and have a corresponding CSI driver implemented are listed in the "Types of Volumes" section above. diff --git a/content/en/docs/concepts/storage/volumes.md.orig b/content/en/docs/concepts/storage/volumes.md.orig new file mode 100644 index 0000000000000..851f48515bf8b --- /dev/null +++ b/content/en/docs/concepts/storage/volumes.md.orig @@ -0,0 +1,1413 @@ +--- +reviewers: +- jsafrane +- saad-ali +- thockin +- msau42 +title: Volumes +content_template: templates/concept +weight: 10 +--- + +{{% capture overview %}} + +On-disk files in a Container are ephemeral, which presents some problems for +non-trivial applications when running in Containers. First, when a Container +crashes, kubelet will restart it, but the files will be lost - the +Container starts with a clean state. 
Second, when running Containers together +in a `Pod` it is often necessary to share files between those Containers. The +Kubernetes `Volume` abstraction solves both of these problems. + +Familiarity with [Pods](/docs/user-guide/pods) is suggested. + +{{% /capture %}} + + +{{% capture body %}} + +## Background + +Docker also has a concept of +[volumes](https://docs.docker.com/engine/admin/volumes/), though it is +somewhat looser and less managed. In Docker, a volume is simply a directory on +disk or in another Container. Lifetimes are not managed and until very +recently there were only local-disk-backed volumes. Docker now provides volume +drivers, but the functionality is very limited for now (e.g. as of Docker 1.7 +only one volume driver is allowed per Container and there is no way to pass +parameters to volumes). + +A Kubernetes volume, on the other hand, has an explicit lifetime - the same as +the Pod that encloses it. Consequently, a volume outlives any Containers that run +within the Pod, and data is preserved across Container restarts. Of course, when a +Pod ceases to exist, the volume will cease to exist, too. Perhaps more +importantly than this, Kubernetes supports many types of volumes, and a Pod can +use any number of them simultaneously. + +At its core, a volume is just a directory, possibly with some data in it, which +is accessible to the Containers in a Pod. How that directory comes to be, the +medium that backs it, and the contents of it are determined by the particular +volume type used. + +To use a volume, a Pod specifies what volumes to provide for the Pod (the +`.spec.volumes` +field) and where to mount those into Containers (the +`.spec.containers.volumeMounts` +field). + +A process in a container sees a filesystem view composed from their Docker +image and volumes. The [Docker +image](https://docs.docker.com/userguide/dockerimages/) is at the root of the +filesystem hierarchy, and any volumes are mounted at the specified paths within +the image. Volumes can not mount onto other volumes or have hard links to +other volumes. Each Container in the Pod must independently specify where to +mount each volume. + +## Types of Volumes + +Kubernetes supports several types of Volumes: + + * [awsElasticBlockStore](#awselasticblockstore) + * [azureDisk](#azuredisk) + * [azureFile](#azurefile) + * [cephfs](#cephfs) + * [cinder](#cinder) + * [configMap](#configmap) + * [csi](#csi) + * [downwardAPI](#downwardapi) + * [emptyDir](#emptydir) + * [fc (fibre channel)](#fc) + * [flexVolume](#flexVolume) + * [flocker](#flocker) + * [gcePersistentDisk](#gcepersistentdisk) + * [gitRepo (deprecated)](#gitrepo) + * [glusterfs](#glusterfs) + * [hostPath](#hostpath) + * [iscsi](#iscsi) + * [local](#local) + * [nfs](#nfs) + * [persistentVolumeClaim](#persistentvolumeclaim) + * [projected](#projected) + * [portworxVolume](#portworxvolume) + * [quobyte](#quobyte) + * [rbd](#rbd) + * [scaleIO](#scaleio) + * [secret](#secret) + * [storageos](#storageos) + * [vsphereVolume](#vspherevolume) + +We welcome additional contributions. + +### awsElasticBlockStore {#awselasticblockstore} + +An `awsElasticBlockStore` volume mounts an Amazon Web Services (AWS) [EBS +Volume](http://aws.amazon.com/ebs/) into your Pod. Unlike +`emptyDir`, which is erased when a Pod is removed, the contents of an EBS +volume are preserved and the volume is merely unmounted. This means that an +EBS volume can be pre-populated with data, and that data can be "handed off" +between Pods. 
+ +{{< caution >}} +You must create an EBS volume using `aws ec2 create-volume` or the AWS API before you can use it. +{{< /caution >}} + +There are some restrictions when using an `awsElasticBlockStore` volume: + +* the nodes on which Pods are running must be AWS EC2 instances +* those instances need to be in the same region and availability-zone as the EBS volume +* EBS only supports a single EC2 instance mounting a volume + +#### Creating an EBS volume + +Before you can use an EBS volume with a Pod, you need to create it. + +```shell +aws ec2 create-volume --availability-zone=eu-west-1a --size=10 --volume-type=gp2 +``` + +Make sure the zone matches the zone you brought up your cluster in. (And also check that the size and EBS volume +type are suitable for your use!) + +#### AWS EBS Example configuration + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-ebs +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-ebs + name: test-volume + volumes: + - name: test-volume + # This AWS EBS volume must already exist. + awsElasticBlockStore: + volumeID: + fsType: ext4 +``` + +#### CSI Migration + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature for awsElasticBlockStore, when enabled, shims all plugin operations +from the existing in-tree plugin to the `ebs.csi.aws.com` Container +Storage Interface (CSI) Driver. In order to use this feature, the [AWS EBS CSI +Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationAWS` +Alpha features must be enabled. + +### azureDisk {#azuredisk} + +A `azureDisk` is used to mount a Microsoft Azure [Data Disk](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-about-disks-vhds/) into a Pod. + +More details can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_disk/README.md). + +#### CSI Migration + +{{< feature-state for_k8s_version="v1.15" state="alpha" >}} + +The CSI Migration feature for azureDisk, when enabled, shims all plugin operations +from the existing in-tree plugin to the `disk.csi.azure.com` Container +Storage Interface (CSI) Driver. In order to use this feature, the [Azure Disk CSI +Driver](https://github.com/kubernetes-sigs/azuredisk-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationAzureDisk` +Alpha features must be enabled. + +### azureFile {#azurefile} + +A `azureFile` is used to mount a Microsoft Azure File Volume (SMB 2.1 and 3.0) +into a Pod. + +More details can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_file/README.md). + +#### CSI Migration + +{{< feature-state for_k8s_version="v1.15" state="alpha" >}} + +The CSI Migration feature for azureFile, when enabled, shims all plugin operations +from the existing in-tree plugin to the `file.csi.azure.com` Container +Storage Interface (CSI) Driver. In order to use this feature, the [Azure File CSI +Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationAzureFile` +Alpha features must be enabled. + +### cephfs {#cephfs} + +A `cephfs` volume allows an existing CephFS volume to be +mounted into your Pod. 
Unlike `emptyDir`, which is erased when a Pod is +removed, the contents of a `cephfs` volume are preserved and the volume is merely +unmounted. This means that a CephFS volume can be pre-populated with data, and +that data can be "handed off" between Pods. CephFS can be mounted by multiple +writers simultaneously. + +{{< caution >}} +You must have your own Ceph server running with the share exported before you can use it. +{{< /caution >}} + +See the [CephFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/cephfs/) for more details. + +### cinder {#cinder} + +{{< note >}} +Prerequisite: Kubernetes with OpenStack Cloud Provider configured. For cloudprovider +configuration please refer [cloud provider openstack](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#openstack). +{{< /note >}} + +`cinder` is used to mount OpenStack Cinder Volume into your Pod. + +#### Cinder Volume Example configuration + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-cinder +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-cinder-container + volumeMounts: + - mountPath: /test-cinder + name: test-volume + volumes: + - name: test-volume + # This OpenStack volume must already exist. + cinder: + volumeID: + fsType: ext4 +``` + +#### CSI Migration + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature for Cinder, when enabled, shims all plugin operations +from the existing in-tree plugin to the `cinder.csi.openstack.org` Container +Storage Interface (CSI) Driver. In order to use this feature, the [Openstack Cinder CSI +Driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationOpenStack` +Alpha features must be enabled. + +### configMap {#configmap} + +The [`configMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/) resource +provides a way to inject configuration data into Pods. +The data stored in a `ConfigMap` object can be referenced in a volume of type +`configMap` and then consumed by containerized applications running in a Pod. + +When referencing a `configMap` object, you can simply provide its name in the +volume to reference it. You can also customize the path to use for a specific +entry in the ConfigMap. +For example, to mount the `log-config` ConfigMap onto a Pod called `configmap-pod`, +you might use the YAML below: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: configmap-pod +spec: + containers: + - name: test + image: busybox + volumeMounts: + - name: config-vol + mountPath: /etc/config + volumes: + - name: config-vol + configMap: + name: log-config + items: + - key: log_level + path: log_level +``` + +The `log-config` ConfigMap is mounted as a volume, and all contents stored in +its `log_level` entry are mounted into the Pod at path "`/etc/config/log_level`". +Note that this path is derived from the volume's `mountPath` and the `path` +keyed with `log_level`. + +{{< caution >}} +You must create a [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) before you can use it. +{{< /caution >}} + +{{< note >}} +A Container using a ConfigMap as a [subPath](#using-subpath) volume mount will not +receive ConfigMap updates. +{{< /note >}} + +### downwardAPI {#downwardapi} + +A `downwardAPI` volume is used to make downward API data available to applications. 
+It mounts a directory and writes the requested data in plain text files. + +{{< note >}} +A Container using Downward API as a [subPath](#using-subpath) volume mount will not +receive Downward API updates. +{{< /note >}} + +See the [`downwardAPI` volume example](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) for more details. + +### emptyDir {#emptydir} + +An `emptyDir` volume is first created when a Pod is assigned to a Node, and +exists as long as that Pod is running on that node. As the name says, it is +initially empty. Containers in the Pod can all read and write the same +files in the `emptyDir` volume, though that volume can be mounted at the same +or different paths in each Container. When a Pod is removed from a node for +any reason, the data in the `emptyDir` is deleted forever. + +{{< note >}} +A Container crashing does *NOT* remove a Pod from a node, so the data in an `emptyDir` volume is safe across Container crashes. +{{< /note >}} + +Some uses for an `emptyDir` are: + +* scratch space, such as for a disk-based merge sort +* checkpointing a long computation for recovery from crashes +* holding files that a content-manager Container fetches while a webserver + Container serves the data + +By default, `emptyDir` volumes are stored on whatever medium is backing the +node - that might be disk or SSD or network storage, depending on your +environment. However, you can set the `emptyDir.medium` field to `"Memory"` +to tell Kubernetes to mount a tmpfs (RAM-backed filesystem) for you instead. +While tmpfs is very fast, be aware that unlike disks, tmpfs is cleared on +node reboot and any files you write will count against your Container's +memory limit. + +#### Example Pod + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-pd +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /cache + name: cache-volume + volumes: + - name: cache-volume + emptyDir: {} +``` + +### fc (fibre channel) {#fc} + +An `fc` volume allows an existing fibre channel volume to be mounted in a Pod. +You can specify single or multiple target World Wide Names using the parameter +`targetWWNs` in your volume configuration. If multiple WWNs are specified, +targetWWNs expect that those WWNs are from multi-path connections. + +{{< caution >}} +You must configure FC SAN Zoning to allocate and mask those LUNs (volumes) to the target WWNs beforehand so that Kubernetes hosts can access them. +{{< /caution >}} + +See the [FC example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/fibre_channel) for more details. + +### flocker {#flocker} + +[Flocker](https://github.com/ClusterHQ/flocker) is an open-source clustered Container data volume manager. It provides management +and orchestration of data volumes backed by a variety of storage backends. + +A `flocker` volume allows a Flocker dataset to be mounted into a Pod. If the +dataset does not already exist in Flocker, it needs to be first created with the Flocker +CLI or by using the Flocker API. If the dataset already exists it will be +reattached by Flocker to the node that the Pod is scheduled. This means data +can be "handed off" between Pods as required. + +{{< caution >}} +You must have your own Flocker installation running before you can use it. +{{< /caution >}} + +See the [Flocker example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/flocker) for more details. 
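+
+For reference, a minimal Pod sketch referencing an existing Flocker dataset might
+look like the following; the dataset name `my-flocker-dataset` is a hypothetical
+placeholder and must already exist in Flocker:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-flocker
+spec:
+  containers:
+  - image: k8s.gcr.io/test-webserver
+    name: test-container
+    volumeMounts:
+    - mountPath: /test-flocker
+      name: flocker-volume
+  volumes:
+  - name: flocker-volume
+    # This Flocker dataset must already exist.
+    flocker:
+      datasetName: my-flocker-dataset
+```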
+ +### gcePersistentDisk {#gcepersistentdisk} + +A `gcePersistentDisk` volume mounts a Google Compute Engine (GCE) [Persistent +Disk](http://cloud.google.com/compute/docs/disks) into your Pod. Unlike +`emptyDir`, which is erased when a Pod is removed, the contents of a PD are +preserved and the volume is merely unmounted. This means that a PD can be +pre-populated with data, and that data can be "handed off" between Pods. + +{{< caution >}} +You must create a PD using `gcloud` or the GCE API or UI before you can use it. +{{< /caution >}} + +There are some restrictions when using a `gcePersistentDisk`: + +* the nodes on which Pods are running must be GCE VMs +* those VMs need to be in the same GCE project and zone as the PD + +A feature of PD is that they can be mounted as read-only by multiple consumers +simultaneously. This means that you can pre-populate a PD with your dataset +and then serve it in parallel from as many Pods as you need. Unfortunately, +PDs can only be mounted by a single consumer in read-write mode - no +simultaneous writers allowed. + +Using a PD on a Pod controlled by a ReplicationController will fail unless +the PD is read-only or the replica count is 0 or 1. + +#### Creating a PD + +Before you can use a GCE PD with a Pod, you need to create it. + +```shell +gcloud compute disks create --size=500GB --zone=us-central1-a my-data-disk +``` + +#### Example Pod + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-pd +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + # This GCE PD must already exist. + gcePersistentDisk: + pdName: my-data-disk + fsType: ext4 +``` + +#### Regional Persistent Disks +{{< feature-state for_k8s_version="v1.10" state="beta" >}} + +The [Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/#repds) feature allows the creation of Persistent Disks that are available in two zones within the same region. In order to use this feature, the volume must be provisioned as a PersistentVolume; referencing the volume directly from a pod is not supported. + +#### Manually provisioning a Regional PD PersistentVolume +Dynamic provisioning is possible using a [StorageClass for GCE PD](/docs/concepts/storage/storage-classes/#gce). +Before creating a PersistentVolume, you must create the PD: +```shell +gcloud beta compute disks create --size=500GB my-data-disk + --region us-central1 + --replica-zones us-central1-a,us-central1-b +``` +Example PersistentVolume spec: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: test-volume + labels: + failure-domain.beta.kubernetes.io/zone: us-central1-a__us-central1-b +spec: + capacity: + storage: 400Gi + accessModes: + - ReadWriteOnce + gcePersistentDisk: + pdName: my-data-disk + fsType: ext4 +``` + +#### CSI Migration + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature for GCE PD, when enabled, shims all plugin operations +from the existing in-tree plugin to the `pd.csi.storage.gke.io` Container +Storage Interface (CSI) Driver. In order to use this feature, the [GCE PD CSI +Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationGCE` +Alpha features must be enabled. + +### gitRepo (deprecated) {#gitrepo} + +{{< warning >}} +The gitRepo volume type is deprecated. 
To provision a container with a git repo, mount an [EmptyDir](#emptydir) into an InitContainer that clones the repo using git, then mount the [EmptyDir](#emptydir) into the Pod's container. +{{< /warning >}} + +A `gitRepo` volume is an example of what can be done as a volume plugin. It +mounts an empty directory and clones a git repository into it for your Pod to +use. In the future, such volumes may be moved to an even more decoupled model, +rather than extending the Kubernetes API for every such use case. + +Here is an example of gitRepo volume: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: server +spec: + containers: + - image: nginx + name: nginx + volumeMounts: + - mountPath: /mypath + name: git-volume + volumes: + - name: git-volume + gitRepo: + repository: "git@somewhere:me/my-git-repository.git" + revision: "22f1d8406d464b0c0874075539c1f2e96c253775" +``` + +### glusterfs {#glusterfs} + +A `glusterfs` volume allows a [Glusterfs](http://www.gluster.org) (an open +source networked filesystem) volume to be mounted into your Pod. Unlike +`emptyDir`, which is erased when a Pod is removed, the contents of a +`glusterfs` volume are preserved and the volume is merely unmounted. This +means that a glusterfs volume can be pre-populated with data, and that data can +be "handed off" between Pods. GlusterFS can be mounted by multiple writers +simultaneously. + +{{< caution >}} +You must have your own GlusterFS installation running before you can use it. +{{< /caution >}} + +See the [GlusterFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/glusterfs) for more details. + +### hostPath {#hostpath} + +A `hostPath` volume mounts a file or directory from the host node's filesystem +into your Pod. This is not something that most Pods will need, but it offers a +powerful escape hatch for some applications. + +For example, some uses for a `hostPath` are: + +* running a Container that needs access to Docker internals; use a `hostPath` + of `/var/lib/docker` +* running cAdvisor in a Container; use a `hostPath` of `/sys` +* allowing a Pod to specify whether a given `hostPath` should exist prior to the + Pod running, whether it should be created, and what it should exist as + +In addition to the required `path` property, user can optionally specify a `type` for a `hostPath` volume. + +The supported values for field `type` are: + + +| Value | Behavior | +|:------|:---------| +| | Empty string (default) is for backward compatibility, which means that no checks will be performed before mounting the hostPath volume. | +| `DirectoryOrCreate` | If nothing exists at the given path, an empty directory will be created there as needed with permission set to 0755, having the same group and ownership with Kubelet. | +| `Directory` | A directory must exist at the given path | +| `FileOrCreate` | If nothing exists at the given path, an empty file will be created there as needed with permission set to 0644, having the same group and ownership with Kubelet. 
| +| `File` | A file must exist at the given path | +| `Socket` | A UNIX socket must exist at the given path | +| `CharDevice` | A character device must exist at the given path | +| `BlockDevice` | A block device must exist at the given path | + +Watch out when using this type of volume, because: + +* Pods with identical configuration (such as created from a podTemplate) may + behave differently on different nodes due to different files on the nodes +* when Kubernetes adds resource-aware scheduling, as is planned, it will not be + able to account for resources used by a `hostPath` +* the files or directories created on the underlying hosts are only writable by root. You + either need to run your process as root in a + [privileged Container](/docs/user-guide/security-context) or modify the file + permissions on the host to be able to write to a `hostPath` volume + +#### Example Pod + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-pd +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + # directory location on host + path: /data + # this field is optional + type: Directory +``` + +### iscsi {#iscsi} + +An `iscsi` volume allows an existing iSCSI (SCSI over IP) volume to be mounted +into your Pod. Unlike `emptyDir`, which is erased when a Pod is removed, the +contents of an `iscsi` volume are preserved and the volume is merely +unmounted. This means that an iscsi volume can be pre-populated with data, and +that data can be "handed off" between Pods. + +{{< caution >}} +You must have your own iSCSI server running with the volume created before you can use it. +{{< /caution >}} + +A feature of iSCSI is that it can be mounted as read-only by multiple consumers +simultaneously. This means that you can pre-populate a volume with your dataset +and then serve it in parallel from as many Pods as you need. Unfortunately, +iSCSI volumes can only be mounted by a single consumer in read-write mode - no +simultaneous writers allowed. + +See the [iSCSI example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/iscsi) for more details. + +### local {#local} + +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + +A `local` volume represents a mounted local storage device such as a disk, +partition or directory. + +Local volumes can only be used as a statically created PersistentVolume. Dynamic +provisioning is not supported yet. + +Compared to `hostPath` volumes, local volumes can be used in a durable and +portable manner without manually scheduling Pods to nodes, as the system is aware +of the volume's node constraints by looking at the node affinity on the PersistentVolume. + +However, local volumes are still subject to the availability of the underlying +node and are not suitable for all applications. If a node becomes unhealthy, +then the local volume will also become inaccessible, and a Pod using it will not +be able to run. Applications using local volumes must be able to tolerate this +reduced availability, as well as potential data loss, depending on the +durability characteristics of the underlying disk. + +The following is an example of PersistentVolume spec using a `local` volume and +`nodeAffinity`: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: example-pv +spec: + capacity: + storage: 100Gi + # volumeMode field requires BlockVolume Alpha feature gate to be enabled. 
+ volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Delete + storageClassName: local-storage + local: + path: /mnt/disks/ssd1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - example-node +``` + +PersistentVolume `nodeAffinity` is required when using local volumes. It enables +the Kubernetes scheduler to correctly schedule Pods using local volumes to the +correct node. + +PersistentVolume `volumeMode` can now be set to "Block" (instead of the default +value "Filesystem") to expose the local volume as a raw block device. The +`volumeMode` field requires `BlockVolume` Alpha feature gate to be enabled. + +When using local volumes, it is recommended to create a StorageClass with +`volumeBindingMode` set to `WaitForFirstConsumer`. See the +[example](/docs/concepts/storage/storage-classes/#local). Delaying volume binding ensures +that the PersistentVolumeClaim binding decision will also be evaluated with any +other node constraints the Pod may have, such as node resource requirements, node +selectors, Pod affinity, and Pod anti-affinity. + +An external static provisioner can be run separately for improved management of +the local volume lifecycle. Note that this provisioner does not support dynamic +provisioning yet. For an example on how to run an external local provisioner, +see the [local volume provisioner user +guide](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner). + +{{< note >}} +The local PersistentVolume requires manual cleanup and deletion by the +user if the external static provisioner is not used to manage the volume +lifecycle. +{{< /note >}} + +### nfs {#nfs} + +An `nfs` volume allows an existing NFS (Network File System) share to be +mounted into your Pod. Unlike `emptyDir`, which is erased when a Pod is +removed, the contents of an `nfs` volume are preserved and the volume is merely +unmounted. This means that an NFS volume can be pre-populated with data, and +that data can be "handed off" between Pods. NFS can be mounted by multiple +writers simultaneously. + +{{< caution >}} +You must have your own NFS server running with the share exported before you can use it. +{{< /caution >}} + +See the [NFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/nfs) for more details. + +### persistentVolumeClaim {#persistentvolumeclaim} + +A `persistentVolumeClaim` volume is used to mount a +[PersistentVolume](/docs/concepts/storage/persistent-volumes/) into a Pod. PersistentVolumes are a +way for users to "claim" durable storage (such as a GCE PersistentDisk or an +iSCSI volume) without knowing the details of the particular cloud environment. + +See the [PersistentVolumes example](/docs/concepts/storage/persistent-volumes/) for more +details. + +### projected {#projected} + +A `projected` volume maps several existing volume sources into the same directory. + +Currently, the following types of volume sources can be projected: + +- [`secret`](#secret) +- [`downwardAPI`](#downwardapi) +- [`configMap`](#configmap) +- `serviceAccountToken` + +All sources are required to be in the same namespace as the Pod. For more details, +see the [all-in-one volume design document](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/node/all-in-one-volume.md). 
+ +The projection of service account tokens is a feature introduced in Kubernetes +1.11 and promoted to Beta in 1.12. +To enable this feature on 1.11, you need to explicitly set the `TokenRequestProjection` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to +True. + +#### Example Pod with a secret, a downward API, and a configmap. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: volume-test +spec: + containers: + - name: container-test + image: busybox + volumeMounts: + - name: all-in-one + mountPath: "/projected-volume" + readOnly: true + volumes: + - name: all-in-one + projected: + sources: + - secret: + name: mysecret + items: + - key: username + path: my-group/my-username + - downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "cpu_limit" + resourceFieldRef: + containerName: container-test + resource: limits.cpu + - configMap: + name: myconfigmap + items: + - key: config + path: my-group/my-config +``` + +#### Example Pod with multiple secrets with a non-default permission mode set. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: volume-test +spec: + containers: + - name: container-test + image: busybox + volumeMounts: + - name: all-in-one + mountPath: "/projected-volume" + readOnly: true + volumes: + - name: all-in-one + projected: + sources: + - secret: + name: mysecret + items: + - key: username + path: my-group/my-username + - secret: + name: mysecret2 + items: + - key: password + path: my-group/my-password + mode: 511 +``` + +Each projected volume source is listed in the spec under `sources`. The +parameters are nearly the same with two exceptions: + +* For secrets, the `secretName` field has been changed to `name` to be consistent + with ConfigMap naming. +* The `defaultMode` can only be specified at the projected level and not for each + volume source. However, as illustrated above, you can explicitly set the `mode` + for each individual projection. + +When the `TokenRequestProjection` feature is enabled, you can inject the token +for the current [service account](/docs/reference/access-authn-authz/authentication/#service-account-tokens) +into a Pod at a specified path. Below is an example: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: sa-token-test +spec: + containers: + - name: container-test + image: busybox + volumeMounts: + - name: token-vol + mountPath: "/service-account" + readOnly: true + volumes: + - name: token-vol + projected: + sources: + - serviceAccountToken: + audience: api + expirationSeconds: 3600 + path: token +``` + +The example Pod has a projected volume containing the injected service account +token. This token can be used by Pod containers to access the Kubernetes API +server, for example. The `audience` field contains the intended audience of the +token. A recipient of the token must identify itself with an identifier specified +in the audience of the token, and otherwise should reject the token. This field +is optional and it defaults to the identifier of the API server. + +The `expirationSeconds` is the expected duration of validity of the service account +token. It defaults to 1 hour and must be at least 10 minutes (600 seconds). An administrator +can also limit its maximum value by specifying the `--service-account-max-token-expiration` +option for the API server. The `path` field specifies a relative path to the mount point +of the projected volume. 
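+
+For example, assuming the `mountPath: "/service-account"` and `path: token`
+values from the example above, the injected token would be readable inside the
+container at `/service-account/token`:
+
+```shell
+# The path below assumes mountPath "/service-account" and path "token" from the example above.
+cat /service-account/token
+```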
+ +{{< note >}} +A Container using a projected volume source as a [subPath](#using-subpath) volume mount will not +receive updates for those volume sources. +{{< /note >}} + +### portworxVolume {#portworxvolume} + +A `portworxVolume` is an elastic block storage layer that runs hyperconverged with +Kubernetes. [Portworx](https://portworx.com/use-case/kubernetes-storage/) fingerprints storage in a server, tiers based on capabilities, +and aggregates capacity across multiple servers. Portworx runs in-guest in virtual machines or on bare metal Linux nodes. + +A `portworxVolume` can be dynamically created through Kubernetes or it can also +be pre-provisioned and referenced inside a Kubernetes Pod. +Here is an example Pod referencing a pre-provisioned PortworxVolume: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-portworx-volume-pod +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /mnt + name: pxvol + volumes: + - name: pxvol + # This Portworx volume must already exist. + portworxVolume: + volumeID: "pxvol" + fsType: "" +``` + +{{< caution >}} +Make sure you have an existing PortworxVolume with name `pxvol` +before using it in the Pod. +{{< /caution >}} + +More details and examples can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/portworx/README.md). + +### quobyte {#quobyte} + +A `quobyte` volume allows an existing [Quobyte](http://www.quobyte.com) volume to +be mounted into your Pod. + +{{< caution >}} +You must have your own Quobyte setup running with the volumes +created before you can use it. +{{< /caution >}} + +Quobyte supports the {{< glossary_tooltip text="Container Storage Interface" term_id="csi" >}}. +CSI is the recommended plugin to use Quobyte volumes inside Kubernetes. Quobyte's +GitHub project has [instructions](https://github.com/quobyte/quobyte-csi#quobyte-csi) for deploying Quobyte using CSI, along with examples. + +### rbd {#rbd} + +An `rbd` volume allows a [Rados Block +Device](http://ceph.com/docs/master/rbd/rbd/) volume to be mounted into your +Pod. Unlike `emptyDir`, which is erased when a Pod is removed, the contents of +a `rbd` volume are preserved and the volume is merely unmounted. This +means that a RBD volume can be pre-populated with data, and that data can +be "handed off" between Pods. + +{{< caution >}} +You must have your own Ceph installation running before you can use RBD. +{{< /caution >}} + +A feature of RBD is that it can be mounted as read-only by multiple consumers +simultaneously. This means that you can pre-populate a volume with your dataset +and then serve it in parallel from as many Pods as you need. Unfortunately, +RBD volumes can only be mounted by a single consumer in read-write mode - no +simultaneous writers allowed. + +See the [RBD example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/rbd) for more details. + +### scaleIO {#scaleio} + +ScaleIO is a software-based storage platform that can use existing hardware to +create clusters of scalable shared block networked storage. The `scaleIO` volume +plugin allows deployed Pods to access existing ScaleIO +volumes (or it can dynamically provision new volumes for persistent volume claims, see +[ScaleIO Persistent Volumes](/docs/concepts/storage/persistent-volumes/#scaleio)). + +{{< caution >}} +You must have an existing ScaleIO cluster already setup and +running with the volumes created before you can use them. 
+{{< /caution >}} + +The following is an example of Pod configuration with ScaleIO: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: pod-0 +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: pod-0 + volumeMounts: + - mountPath: /test-pd + name: vol-0 + volumes: + - name: vol-0 + scaleIO: + gateway: https://localhost:443/api + system: scaleio + protectionDomain: sd0 + storagePool: sp1 + volumeName: vol-0 + secretRef: + name: sio-secret + fsType: xfs +``` + +For further detail, please see the [ScaleIO examples](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/scaleio). + +### secret {#secret} + +A `secret` volume is used to pass sensitive information, such as passwords, to +Pods. You can store secrets in the Kubernetes API and mount them as files for +use by Pods without coupling to Kubernetes directly. `secret` volumes are +backed by tmpfs (a RAM-backed filesystem) so they are never written to +non-volatile storage. + +{{< caution >}} +You must create a secret in the Kubernetes API before you can use it. +{{< /caution >}} + +{{< note >}} +A Container using a Secret as a [subPath](#using-subpath) volume mount will not +receive Secret updates. +{{< /note >}} + +Secrets are described in more detail [here](/docs/user-guide/secrets). + +### storageOS {#storageos} + +A `storageos` volume allows an existing [StorageOS](https://www.storageos.com) +volume to be mounted into your Pod. + +StorageOS runs as a Container within your Kubernetes environment, making local +or attached storage accessible from any node within the Kubernetes cluster. +Data can be replicated to protect against node failure. Thin provisioning and +compression can improve utilization and reduce cost. + +At its core, StorageOS provides block storage to Containers, accessible via a file system. + +The StorageOS Container requires 64-bit Linux and has no additional dependencies. +A free developer license is available. + +{{< caution >}} +You must run the StorageOS Container on each node that wants to +access StorageOS volumes or that will contribute storage capacity to the pool. +For installation instructions, consult the +[StorageOS documentation](https://docs.storageos.com). +{{< /caution >}} + +```yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + name: redis + role: master + name: test-storageos-redis +spec: + containers: + - name: master + image: kubernetes/redis:v1 + env: + - name: MASTER + value: "true" + ports: + - containerPort: 6379 + volumeMounts: + - mountPath: /redis-master-data + name: redis-data + volumes: + - name: redis-data + storageos: + # The `redis-vol01` volume must already exist within StorageOS in the `default` namespace. + volumeName: redis-vol01 + fsType: ext4 +``` + +For more information including Dynamic Provisioning and Persistent Volume Claims, please see the +[StorageOS examples](https://github.com/kubernetes/examples/blob/master/staging/volumes/storageos). + +### vsphereVolume {#vspherevolume} + +{{< note >}} +Prerequisite: Kubernetes with vSphere Cloud Provider configured. For cloudprovider +configuration please refer [vSphere getting started guide](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/). +{{< /note >}} + +A `vsphereVolume` is used to mount a vSphere VMDK Volume into your Pod. The contents +of a volume are preserved when it is unmounted. It supports both VMFS and VSAN datastore. + +{{< caution >}} +You must create VMDK using one of the following methods before using with Pod. 
+{{< /caution >}} + +#### Creating a VMDK volume + +Choose one of the following methods to create a VMDK. + +{{< tabs name="tabs_volumes" >}} +{{% tab name="Create using vmkfstools" %}} +First ssh into ESX, then use the following command to create a VMDK: + +```shell +vmkfstools -c 2G /vmfs/volumes/DatastoreName/volumes/myDisk.vmdk +``` +{{% /tab %}} +{{% tab name="Create using vmware-vdiskmanager" %}} +Use the following command to create a VMDK: + +```shell +vmware-vdiskmanager -c -t 0 -s 40GB -a lsilogic myDisk.vmdk +``` +{{% /tab %}} + +{{< /tabs >}} + + +#### vSphere VMDK Example configuration + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-vmdk +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-vmdk + name: test-volume + volumes: + - name: test-volume + # This VMDK volume must already exist. + vsphereVolume: + volumePath: "[DatastoreName] volumes/myDisk" + fsType: ext4 +``` + +More examples can be found [here](https://github.com/kubernetes/examples/tree/master/staging/volumes/vsphere). + + +## Using subPath + +Sometimes, it is useful to share one volume for multiple uses in a single Pod. The `volumeMounts.subPath` +property can be used to specify a sub-path inside the referenced volume instead of its root. + +Here is an example of a Pod with a LAMP stack (Linux Apache Mysql PHP) using a single, shared volume. +The HTML contents are mapped to its `html` folder, and the databases will be stored in its `mysql` folder: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: my-lamp-site +spec: + containers: + - name: mysql + image: mysql + env: + - name: MYSQL_ROOT_PASSWORD + value: "rootpasswd" + volumeMounts: + - mountPath: /var/lib/mysql + name: site-data + subPath: mysql + - name: php + image: php:7.0-apache + volumeMounts: + - mountPath: /var/www/html + name: site-data + subPath: html + volumes: + - name: site-data + persistentVolumeClaim: + claimName: my-lamp-site-data +``` + +### Using subPath with expanded environment variables + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + + +Use the `subPathExpr` field to construct `subPath` directory names from Downward API environment variables. +Before you use this feature, you must enable the `VolumeSubpathEnvExpansion` feature gate. +The `subPath` and `subPathExpr` properties are mutually exclusive. + +In this example, a Pod uses `subPathExpr` to create a directory `pod1` within the hostPath volume `/var/log/pods`, using the pod name from the Downward API. The host directory `/var/log/pods/pod1` is mounted at `/logs` in the container. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: pod1 +spec: + containers: + - name: container1 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: busybox + command: [ "sh", "-c", "while [ true ]; do echo 'Hello'; sleep 10; done | tee -a /logs/hello.txt" ] + volumeMounts: + - name: workdir1 + mountPath: /logs + subPathExpr: $(POD_NAME) + restartPolicy: Never + volumes: + - name: workdir1 + hostPath: + path: /var/log/pods +``` + +## Resources + +The storage media (Disk, SSD, etc.) of an `emptyDir` volume is determined by the +medium of the filesystem holding the kubelet root dir (typically +`/var/lib/kubelet`). There is no limit on how much space an `emptyDir` or +`hostPath` volume can consume, and no isolation between Containers or between +Pods. 
+ +In the future, we expect that `emptyDir` and `hostPath` volumes will be able to +request a certain amount of space using a [resource](/docs/user-guide/compute-resources) +specification, and to select the type of media to use, for clusters that have +several media types. + +## Out-of-Tree Volume Plugins +The Out-of-tree volume plugins include the Container Storage Interface (CSI) +and Flexvolume. They enable storage vendors to create custom storage plugins +without adding them to the Kubernetes repository. + +Before the introduction of CSI and Flexvolume, all volume plugins (like +volume types listed above) were "in-tree" meaning they were built, linked, +compiled, and shipped with the core Kubernetes binaries and extend the core +Kubernetes API. This meant that adding a new storage system to Kubernetes (a +volume plugin) required checking code into the core Kubernetes code repository. + +Both CSI and Flexvolume allow volume plugins to be developed independent of +the Kubernetes code base, and deployed (installed) on Kubernetes clusters as +extensions. + +For storage vendors looking to create an out-of-tree volume plugin, please refer +to [this FAQ](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md). + +### CSI + +[Container Storage Interface](https://github.com/container-storage-interface/spec/blob/master/spec.md) (CSI) +defines a standard interface for container orchestration systems (like +Kubernetes) to expose arbitrary storage systems to their container workloads. + +Please read the [CSI design proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md) for more information. + +CSI support was introduced as alpha in Kubernetes v1.9, moved to beta in +Kubernetes v1.10, and is GA in Kubernetes v1.13. + +{{< note >}} +Support for CSI spec versions 0.2 and 0.3 are deprecated in Kubernetes +v1.13 and will be removed in a future release. +{{< /note >}} + +{{< note >}} +CSI drivers may not be compatible across all Kubernetes releases. +Please check the specific CSI driver's documentation for supported +deployments steps for each Kubernetes release and a compatibility matrix. +{{< /note >}} + +Once a CSI compatible volume driver is deployed on a Kubernetes cluster, users +may use the `csi` volume type to attach, mount, etc. the volumes exposed by the +CSI driver. + +The `csi` volume type does not support direct reference from Pod and may only be +referenced in a Pod via a `PersistentVolumeClaim` object. + +The following fields are available to storage administrators to configure a CSI +persistent volume: + +- `driver`: A string value that specifies the name of the volume driver to use. + This value must correspond to the value returned in the `GetPluginInfoResponse` + by the CSI driver as defined in the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#getplugininfo). + It is used by Kubernetes to identify which CSI driver to call out to, and by + CSI driver components to identify which PV objects belong to the CSI driver. +- `volumeHandle`: A string value that uniquely identifies the volume. This value + must correspond to the value returned in the `volume.id` field of the + `CreateVolumeResponse` by the CSI driver as defined in the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume). + The value is passed as `volume_id` on all calls to the CSI volume driver when + referencing the volume. 
+- `readOnly`: An optional boolean value indicating whether the volume is to be
+  "ControllerPublished" (attached) as read only. Default is false. This value is
+  passed to the CSI driver via the `readonly` field in the
+  `ControllerPublishVolumeRequest`.
+- `fsType`: If the PV's `VolumeMode` is `Filesystem`, then this field may be used
+  to specify the filesystem that should be used to mount the volume. If the
+  volume has not been formatted and formatting is supported, this value will be
+  used to format the volume.
+  This value is passed to the CSI driver via the `VolumeCapability` field of
+  `ControllerPublishVolumeRequest`, `NodeStageVolumeRequest`, and
+  `NodePublishVolumeRequest`.
+- `volumeAttributes`: A map of string to string that specifies static properties
+  of a volume. This map must correspond to the map returned in the
+  `volume.attributes` field of the `CreateVolumeResponse` by the CSI driver as
+  defined in the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume).
+  The map is passed to the CSI driver via the `volume_attributes` field in the
+  `ControllerPublishVolumeRequest`, `NodeStageVolumeRequest`, and
+  `NodePublishVolumeRequest`.
+- `controllerPublishSecretRef`: A reference to the secret object containing
+  sensitive information to pass to the CSI driver to complete the CSI
+  `ControllerPublishVolume` and `ControllerUnpublishVolume` calls. This field is
+  optional, and may be empty if no secret is required. If the secret object
+  contains more than one secret, all secrets are passed.
+- `nodeStageSecretRef`: A reference to the secret object containing
+  sensitive information to pass to the CSI driver to complete the CSI
+  `NodeStageVolume` call. This field is optional, and may be empty if no secret
+  is required. If the secret object contains more than one secret, all secrets
+  are passed.
+- `nodePublishSecretRef`: A reference to the secret object containing
+  sensitive information to pass to the CSI driver to complete the CSI
+  `NodePublishVolume` call. This field is optional, and may be empty if no
+  secret is required. If the secret object contains more than one secret, all
+  secrets are passed.
+
+#### CSI raw block volume support
+
+{{< feature-state for_k8s_version="v1.14" state="beta" >}}
+
+Starting with version 1.11, CSI introduced support for raw block volumes, which
+relies on the raw block volume feature that was introduced in a previous version of
+Kubernetes. This feature makes it possible for vendors with external CSI drivers to
+implement raw block volume support in Kubernetes workloads.
+
+CSI block volume support is feature-gated, but enabled by default. The two
+feature gates which must be enabled for this feature are `BlockVolume` and
+`CSIBlockVolume`.
+
+Learn how to
+[set up your PV/PVC with raw block volume support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support).
+
+#### Developer resources
+
+For more information on how to develop a CSI driver, refer to the
+[kubernetes-csi documentation](https://kubernetes-csi.github.io/docs/).
+
+#### Migrating to CSI drivers from in-tree plugins
+
+{{< feature-state for_k8s_version="v1.14" state="alpha" >}}
+
+The CSI Migration feature, when enabled, directs operations against existing in-tree
+plugins to corresponding CSI plugins (which are expected to be installed and configured).
+The feature implements the necessary translation logic and shims to re-route the
+operations in a seamless fashion. 
As a result, operators do not have to make any +configuration changes to existing Storage Classes, PVs or PVCs (referring to +in-tree plugins) when transitioning to a CSI driver that supersedes an in-tree plugin. + +In the alpha state, the operations and features that are supported include +provisioning/delete, attach/detach, mount/unmount and resizing of volumes. + +In-tree plugins that support CSI Migration and have a corresponding CSI driver implemented +are listed in the "Types of Volumes" section above. + +### Flexvolume {#flexVolume} + +Flexvolume is an out-of-tree plugin interface that has existed in Kubernetes +since version 1.2 (before CSI). It uses an exec-based model to interface with +drivers. Flexvolume driver binaries must be installed in a pre-defined volume +plugin path on each node (and in some cases master). + +Pods interact with Flexvolume drivers through the `flexvolume` in-tree plugin. +More details can be found [here](https://github.com/kubernetes/community/blob/master/contributors/devel/flexvolume.md). + +## Mount propagation + +Mount propagation allows for sharing volumes mounted by a Container to +other Containers in the same Pod, or even to other Pods on the same node. + +Mount propagation of a volume is controlled by `mountPropagation` field in Container.volumeMounts. +Its values are: + + * `None` - This volume mount will not receive any subsequent mounts + that are mounted to this volume or any of its subdirectories by the host. + In similar fashion, no mounts created by the Container will be visible on + the host. This is the default mode. + + This mode is equal to `private` mount propagation as described in the + [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + + * `HostToContainer` - This volume mount will receive all subsequent mounts + that are mounted to this volume or any of its subdirectories. + + In other words, if the host mounts anything inside the volume mount, the + Container will see it mounted there. + + Similarly, if any Pod with `Bidirectional` mount propagation to the same + volume mounts anything there, the Container with `HostToContainer` mount + propagation will see it. + + This mode is equal to `rslave` mount propagation as described in the + [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + + * `Bidirectional` - This volume mount behaves the same the `HostToContainer` mount. + In addition, all volume mounts created by the Container will be propagated + back to the host and to all Containers of all Pods that use the same volume. + + A typical use case for this mode is a Pod with a Flexvolume or CSI driver or + a Pod that needs to mount something on the host using a `hostPath` volume. + + This mode is equal to `rshared` mount propagation as described in the + [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + +{{< caution >}} +`Bidirectional` mount propagation can be dangerous. It can damage +the host operating system and therefore it is allowed only in privileged +Containers. Familiarity with Linux kernel behavior is strongly recommended. +In addition, any volume mounts created by Containers in Pods must be destroyed +(unmounted) by the Containers on termination. +{{< /caution >}} + +### Configuration +Before mount propagation can work properly on some deployments (CoreOS, +RedHat/Centos, Ubuntu) mount share must be configured correctly in +Docker as shown below. 
+ +Edit your Docker's `systemd` service file. Set `MountFlags` as follows: +```shell +MountFlags=shared +``` +Or, remove `MountFlags=slave` if present. Then restart the Docker daemon: +```shell +sudo systemctl daemon-reload +sudo systemctl restart docker +``` + + + +{{% capture whatsnext %}} +* Follow an example of [deploying WordPress and MySQL with Persistent Volumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/). +{{% /capture %}} diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md b/content/en/docs/concepts/workloads/controllers/daemonset.md index 518514748e4d5..37b9fac0f3157 100644 --- a/content/en/docs/concepts/workloads/controllers/daemonset.md +++ b/content/en/docs/concepts/workloads/controllers/daemonset.md @@ -20,8 +20,7 @@ Some typical uses of a DaemonSet are: - running a cluster storage daemon, such as `glusterd`, `ceph`, on each node. - running a logs collection daemon on every node, such as `fluentd` or `logstash`. -- running a node monitoring daemon on every node, such as [Prometheus Node Exporter]( - https://github.com/prometheus/node_exporter), [Sysdig Agent](https://sysdigdocs.atlassian.net/wiki/spaces/Platform), `collectd`, [Dynatrace OneAgent](https://www.dynatrace.com/technologies/kubernetes-monitoring/), [AppDynamics Agent](https://docs.appdynamics.com/display/CLOUD/Container+Visibility+with+Kubernetes), [SignalFx Agent](https://docs.signalfx.com/en/latest/integrations/agent/kubernetes-setup.html), [Datadog agent](https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/), [New Relic agent](https://docs.newrelic.com/docs/integrations/kubernetes-integration/installation/kubernetes-installation-configuration), Ganglia `gmond` or Instana agent. +- running a node monitoring daemon on every node, such as [Prometheus Node Exporter](https://github.com/prometheus/node_exporter), [Sysdig Agent](https://sysdigdocs.atlassian.net/wiki/spaces/Platform), `collectd`, [Dynatrace OneAgent](https://www.dynatrace.com/technologies/kubernetes-monitoring/), [AppDynamics Agent](https://docs.appdynamics.com/display/CLOUD/Container+Visibility+with+Kubernetes), [Datadog agent](https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/), [New Relic agent](https://docs.newrelic.com/docs/integrations/kubernetes-integration/installation/kubernetes-installation-configuration), Ganglia `gmond` or [Instana Agent](https://www.instana.com/supported-integrations/kubernetes-monitoring/). In a simple case, one DaemonSet, covering all nodes, would be used for each type of daemon. A more complex setup might use multiple DaemonSets for a single type of daemon, but with diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md.orig b/content/en/docs/concepts/workloads/controllers/daemonset.md.orig new file mode 100644 index 0000000000000..37b9fac0f3157 --- /dev/null +++ b/content/en/docs/concepts/workloads/controllers/daemonset.md.orig @@ -0,0 +1,246 @@ +--- +reviewers: +- enisoc +- erictune +- foxish +- janetkuo +- kow3ns +title: DaemonSet +content_template: templates/concept +weight: 50 +--- + +{{% capture overview %}} + +A _DaemonSet_ ensures that all (or some) Nodes run a copy of a Pod. As nodes are added to the +cluster, Pods are added to them. As nodes are removed from the cluster, those Pods are garbage +collected. Deleting a DaemonSet will clean up the Pods it created. + +Some typical uses of a DaemonSet are: + +- running a cluster storage daemon, such as `glusterd`, `ceph`, on each node. 
+- running a logs collection daemon on every node, such as `fluentd` or `logstash`. +- running a node monitoring daemon on every node, such as [Prometheus Node Exporter](https://github.com/prometheus/node_exporter), [Sysdig Agent](https://sysdigdocs.atlassian.net/wiki/spaces/Platform), `collectd`, [Dynatrace OneAgent](https://www.dynatrace.com/technologies/kubernetes-monitoring/), [AppDynamics Agent](https://docs.appdynamics.com/display/CLOUD/Container+Visibility+with+Kubernetes), [Datadog agent](https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/), [New Relic agent](https://docs.newrelic.com/docs/integrations/kubernetes-integration/installation/kubernetes-installation-configuration), Ganglia `gmond` or [Instana Agent](https://www.instana.com/supported-integrations/kubernetes-monitoring/). + +In a simple case, one DaemonSet, covering all nodes, would be used for each type of daemon. +A more complex setup might use multiple DaemonSets for a single type of daemon, but with +different flags and/or different memory and cpu requests for different hardware types. + +{{% /capture %}} + + +{{% capture body %}} + +## Writing a DaemonSet Spec + +### Create a DaemonSet + +You can describe a DaemonSet in a YAML file. For example, the `daemonset.yaml` file below describes a DaemonSet that runs the fluentd-elasticsearch Docker image: + +{{< codenew file="controllers/daemonset.yaml" >}} + +* Create a DaemonSet based on the YAML file: +``` +kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml +``` + +### Required Fields + +As with all other Kubernetes config, a DaemonSet needs `apiVersion`, `kind`, and `metadata` fields. For +general information about working with config files, see [deploying applications](/docs/user-guide/deploying-applications/), +[configuring containers](/docs/tasks/), and [object management using kubectl](/docs/concepts/overview/working-with-objects/object-management/) documents. + +A DaemonSet also needs a [`.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) section. + +### Pod Template + +The `.spec.template` is one of the required fields in `.spec`. + +The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/pod-overview/#pod-templates). It has exactly the same schema as a [Pod](/docs/concepts/workloads/pods/pod/), except it is nested and does not have an `apiVersion` or `kind`. + +In addition to required fields for a Pod, a Pod template in a DaemonSet has to specify appropriate +labels (see [pod selector](#pod-selector)). + +A Pod Template in a DaemonSet must have a [`RestartPolicy`](/docs/user-guide/pod-states) + equal to `Always`, or be unspecified, which defaults to `Always`. + +### Pod Selector + +The `.spec.selector` field is a pod selector. It works the same as the `.spec.selector` of +a [Job](/docs/concepts/jobs/run-to-completion-finite-workloads/). + +As of Kubernetes 1.8, you must specify a pod selector that matches the labels of the +`.spec.template`. The pod selector will no longer be defaulted when left empty. Selector +defaulting was not compatible with `kubectl apply`. Also, once a DaemonSet is created, +its `.spec.selector` can not be mutated. Mutating the pod selector can lead to the +unintentional orphaning of Pods, and it was found to be confusing to users. + +The `.spec.selector` is an object consisting of two fields: + +* `matchLabels` - works the same as the `.spec.selector` of a [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/). 
+* `matchExpressions` - allows to build more sophisticated selectors by specifying key, + list of values and an operator that relates the key and values. + +When the two are specified the result is ANDed. + +If the `.spec.selector` is specified, it must match the `.spec.template.metadata.labels`. Config with these not matching will be rejected by the API. + +Also you should not normally create any Pods whose labels match this selector, either directly, via +another DaemonSet, or via other controller such as ReplicaSet. Otherwise, the DaemonSet +controller will think that those Pods were created by it. Kubernetes will not stop you from doing +this. One case where you might want to do this is manually create a Pod with a different value on +a node for testing. + +### Running Pods on Only Some Nodes + +If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will +create Pods on nodes which match that [node +selector](/docs/concepts/configuration/assign-pod-node/). Likewise if you specify a `.spec.template.spec.affinity`, +then DaemonSet controller will create Pods on nodes which match that [node affinity](/docs/concepts/configuration/assign-pod-node/). +If you do not specify either, then the DaemonSet controller will create Pods on all nodes. + +## How Daemon Pods are Scheduled + +### Scheduled by DaemonSet controller (disabled by default since 1.12) + +Normally, the machine that a Pod runs on is selected by the Kubernetes scheduler. However, Pods +created by the DaemonSet controller have the machine already selected (`.spec.nodeName` is specified +when the Pod is created, so it is ignored by the scheduler). Therefore: + + - The [`unschedulable`](/docs/admin/node/#manual-node-administration) field of a node is not respected + by the DaemonSet controller. + - The DaemonSet controller can make Pods even when the scheduler has not been started, which can help cluster + bootstrap. + + +### Scheduled by default scheduler (enabled by default since 1.12) + +{{< feature-state state="beta" for-kubernetes-version="1.12" >}} + +A DaemonSet ensures that all eligible nodes run a copy of a Pod. Normally, the +node that a Pod runs on is selected by the Kubernetes scheduler. However, +DaemonSet pods are created and scheduled by the DaemonSet controller instead. +That introduces the following issues: + + * Inconsistent Pod behavior: Normal Pods waiting to be scheduled are created + and in `Pending` state, but DaemonSet pods are not created in `Pending` + state. This is confusing to the user. + * [Pod preemption](/docs/concepts/configuration/pod-priority-preemption/) + is handled by default scheduler. When preemption is enabled, the DaemonSet controller + will make scheduling decisions without considering pod priority and preemption. + +`ScheduleDaemonSetPods` allows you to schedule DaemonSets using the default +scheduler instead of the DaemonSet controller, by adding the `NodeAffinity` term +to the DaemonSet pods, instead of the `.spec.nodeName` term. The default +scheduler is then used to bind the pod to the target host. If node affinity of +the DaemonSet pod already exists, it is replaced. The DaemonSet controller only +performs these operations when creating or modifying DaemonSet pods, and no +changes are made to the `spec.template` of the DaemonSet. 
+ +```yaml +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchFields: + - key: metadata.name + operator: In + values: + - target-host-name +``` + +In addition, `node.kubernetes.io/unschedulable:NoSchedule` toleration is added +automatically to DaemonSet Pods. The default scheduler ignores +`unschedulable` Nodes when scheduling DaemonSet Pods. + + +### Taints and Tolerations + +Although Daemon Pods respect +[taints and tolerations](/docs/concepts/configuration/taint-and-toleration), +the following tolerations are added to DaemonSet Pods automatically according to +the related features. + +| Toleration Key | Effect | Version | Description | +| ---------------------------------------- | ---------- | ------- | ------------------------------------------------------------ | +| `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | +| `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | +| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | | +| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | | +| `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | DaemonSet pods tolerate unschedulable attributes by default scheduler. | +| `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | DaemonSet pods, who uses host network, tolerate network-unavailable attributes by default scheduler. | + + + + +## Communicating with Daemon Pods + +Some possible patterns for communicating with Pods in a DaemonSet are: + +- **Push**: Pods in the DaemonSet are configured to send updates to another service, such + as a stats database. They do not have clients. +- **NodeIP and Known Port**: Pods in the DaemonSet can use a `hostPort`, so that the pods are reachable via the node IPs. Clients know the list of node IPs somehow, and know the port by convention. +- **DNS**: Create a [headless service](/docs/concepts/services-networking/service/#headless-services) with the same pod selector, + and then discover DaemonSets using the `endpoints` resource or retrieve multiple A records from + DNS. +- **Service**: Create a service with the same Pod selector, and use the service to reach a + daemon on a random node. (No way to reach specific node.) + +## Updating a DaemonSet + +If node labels are changed, the DaemonSet will promptly add Pods to newly matching nodes and delete +Pods from newly not-matching nodes. + +You can modify the Pods that a DaemonSet creates. However, Pods do not allow all +fields to be updated. Also, the DaemonSet controller will use the original template the next +time a node (even with the same name) is created. + + +You can delete a DaemonSet. If you specify `--cascade=false` with `kubectl`, then the Pods +will be left on the nodes. You can then create a new DaemonSet with a different template. +The new DaemonSet with the different template will recognize all the existing Pods as having +matching labels. It will not modify or delete them despite a mismatch in the Pod template. +You will need to force new Pod creation by deleting the Pod or deleting the node. + +In Kubernetes version 1.6 and later, you can [perform a rolling update](/docs/tasks/manage-daemon/update-daemon-set/) on a DaemonSet. + +## Alternatives to DaemonSet + +### Init Scripts + +It is certainly possible to run daemon processes by directly starting them on a node (e.g. 
using
+`init`, `upstartd`, or `systemd`). This is perfectly fine. However, there are several advantages to
+running such processes via a DaemonSet:
+
+- Ability to monitor and manage logs for daemons in the same way as applications.
+- Same config language and tools (e.g. Pod templates, `kubectl`) for daemons and applications.
+- Running daemons in containers with resource limits increases the isolation of daemons from app
+  containers. However, this can also be accomplished by running the daemons in a container but not in a Pod
+  (e.g. by starting them directly via Docker).
+
+### Bare Pods
+
+It is possible to create Pods directly which specify a particular node to run on. However,
+a DaemonSet replaces Pods that are deleted or terminated for any reason, such as
+node failure or disruptive node maintenance (for example, a kernel upgrade). For this reason, you should
+use a DaemonSet rather than creating individual Pods.
+
+### Static Pods
+
+It is possible to create Pods by writing a file to a certain directory watched by the Kubelet. These
+are called [static pods](/docs/concepts/cluster-administration/static-pod/).
+Unlike DaemonSets, static Pods cannot be managed with kubectl
+or other Kubernetes API clients. Static Pods do not depend on the apiserver, making them useful
+in cluster bootstrapping cases. Also, static Pods may be deprecated in the future.
+
+### Deployments
+
+DaemonSets are similar to [Deployments](/docs/concepts/workloads/controllers/deployment/) in that
+they both create Pods, and those Pods have processes which are not expected to terminate (e.g. web servers,
+storage servers).
+
+Use a Deployment for stateless services, like frontends, where scaling up and down the
+number of replicas and rolling out updates are more important than controlling exactly which host
+the Pod runs on. Use a DaemonSet when it is important that a copy of a Pod always run on
+all or certain hosts, and when it needs to start before other Pods.
+
+{{% /capture %}}
diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md
index d2fb12a3e84b3..f9555caa514d5 100644
--- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md
+++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md
@@ -2,45 +2,40 @@ reviewers:
 - smarterclayton
 - lavalamp
-- whitlockjc
 - caesarxuchao
 - deads2k
 - liggitt
 - mbohlool
+- jpbetz
 title: Dynamic Admission Control
 content_template: templates/concept
 weight: 40
 ---
 
 {{% capture overview %}}
-The [admission controllers documentation](/docs/reference/access-authn-authz/admission-controllers/)
-introduces how to use standard, plugin-style admission controllers. However,
-plugin admission controllers are not flexible enough for all use cases, due to
-the following:
-
-* They need to be compiled into kube-apiserver.
-* They are only configurable when the apiserver starts up.
-
-*Admission Webhooks* (beta in 1.9) addresses these limitations. It allows
-admission controllers to be developed out-of-tree and configured at runtime.
-
-This page describes how to use Admission Webhooks.
-
+In addition to [compiled-in admission plugins](/docs/reference/access-authn-authz/admission-controllers/),
+admission plugins can be developed as extensions and run as webhooks configured at runtime.
+This page describes how to build, configure, and use admission webhooks.
{{% /capture %}} {{% capture body %}} -### What are admission webhooks? +## What are admission webhooks? Admission webhooks are HTTP callbacks that receive admission requests and do something with them. You can define two types of admission webhooks, [validating admission Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook) and [mutating admission webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook). -With validating admission Webhooks, you may reject requests to enforce custom -admission policies. With mutating admission Webhooks, you may change requests to -enforce custom defaults. +Mutating admission Webhooks are invoked first, and can modify objects sent to the API server to enforce custom defaults. +After all object modifications are complete, and after the incoming object is validated by the API server, +validating admission webhooks are invoked and can reject requests to enforce custom policies. + +{{< note >}} +Admission webhooks that need to guarantee they see the final state of the object in order to enforce policy +should use a validating admission webhook, since objects can be modified after being seen by mutating webhooks. +{{< /note >}} -### Experimenting with admission webhooks +## Experimenting with admission webhooks Admission webhooks are essentially part of the cluster control-plane. You should write and deploy them with great caution. Please read the [user @@ -64,15 +59,12 @@ In the following, we describe how to quickly experiment with admission webhooks. Please refer to the implementation of the [admission webhook server](https://github.com/kubernetes/kubernetes/blob/v1.13.0/test/images/webhook/main.go) that is validated in a Kubernetes e2e test. The webhook handles the -`admissionReview` requests sent by the apiservers, and sends back its decision -wrapped in `admissionResponse`. +`AdmissionReview` request sent by the apiservers, and sends back its decision +as an `AdmissionReview` object in the same version it received. + +See the [webhook request](#request) section for details on the data sent to webhooks. -the `admissionReview` request can have different versions (e.g. v1beta1 or `v1` in a future version). -The webhook can define what version they accept using `admissionReviewVersions` field. API server -will try to use first version in the list which it supports. If none of the versions specified -in this list supported by API server, validation will fail for this object. If the webhook -configuration has already been persisted, calls to the webhook will fail and be -subject to the failure policy. +See the [webhook response](#response) section for the data expected from webhooks. 
The example admission webhook server leaves the `ClientAuth` field [empty](https://github.com/kubernetes/kubernetes/blob/v1.13.0/test/images/webhook/config.go#L47-L48), @@ -96,12 +88,12 @@ your [webhook client configurations](https://github.com/kubernetes/kubernetes/bl You can dynamically configure what resources are subject to what admission webhooks via -[ValidatingWebhookConfiguration](https://github.com/kubernetes/kubernetes/blob/v1.13.0/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go#L84) +[ValidatingWebhookConfiguration](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#validatingwebhookconfiguration-v1beta1-admissionregistration-k8s-io) or -[MutatingWebhookConfiguration](https://github.com/kubernetes/kubernetes/blob/v1.13.0/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go#L114). +[MutatingWebhookConfiguration](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#mutatingwebhookconfiguration-v1beta1-admissionregistration-k8s-io). The following is an example `validatingWebhookConfiguration`, a mutating webhook -configuration is similar. +configuration is similar. See the [webhook configuration](#webhook-configuration) section for details about each config field. ```yaml apiVersion: admissionregistration.k8s.io/v1beta1 @@ -185,6 +177,7 @@ plugins: The schema of `admissionConfiguration` is defined [here](https://github.com/kubernetes/kubernetes/blob/v1.13.0/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go#L27). +See the [webhook configuration](#webhook-configuration) section for details about each config field. * In the kubeConfig file, provide the credentials: @@ -210,4 +203,596 @@ users: ``` Of course you need to set up the webhook server to handle these authentications. + +## Webhook request and response + +### Request + +Webhooks are sent a POST request, with `Content-Type: application/json`, +with an `AdmissionReview` API object in the `admission.k8s.io` API group +serialized to JSON as the body. + +Webhooks can specify what versions of `AdmissionReview` objects they accept +with the `admissionReviewVersions` field in their configuration: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + admissionReviewVersions: ["v1beta1"] + ... +``` + +If no `admissionReviewVersions` are specified, the default when creating +`admissionregistration.k8s.io/v1beta1` webhook configurations is `v1beta1`. + +API servers send the first `AdmissionReview` version in the `admissionReviewVersions` list they support. +If none of the versions in the list are supported by the API server, the configuration will not be allowed to be created. +If an API server encounters a webhook configuration that was previously created and does not support any of the `AdmissionReview` +versions the API server knows how to send, attempts to call to the webhook will fail and be subject to the [failure policy](#failure-policy). 
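+
+The request example below shows an update to the `scale` subresource of a `Deployment`.
+A webhook that wanted to intercept such requests could register a rule like the following
+(a hypothetical sketch; the fields under `rules` are described in the
+[webhook configuration](#webhook-configuration) section):
+
+```yaml
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: ValidatingWebhookConfiguration
+...
+webhooks:
+- name: my-webhook.example.com
+  rules:
+  # Matches updates to the scale subresource of Deployments in the apps API group
+  - operations: ["UPDATE"]
+    apiGroups: ["apps"]
+    apiVersions: ["v1"]
+    resources: ["deployments/scale"]
+    scope: "Namespaced"
+  ...
+```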
+ +This example shows the data contained in an `AdmissionReview` object +for a request to update the `scale` subresource of an `apps/v1` `Deployment`: + +```json +{ + "apiVersion": "admission.k8s.io/v1beta1", + "kind": "AdmissionReview", + "request": { + // Random uid uniquely identifying this admission call + "uid": "705ab4f5-6393-11e8-b7cc-42010a800002", + + // Fully-qualified group/version/kind of the incoming object + "kind": {"group":"autoscaling","version":"v1","kind":"Scale"}, + // Fully-qualified group/version/kind of the resource being modified + "resource": {"group":"apps","version":"v1","resource":"deployments"}, + // subresource, if the request is to a subresource + "subResource": "scale", + + // Fully-qualified group/version/kind of the incoming object in the original request to the API server. + // This only differs from `kind` if the webhook specified `matchPolicy: Equivalent` and the + // original request to the API server was converted to a version the webhook registered for. + // Only sent by v1.15+ API servers. + "requestKind": {"group":"autoscaling","version":"v1","kind":"Scale"}, + // Fully-qualified group/version/kind of the resource being modified in the original request to the API server. + // This only differs from `resource` if the webhook specified `matchPolicy: Equivalent` and the + // original request to the API server was converted to a version the webhook registered for. + // Only sent by v1.15+ API servers. + "requestResource": {"group":"apps","version":"v1","resource":"deployments"}, + // subresource, if the request is to a subresource + // This only differs from `subResource` if the webhook specified `matchPolicy: Equivalent` and the + // original request to the API server was converted to a version the webhook registered for. + // Only sent by v1.15+ API servers. + "requestSubResource": "scale", + + // Name of the resource being modified + "name": "my-deployment", + // Namespace of the resource being modified, if the resource is namespaced (or is a Namespace object) + "namespace": "my-namespace", + + // operation can be CREATE, UPDATE, DELETE, or CONNECT + "operation": "UPDATE", + + "userInfo": { + // Username of the authenticated user making the request to the API server + "username": "admin", + // UID of the authenticated user making the request to the API server + "uid": "014fbff9a07c", + // Group memberships of the authenticated user making the request to the API server + "groups": ["system:authenticated","my-admin-group"], + // Arbitrary extra info associated with the user making the request to the API server. + // This is populated by the API server authentication layer and should be included + // if any SubjectAccessReview checks are performed by the webhook. + "extra": { + "some-key":["some-value1", "some-value2"] + } + }, + + // object is the new object being admitted. + // It is null for DELETE operations. + "object": {"apiVersion":"autoscaling/v1","kind":"Scale",...}, + // oldObject is the existing object. + // It is null for CREATE and CONNECT operations (and for DELETE operations in API servers prior to v1.15.0) + "oldObject": {"apiVersion":"autoscaling/v1","kind":"Scale",...}, + // options contains the options for the operation being admitted, like meta.k8s.io/v1 CreateOptions, UpdateOptions, or DeleteOptions. + // It is null for CONNECT operations. + // Only sent by v1.15+ API servers. 
+    "options": {"apiVersion":"meta.k8s.io/v1","kind":"UpdateOptions",...},
+
+    // dryRun indicates the API request is running in dry run mode and will not be persisted.
+    // Webhooks with side effects should avoid actuating those side effects when dryRun is true.
+    // See http://k8s.io/docs/reference/using-api/api-concepts/#make-a-dry-run-request for more details.
+    "dryRun": false
+  }
+}
+```
+
+### Response
+
+Webhooks respond with a 200 HTTP status code, `Content-Type: application/json`,
+and a body containing an `AdmissionReview` object (in the same version they were sent),
+with the `response` stanza populated, serialized to JSON.
+
+At a minimum, the `response` stanza must contain the following fields:
+* `uid`, copied from the `request.uid` sent to the webhook
+* `allowed`, either set to `true` or `false`
+
+Example of a minimal response from a webhook to allow a request:
+```json
+{
+  "apiVersion": "admission.k8s.io/v1beta1",
+  "kind": "AdmissionReview",
+  "response": {
+    "uid": "<value from request.uid>",
+    "allowed": true
+  }
+}
+```
+
+Example of a minimal response from a webhook to forbid a request:
+```json
+{
+  "apiVersion": "admission.k8s.io/v1beta1",
+  "kind": "AdmissionReview",
+  "response": {
+    "uid": "<value from request.uid>",
+    "allowed": false
+  }
+}
+```
+
+When rejecting a request, the webhook can customize the HTTP code and message returned to the user using the `status` field.
+The specified status object is returned to the user.
+See the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) for details about the status type.
+Example of a response to forbid a request, customizing the HTTP status code and message presented to the user:
+```json
+{
+  "apiVersion": "admission.k8s.io/v1beta1",
+  "kind": "AdmissionReview",
+  "response": {
+    "uid": "<value from request.uid>",
+    "allowed": false,
+    "status": {
+      "code": 403,
+      "message": "You cannot do this because it is Tuesday and your name starts with A"
+    }
+  }
+}
+```
+
+When allowing a request, a mutating admission webhook may optionally modify the incoming object as well.
+This is done using the `patch` and `patchType` fields in the response.
+The only currently supported `patchType` is `JSONPatch`.
+See the [JSON patch](http://jsonpatch.com/) documentation for more details.
+For `patchType: JSONPatch`, the `patch` field contains a base64-encoded array of JSON patch operations.
+
+As an example, a single patch operation that would set `spec.replicas` would be `[{"op": "add", "path": "/spec/replicas", "value": 3}]`
+
+Base64-encoded, this would be `W3sib3AiOiAiYWRkIiwgInBhdGgiOiAiL3NwZWMvcmVwbGljYXMiLCAidmFsdWUiOiAzfV0=`
+
+So a webhook response that applies that patch to set `spec.replicas` would be:
+```json
+{
+  "apiVersion": "admission.k8s.io/v1beta1",
+  "kind": "AdmissionReview",
+  "response": {
+    "uid": "<value from request.uid>",
+    "allowed": true,
+    "patchType": "JSONPatch",
+    "patch": "W3sib3AiOiAiYWRkIiwgInBhdGgiOiAiL3NwZWMvcmVwbGljYXMiLCAidmFsdWUiOiAzfV0="
+  }
+}
+```
+
+## Webhook configuration
+
+To register admission webhooks, create `MutatingWebhookConfiguration` or `ValidatingWebhookConfiguration` API objects.
+Each configuration can contain one or more webhooks. Each webhook defines the following things.
+
+### Matching requests: rules
+
+Each webhook must specify a list of rules used to determine if a request to the API server should be sent to the webhook.
+Each rule specifies one or more operations, apiGroups, apiVersions, and resources, and a resource scope:
+
+* `operations` lists one or more operations to match.
Can be `"CREATE"`, `"UPDATE"`, `"DELETE"`, `"CONNECT"`, or `"*"` to match all. +* `apiGroups` lists one or more API groups to match. `""` is the core API group. `"*"` matches all API groups. +* `apiVersions` lists one or more API versions to match. `"*"` matches all API versions. +* `resources` lists one or more resources to match. + * `"*"` matches all resources, but not subresources. + * `"*/*"` matches all resources and subresources. + * `"pods/*"` matches all subresources of pods. + * `"*/status"` matches all status subresources. +* `scope` specifies a scope to match. Valid values are `"Cluster"`, `"Namespaced"`, and `"*"`. Subresources match the scope of their parent resource. Supported in v1.14+. Default is `"*"`, matching pre-1.14 behavior. + * `"Cluster"` means that only cluster-scoped resources will match this rule (Namespace API objects are cluster-scoped). + * `"Namespaced"` means that only namespaced resources will match this rule. + * `"*"` means that there are no scope restrictions. + +If an incoming request matches one of the specified operations, groups, versions, resources, and scope for any of a webhook's rules, the request is sent to the webhook. + +Here are other examples of rules that could be used to specify which resources should be intercepted. + +Match `CREATE` or `UPDATE` requests to `apps/v1` and `apps/v1beta1` `deployments` and `replicasets`: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: ["apps"] + apiVersions: ["v1", "v1beta1"] + resources: ["deployments", "replicasets"] + scope: "Namespaced" + ... +``` + +Match create requests for all resources (but not subresources) in all API groups and versions: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + rules: + - operations: ["CREATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*"] + scope: "*" + ... +``` + +Match update requests for all `status` subresources in all API groups and versions: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + rules: + - operations: ["UPDATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*/status"] + scope: "*" + ... +``` + +### Matching requests: objectSelector + +In v1.15+, webhooks may optionally limit which requests are intercepted based on the labels of the +objects they would be sent, by specifying an `objectSelector`. If specified, the objectSelector +is evaluated against both the object and oldObject that would be sent to the webhook, +and is considered to match if either object matches the selector. + +A null object (oldObject in the case of create, or newObject in the case of delete), +or an object that cannot have labels (like a `DeploymentRollback` or a `PodProxyOptions` object) +is not considered to match. + +Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. + +This example shows a mutating webhook that would match a `CREATE` of any resource with the label `foo: bar`: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +... 
+webhooks: +- name: my-webhook.example.com + objectSelector: + matchLabels: + foo: bar + rules: + - operations: ["CREATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*"] + scope: "*" + ... +``` + +See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors. + +### Matching requests: namespaceSelector + +Webhooks may optionally limit which requests for namespaced resources are intercepted, +based on the labels of the containing namespace, by specifying a `namespaceSelector`. + +The `namespaceSelector` decides whether to run the webhook on a request for a namespaced resource +(or a Namespace object), based on whether the namespace's labels match the selector. +If the object itself is a namespace, the matching is performed on object.metadata.labels. +If the object is a cluster scoped resource other than a Namespace, `namespaceSelector` has no effect. + +This example shows a mutating webhook that matches a `CREATE` of any namespaced resource inside a namespace +that does not have a "runlevel" label of "0" or "1": + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + namespaceSelector: + matchExpressions: + - key: runlevel + operator: NotIn + values: ["0","1"] + rules: + - operations: ["CREATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*"] + scope: "Namespaced" + ... +``` + +This example shows a validating webhook that matches a `CREATE` of any namespaced resource inside a namespace +that is associated with the "environment" of "prod" or "staging": + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + namespaceSelector: + matchExpressions: + - key: environment + operator: In + values: ["prod","staging"] + rules: + - operations: ["CREATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*"] + scope: "Namespaced" + ... +``` + +See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors. + +### Matching requests: matchPolicy + +API servers can make objects available via multiple API groups or versions. +For example, the Kubernetes API server allows creating and modifying `Deployment` objects +via `extensions/v1beta1`, `apps/v1beta1`, `apps/v1beta2`, and `apps/v1` APIs. + +For example, if a webhook only specified a rule for some API groups/versions (like `apiGroups:["apps"], apiVersions:["v1","v1beta1"]`), +and a request was made to modify the resource via another API group/version (like `extensions/v1beta1`), +the request would not be sent to the webhook. + +In v1.15+, `matchPolicy` lets a webhook define how its `rules` are used to match incoming requests. +Allowed values are `Exact` or `Equivalent`. The default in `v1beta1` is `Exact`. + +* `Exact` means a request should be intercepted only if it exactly matches a specified rule. +* `Equivalent` means a request should be intercepted if modifies a resource listed in `rules`, even via another API group or version. 
+ +In the example given above, the webhook that only registered for `apps/v1` could use `matchPolicy`: +* `matchPolicy: Exact` would mean the `extensions/v1beta1` request would not be sent to the webhook +* `matchPolicy: Equivalent` means the `extensions/v1beta1` request would be sent to the webhook (with the objects converted to a version the webhook had specified: `apps/v1`) + +Specifying `Equivalent` is recommended, and ensures that webhooks continue to intercept the +resources they expect when upgrades enable new versions of the resource in the API server. + +When a resource stops being served by the API server, it is no longer considered equivalent to other versions of that resource that are still served. +For example, deprecated `extensions/v1beta1` deployments are scheduled to stop being served by default in v1.16. +Once that occurs, a webhook with a `apiGroups:["extensions"], apiVersions:["v1beta1"], resources:["deployments"]` rule +would no longer intercept deployments created via `apps/v1` APIs. For that reason, webhooks should prefer registering +for stable versions of resources. + +This example shows a validating webhook that intercepts modifications to deployments (no matter the API group or version), +and is always sent an `apps/v1` `Deployment` object: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + matchPolicy: Equivalent + rules: + - operations: ["CREATE","UPDATE","DELETE"] + apiGroups: ["apps"] + apiVersions: ["v1"] + resources: ["deployments"] + scope: "Namespaced" + ... +``` + +### Contacting the webhook + +Once the API server has determined a request should be sent to a webhook, +it needs to know how to contact the webhook. This is specified in the `clientConfig` +stanza of the webhook configuration. + +Webhooks can either be called via a URL or a service reference, +and can optionally include a custom CA bundle to use to verify the TLS connection. + +#### URL + +`url` gives the location of the webhook, in standard URL form +(`scheme://host:port/path`). + +The `host` should not refer to a service running in the cluster; use +a service reference by specifying the `service` field instead. +The host might be resolved via external DNS in some apiservers +(e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would +be a layering violation). `host` may also be an IP address. + +Please note that using `localhost` or `127.0.0.1` as a `host` is +risky unless you take great care to run this webhook on all hosts +which run an apiserver which might need to make calls to this +webhook. Such installs are likely to be non-portable, i.e., not easy +to turn up in a new cluster. + +The scheme must be "https"; the URL must begin with "https://". + +Attempting to use a user or basic auth e.g. "user:password@" is not allowed. +Fragments ("#...") and query parameters ("?...") are also not allowed. + +Here is an example of a mutating webhook configured to call a URL +(and expects the TLS certificate to be verified using system trust roots, so does not specify a caBundle): +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + clientConfig: + url: "https://my-webhook.example.com:9443/my-webhook-path" + ... +``` + +#### Service reference + +The `service` stanza inside `clientConfig` is a reference to the service for this webhook. 
+If the webhook is running within the cluster, then you should use `service` instead of `url`.
+The service namespace and name are required. The port is optional and defaults to 443.
+The path is optional and defaults to "/".
+
+Here is an example of a mutating webhook configured to call a service on port "1234"
+at the subpath "/my-path", and to verify the TLS connection against the ServerName
+`my-service-name.my-service-namespace.svc` using a custom CA bundle:
+
+```yaml
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+...
+webhooks:
+- name: my-webhook.example.com
+  clientConfig:
+    caBundle: "Ci0tLS0tQk......tLS0K"
+    service:
+      namespace: my-service-namespace
+      name: my-service-name
+      path: /my-path
+      port: 1234
+  ...
+```
+
+### Side effects
+
+Webhooks typically operate only on the content of the `AdmissionReview` sent to them.
+Some webhooks, however, make out-of-band changes as part of processing admission requests.
+
+Webhooks that make out-of-band changes ("side effects") must also have a reconciliation mechanism
+(like a controller) that periodically determines the actual state of the world, and adjusts
+the out-of-band data modified by the admission webhook to reflect reality.
+This is because a call to an admission webhook does not guarantee the admitted object will be persisted as is, or at all.
+Later webhooks can modify the content of the object, a conflict could be encountered while writing to storage,
+or the server could power off before persisting the object.
+
+Additionally, webhooks with side effects should skip those side effects when `dryRun: true` admission requests are handled.
+A webhook must explicitly indicate that it will not have side effects when run with `dryRun`;
+otherwise, the dry-run request will not be sent to the webhook and the API request will fail instead.
+
+Webhooks indicate whether they have side effects using the `sideEffects` field in the webhook configuration.
+`sideEffects` may be set to `Unknown`, `None`, `Some`, or `NoneOnDryRun`. The default is `Unknown`.
+
+* `Unknown`: no information is known about the side effects of calling the webhook.
+If a request with `dryRun: true` would trigger a call to this webhook, the request will instead fail, and the webhook will not be called.
+* `None`: calling the webhook will have no side effects.
+* `Some`: calling the webhook will possibly have side effects.
+If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail, and the webhook will not be called.
+* `NoneOnDryRun`: calling the webhook will possibly have side effects,
+but if a request with `dryRun: true` is sent to the webhook, the webhook will suppress the side effects (the webhook is `dryRun`-aware).
+
+Here is an example of a validating webhook indicating it has no side effects on `dryRun: true` requests:
+```yaml
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: ValidatingWebhookConfiguration
+...
+webhooks:
+- name: my-webhook.example.com
+  sideEffects: NoneOnDryRun
+  ...
+```
+
+### Timeouts
+
+Because webhooks add to API request latency, they should evaluate as quickly as possible.
+`timeoutSeconds` allows configuring how long the API server should wait for a webhook to respond
+before treating the call as a failure.
+
+If the timeout expires before the webhook responds, the webhook call will be ignored or
+the API call will be rejected based on the [failure policy](#failure-policy).
+ +The timeout value must be between 1 and 30 seconds, and defaults to 30 seconds. + +Here is an example of a validating webhook with a custom timeout of 2 seconds: +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + timeoutSeconds: 2 + ... +``` + +### Reinvocation policy + +A single ordering of mutating admissions plugins (including webhooks) does not work for all cases +(see https://issue.k8s.io/64333 as an example). A mutating webhook can add a new sub-structure +to the object (like adding a `container` to a `pod`), and other mutating plugins which have already +run may have opinions on those new structures (like setting an `imagePullPolicy` on all containers). + +In v1.15+, to allow mutating admission plugins to observe changes made by other plugins, +built-in mutating admission plugins are re-run if a mutating webhook modifies an object, +and mutating webhooks can specify a `reinvocationPolicy` to control whether they are reinvoked as well. + +`reinvocationPolicy` may be set to `Never` or `IfNeeded`. It defaults to `Never`. + +* `Never`: the webhook must not be called more than once in a single admission evaluation +* `IfNeeded`: the webhook may be called again as part of the admission evaluation if the object +being admitted is modified by other admission plugins after the initial webhook call. + {{< note >}} + * the number of additional invocations is not guaranteed to be exactly one. + * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + * webhooks that use this option may be reordered to minimize the number of additional invocations. + * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead (recommended for webhooks with side-effects). + {{< /note >}} + +Here is an example of a mutating webhook opting into being re-invoked if later admission plugins modify the object: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + reinvocationPolicy: IfNeeded + ... +``` + +Mutating webhooks must be idempotent, able to successfully process an object they have already admitted +and potentially modified. This is true for all mutating admission webhooks, since any change they can make +in an object could already exist in the user-provided object, but it is essential for webhooks that opt into reinvocation. + +### Failure policy + +`failurePolicy` defines how unrecognized errors and timeout errors from the admission webhook +are handled. Allowed values are `Ignore` or `Fail`. Defaults to `Ignore` in v1beta1. + +* `Ignore` means that an error calling the webhook is ignored and the API request is allowed to continue. +* `Fail` means that an error calling the webhook causes the admission to fail and the API request to be rejected. + +Here is a mutating webhook configured to reject an API request if errors are encountered calling the admission webhook: + +```yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +... +webhooks: +- name: my-webhook.example.com + failurePolicy: Fail + ... 
+``` + {{% /capture %}} diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 23d728bbf0904..b9feb726c89ad 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -58,8 +58,11 @@ different Kubernetes components. | `CSIBlockVolume` | `true` | Beta | 1.14 | | | `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | | `CSIDriverRegistry` | `true` | Beta | 1.14 | | +| `CSIInlineVolume` | `false` | Alpha | 1.15 | - | | `CSIMigration` | `false` | Alpha | 1.14 | | | `CSIMigrationAWS` | `false` | Alpha | 1.14 | | +| `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | | +| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | | | `CSIMigrationGCE` | `false` | Alpha | 1.14 | | | `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | | | `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | @@ -107,6 +110,7 @@ different Kubernetes components. | `KubeletPodResources` | `false` | Alpha | 1.13 | | | `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | | `LocalStorageCapacityIsolation` | `true` | Beta| 1.10 | | +| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha| 1.15 | | | `MountContainers` | `false` | Alpha | 1.9 | | | `MountPropagation` | `false` | Alpha | 1.8 | 1.9 | | `MountPropagation` | `true` | Beta | 1.10 | 1.11 | @@ -158,7 +162,8 @@ different Kubernetes components. | `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | | `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | | `VolumeScheduling` | `true` | GA | 1.13 | | -| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | | +| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | 1.14 | +| `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | | | `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | - | | `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | | `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | | @@ -230,6 +235,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIDriverRegistry`: Enable all logic related to the CSIDriver API object in csi.storage.k8s.io. - `CSIMigration`: Enables shims and translation logic to route volume operations from in-tree plugins to corresponding pre-installed CSI plugins - `CSIMigrationAWS`: Enables shims and translation logic to route volume operations from the AWS-EBS in-tree plugin to EBS CSI plugin +- `CSIMigrationAzureDisk`: Enables shims and translation logic to route volume operations from the Azure-Disk in-tree plugin to Azure Disk CSI plugin +- `CSIMigrationAzureFile`: Enables shims and translation logic to route volume operations from the Azure-File in-tree plugin to Azure File CSI plugin - `CSIMigrationGCE`: Enables shims and translation logic to route volume operations from the GCE-PD in-tree plugin to PD CSI plugin - `CSIMigrationOpenStack`: Enables shims and translation logic to route volume operations from the Cinder in-tree plugin to Cinder CSI plugin - `CSINodeInfo`: Enable all logic related to the CSINodeInfo API object in csi.storage.k8s.io. @@ -276,6 +283,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `KubeletPodResources`: Enable the kubelet's pod resources grpc endpoint. See [Support Device Monitoring](https://git.k8s.io/community/keps/sig-node/compute-device-assignment.md) for more details. 
- `LocalStorageCapacityIsolation`: Enable the consumption of [local ephemeral storage](/docs/concepts/configuration/manage-compute-resources-container/) and also the `sizeLimit` property of an [emptyDir volume](/docs/concepts/storage/volumes/#emptydir). +- `LocalStorageCapacityIsolationFSQuotaMonitoring`: When `LocalStorageCapacityIsolation` is enabled for [local ephemeral storage](/docs/concepts/configuration/manage-compute-resources-container/) and the backing filesystem for [emptyDir volumes](/docs/concepts/storage/volumes/#emptydir) supports project quotas and they are enabled, use project quotas to monitor [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than filesystem walk for better performance and accuracy. - `MountContainers`: Enable using utility containers on host as the volume mounter. - `MountPropagation`: Enable sharing volume mounted by one container to other containers or pods. For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md.orig b/content/en/docs/reference/command-line-tools-reference/feature-gates.md.orig new file mode 100644 index 0000000000000..b9feb726c89ad --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md.orig @@ -0,0 +1,342 @@ +--- +title: Feature Gates +weight: 10 +title: Feature Gates +content_template: templates/concept +--- + +{{% capture overview %}} +This page contains an overview of the various feature gates an administrator +can specify on different Kubernetes components. +{{% /capture %}} + +{{% capture body %}} +## Overview + +Feature gates are a set of key=value pairs that describe alpha or experimental +features. +An administrator can use the `--feature-gates` command line flag on each component +to turn a feature on or off. Each component supports a set of feature gates unique to that component. +Use `-h` flag to see a full set of feature gates for all components. +To set feature gates for a component, such as kubelet, use the `--feature-gates` flag assigned to a list of feature pairs: + +```shell +--feature-gates="...,DynamicKubeletConfig=true" +``` + +The following table is a summary of the feature gates that you can set on +different Kubernetes components. + +- The "Since" column contains the Kubernetes release when a feature is introduced + or its release stage is changed. +- The "Until" column, if not empty, contains the last Kubernetes release in which + you can still use a feature gate. 
+ +| Feature | Default | Stage | Since | Until | +|---------|---------|-------|-------|-------| +| `Accelerators` | `false` | Alpha | 1.6 | 1.10 | +| `AdvancedAuditing` | `false` | Alpha | 1.7 | 1.7 | +| `AdvancedAuditing` | `true` | Beta | 1.8 | 1.11 | +| `AdvancedAuditing` | `true` | GA | 1.12 | - | +| `AffinityInAnnotations` | `false` | Alpha | 1.6 | 1.7 | +| `AllowExtTrafficLocalEndpoints` | `false` | Beta | 1.4 | 1.6 | +| `AllowExtTrafficLocalEndpoints` | `true` | GA | 1.7 | - | +| `APIListChunking` | `false` | Alpha | 1.8 | 1.8 | +| `APIListChunking` | `true` | Beta | 1.9 | | +| `APIResponseCompression` | `false` | Alpha | 1.7 | | +| `AppArmor` | `true` | Beta | 1.4 | | +| `AttachVolumeLimit` | `true` | Alpha | 1.11 | 1.11 | +| `AttachVolumeLimit` | `true` | Beta | 1.12 | | +| `BlockVolume` | `false` | Alpha | 1.9 | | +| `BlockVolume` | `true` | Beta | 1.13 | - | +| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | | +| `CPUManager` | `false` | Alpha | 1.8 | 1.9 | +| `CPUManager` | `true` | Beta | 1.10 | | +| `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | +| `CRIContainerLogRotation` | `true` | Beta| 1.11 | | +| `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 | +| `CSIBlockVolume` | `true` | Beta | 1.14 | | +| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | +| `CSIDriverRegistry` | `true` | Beta | 1.14 | | +| `CSIInlineVolume` | `false` | Alpha | 1.15 | - | +| `CSIMigration` | `false` | Alpha | 1.14 | | +| `CSIMigrationAWS` | `false` | Alpha | 1.14 | | +| `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | | +| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | | +| `CSIMigrationGCE` | `false` | Alpha | 1.14 | | +| `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | | +| `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | +| `CSINodeInfo` | `true` | Beta | 1.14 | | +| `CSIPersistentVolume` | `false` | Alpha | 1.9 | 1.9 | +| `CSIPersistentVolume` | `true` | Beta | 1.10 | 1.12 | +| `CSIPersistentVolume` | `true` | GA | 1.13 | - | +| `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | +| `CustomPodDNS` | `false` | Alpha | 1.9 | 1.9 | +| `CustomPodDNS` | `true` | Beta| 1.10 | | +| `CustomResourceSubresources` | `false` | Alpha | 1.10 | 1.11 | +| `CustomResourceSubresources` | `true` | Beta | 1.11 | - | +| `CustomResourceValidation` | `false` | Alpha | 1.8 | 1.8 | +| `CustomResourceValidation` | `true` | Beta | 1.9 | | +| `CustomResourceWebhookConversion` | `false` | Alpha | 1.13 | | +| `DebugContainers` | `false` | Alpha | 1.10 | | +| `DevicePlugins` | `false` | Alpha | 1.8 | 1.9 | +| `DevicePlugins` | `true` | Beta | 1.10 | | +| `DryRun` | `true` | Beta | 1.13 | | +| `DynamicAuditing` | `false` | Alpha | 1.13 | | +| `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 | +| `DynamicKubeletConfig` | `true` | Beta | 1.11 | | +| `DynamicProvisioningScheduling` | `false` | Alpha | 1.11 | 1.11 | +| `DynamicVolumeProvisioning` | `true` | Alpha | 1.3 | 1.7 | +| `DynamicVolumeProvisioning` | `true` | GA | 1.8 | | +| `EnableEquivalenceClassCache` | `false` | Alpha | 1.8 | | +| `ExpandCSIVolumes` | `false` | Alpha | 1.14 | | | +| `ExpandInUsePersistentVolumes` | `false` | Alpha | 1.11 | 1.13 | | +| `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | +| `ExpandPersistentVolumes` | `true` | Beta | 1.11 | | +| `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | | +| `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | | +| `GCERegionalPersistentDisk` | `true` | Beta | 1.10 | 1.12 | +| `GCERegionalPersistentDisk` | `true` | GA | 1.13 | - | +| 
`HugePages` | `false` | Alpha | 1.8 | 1.9 | +| `HugePages` | `true` | Beta| 1.10 | 1.13 | +| `HugePages` | `true` | GA | 1.14 | | +| `HyperVContainer` | `false` | Alpha | 1.10 | | +| `Initializers` | `false` | Alpha | 1.7 | 1.13 | +| `Initializers` | - | Deprecated | 1.14 | | +| `KubeletConfigFile` | `false` | Alpha | 1.8 | 1.9 | +| `KubeletPluginsWatcher` | `false` | Alpha | 1.11 | 1.11 | +| `KubeletPluginsWatcher` | `true` | Beta | 1.12 | 1.12 | +| `KubeletPluginsWatcher` | `true` | GA | 1.13 | - | +| `KubeletPodResources` | `false` | Alpha | 1.13 | | +| `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | +| `LocalStorageCapacityIsolation` | `true` | Beta| 1.10 | | +| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha| 1.15 | | +| `MountContainers` | `false` | Alpha | 1.9 | | +| `MountPropagation` | `false` | Alpha | 1.8 | 1.9 | +| `MountPropagation` | `true` | Beta | 1.10 | 1.11 | +| `MountPropagation` | `true` | GA | 1.12 | | +| `NodeLease` | `false` | Alpha | 1.12 | 1.13 | +| `NodeLease` | `true` | Beta | 1.14 | | +| `PersistentLocalVolumes` | `false` | Alpha | 1.7 | 1.9 | +| `PersistentLocalVolumes` | `true` | Beta | 1.10 | 1.13 | +| `PersistentLocalVolumes` | `true` | GA | 1.14 | | +| `PodPriority` | `false` | Alpha | 1.8 | 1.10 | +| `PodPriority` | `true` | Beta | 1.11 | 1.13 | +| `PodPriority` | `true` | GA | 1.14 | | +| `PodReadinessGates` | `false` | Alpha | 1.11 | | +| `PodReadinessGates` | `true` | Beta | 1.12 | | +| `PodShareProcessNamespace` | `false` | Alpha | 1.10 | | +| `PodShareProcessNamespace` | `true` | Beta | 1.12 | | +| `ProcMountType` | `false` | Alpha | 1.12 | | +| `PVCProtection` | `false` | Alpha | 1.9 | 1.9 | +| `ResourceLimitsPriorityFunction` | `false` | Alpha | 1.9 | | +| `ResourceQuotaScopeSelectors` | `false` | Alpha | 1.11 | 1.11 | +| `ResourceQuotaScopeSelectors` | `true` | Beta | 1.12 | | +| `RotateKubeletClientCertificate` | `true` | Beta | 1.8 | | +| `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | +| `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | +| `RunAsGroup` | `true` | Beta | 1.14 | | +| `RuntimeClass` | `true` | Beta | 1.14 | | +| `SCTPSupport` | `false` | Alpha | 1.12 | | +| `ServerSideApply` | `false` | Alpha | 1.14 | | +| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | | +| `StorageObjectInUseProtection` | `true` | Beta | 1.10 | 1.10 | +| `StorageObjectInUseProtection` | `true` | GA | 1.11 | | +| `StreamingProxyRedirects` | `true` | Beta | 1.5 | | +| `SupportIPVSProxyMode` | `false` | Alpha | 1.8 | 1.8 | +| `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 | +| `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 | +| `SupportIPVSProxyMode` | `true` | GA | 1.11 | | +| `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 | +| `SupportPodPidsLimit` | `true` | Beta | 1.14 | | +| `Sysctls` | `true` | Beta | 1.11 | | +| `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 | +| `TaintBasedEvictions` | `true` | Beta | 1.13 | | +| `TaintNodesByCondition` | `false` | Alpha | 1.8 | 1.11 | +| `TaintNodesByCondition` | `true` | Beta | 1.12 | | +| `TokenRequest` | `false` | Alpha | 1.10 | 1.11 | +| `TokenRequest` | `true` | Beta | 1.12 | | +| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | +| `TokenRequestProjection` | `true` | Beta | 1.12 | | +| `TTLAfterFinished` | `false` | Alpha | 1.12 | | +| `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | +| `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | +| `VolumeScheduling` | `true` | GA | 1.13 | | +| `VolumeSubpathEnvExpansion` | 
`false` | Alpha | 1.14 | 1.14 | +| `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | | +| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | - | +| `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | +| `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | | +| `WindowsGMSA` | `false` | Alpha | 1.14 | | + +## Using a Feature + +### Feature Stages + +A feature can be in *Alpha*, *Beta* or *GA* stage. +An *Alpha* feature means: + +* Disabled by default. +* Might be buggy. Enabling the feature may expose bugs. +* Support for feature may be dropped at any time without notice. +* The API may change in incompatible ways in a later software release without notice. +* Recommended for use only in short-lived testing clusters, due to increased + risk of bugs and lack of long-term support. + +A *Beta* feature means: + +* Enabled by default. +* The feature is well tested. Enabling the feature is considered safe. +* Support for the overall feature will not be dropped, though details may change. +* The schema and/or semantics of objects may change in incompatible ways in a + subsequent beta or stable release. When this happens, we will provide instructions + for migrating to the next version. This may require deleting, editing, and + re-creating API objects. The editing process may require some thought. + This may require downtime for applications that rely on the feature. +* Recommended for only non-business-critical uses because of potential for + incompatible changes in subsequent releases. If you have multiple clusters + that can be upgraded independently, you may be able to relax this restriction. + +{{< note >}} +Please do try *Beta* features and give feedback on them! +After they exit beta, it may not be practical for us to make more changes. +{{< /note >}} + +A *GA* feature is also referred to as a *stable* feature. It means: + +* The corresponding feature gate is no longer needed. +* Stable versions of features will appear in released software for many subsequent versions. + +### Feature Gates + +Each feature gate is designed for enabling/disabling a specific feature: + +- `Accelerators`: Enable Nvidia GPU support when using Docker +- `AdvancedAuditing`: Enable [advanced auditing](/docs/tasks/debug-application-cluster/audit/#advanced-audit) +- `AffinityInAnnotations`(*deprecated*): Enable setting [Pod affinity or anti-affinitys](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). +- `AllowExtTrafficLocalEndpoints`: Enable a service to route external requests to node local endpoints. +- `APIListChunking`: Enable the API clients to retrieve (`LIST` or `GET`) resources from API server in chunks. +- `APIResponseCompression`: Compress the API responses for `LIST` or `GET` requests. +- `AppArmor`: Enable AppArmor based mandatory access control on Linux nodes when using Docker. + See [AppArmor Tutorial](/docs/tutorials/clusters/apparmor/) for more details. +- `AttachVolumeLimit`: Enable volume plugins to report limits on number of volumes + that can be attached to a node. + See [dynamic volume limits](/docs/concepts/storage/storage-limits/#dynamic-volume-limits) for more details. +- `BlockVolume`: Enable the definition and consumption of raw block devices in Pods. + See [Raw Block Volume Support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support) + for more details. +- `BoundServiceAccountTokenVolume`: Migrate ServiceAccount volumes to use a projected volume consisting of a + ServiceAccountTokenVolumeProjection. 
+ Check [Service Account Token Volumes](https://git.k8s.io/community/contributors/design-proposals/storage/svcacct-token-volume-source.md) + for more details. +- `CPUManager`: Enable container level CPU affinity support, see [CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). +- `CRIContainerLogRotation`: Enable container log rotation for cri container runtime. +- `CSIBlockVolume`: Enable external CSI volume drivers to support block storage. See the [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support) documentation for more details. +- `CSIDriverRegistry`: Enable all logic related to the CSIDriver API object in csi.storage.k8s.io. +- `CSIMigration`: Enables shims and translation logic to route volume operations from in-tree plugins to corresponding pre-installed CSI plugins +- `CSIMigrationAWS`: Enables shims and translation logic to route volume operations from the AWS-EBS in-tree plugin to EBS CSI plugin +- `CSIMigrationAzureDisk`: Enables shims and translation logic to route volume operations from the Azure-Disk in-tree plugin to Azure Disk CSI plugin +- `CSIMigrationAzureFile`: Enables shims and translation logic to route volume operations from the Azure-File in-tree plugin to Azure File CSI plugin +- `CSIMigrationGCE`: Enables shims and translation logic to route volume operations from the GCE-PD in-tree plugin to PD CSI plugin +- `CSIMigrationOpenStack`: Enables shims and translation logic to route volume operations from the Cinder in-tree plugin to Cinder CSI plugin +- `CSINodeInfo`: Enable all logic related to the CSINodeInfo API object in csi.storage.k8s.io. +- `CSIPersistentVolume`: Enable discovering and mounting volumes provisioned through a + [CSI (Container Storage Interface)](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md) + compatible volume plugin. + Check the [`csi` volume type](/docs/concepts/storage/volumes/#csi) documentation for more details. +- `CustomCPUCFSQuotaPeriod`: Enable nodes to change CPUCFSQuotaPeriod. +- `CustomPodDNS`: Enable customizing the DNS settings for a Pod using its `dnsConfig` property. + Check [Pod's DNS Config](/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) + for more details. +- `CustomResourceSubresources`: Enable `/status` and `/scale` subresources + on resources created from [CustomResourceDefinition](/docs/concepts/api-extension/custom-resources/). +- `CustomResourceValidation`: Enable schema based validation on resources created from + [CustomResourceDefinition](/docs/concepts/api-extension/custom-resources/). +- `CustomResourceWebhookConversion`: Enable webhook-based conversion + on resources created from [CustomResourceDefinition](/docs/concepts/api-extension/custom-resources/). +- `DebugContainers`: Enable running a "debugging" container in a Pod's namespace to + troubleshoot a running Pod. +- `DevicePlugins`: Enable the [device-plugins](/docs/concepts/cluster-administration/device-plugins/) + based resource provisioning on nodes. +- `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests. +- `DynamicAuditing`: Enable [dynamic auditing](/docs/tasks/debug-application-cluster/audit/#dynamic-backend) +- `DynamicKubeletConfig`: Enable the dynamic configuration of kubelet. See [Reconfigure kubelet](/docs/tasks/administer-cluster/reconfigure-kubelet/). 
+- `DynamicProvisioningScheduling`: Extend the default scheduler to be aware of volume topology and handle PV provisioning. + This feature is superceded by the `VolumeScheduling` feature completely in v1.12. +- `DynamicVolumeProvisioning`(*deprecated*): Enable the [dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/) of persistent volumes to Pods. +- `EnableEquivalenceClassCache`: Enable the scheduler to cache equivalence of nodes when scheduling Pods. +- `ExpandInUsePersistentVolumes`: Enable expanding in-use PVCs. See [Resizing an in-use PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim). +- `ExpandPersistentVolumes`: Enable the expanding of persistent volumes. See [Expanding Persistent Volumes Claims](/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims). +- `ExperimentalCriticalPodAnnotation`: Enable annotating specific pods as *critical* so that their [scheduling is guaranteed](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/). +- `ExperimentalHostUserNamespaceDefaultingGate`: Enabling the defaulting user + namespace to host. This is for containers that are using other host namespaces, + host mounts, or containers that are privileged or using specific non-namespaced + capabilities (e.g. `MKNODE`, `SYS_MODULE` etc.). This should only be enabled + if user namespace remapping is enabled in the Docker daemon. +- `GCERegionalPersistentDisk`: Enable the regional PD feature on GCE. +- `HugePages`: Enable the allocation and consumption of pre-allocated [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/). +- `HyperVContainer`: Enable [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) for Windows containers. +- `KubeletConfigFile`: Enable loading kubelet configuration from a file specified using a config file. + See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. +- `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet + to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi). +- `KubeletPodResources`: Enable the kubelet's pod resources grpc endpoint. + See [Support Device Monitoring](https://git.k8s.io/community/keps/sig-node/compute-device-assignment.md) for more details. +- `LocalStorageCapacityIsolation`: Enable the consumption of [local ephemeral storage](/docs/concepts/configuration/manage-compute-resources-container/) and also the `sizeLimit` property of an [emptyDir volume](/docs/concepts/storage/volumes/#emptydir). +- `LocalStorageCapacityIsolationFSQuotaMonitoring`: When `LocalStorageCapacityIsolation` is enabled for [local ephemeral storage](/docs/concepts/configuration/manage-compute-resources-container/) and the backing filesystem for [emptyDir volumes](/docs/concepts/storage/volumes/#emptydir) supports project quotas and they are enabled, use project quotas to monitor [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than filesystem walk for better performance and accuracy. +- `MountContainers`: Enable using utility containers on host as the volume mounter. +- `MountPropagation`: Enable sharing volume mounted by one container to other containers or pods. + For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation). 
+- `NodeLease`: Enable the new Lease API to report node heartbeats, which could be used as a node health signal. +- `PersistentLocalVolumes`: Enable the usage of `local` volume type in Pods. + Pod affinity has to be specified if requesting a `local` volume. +- `PodPriority`: Enable the descheduling and preemption of Pods based on their [priorities](/docs/concepts/configuration/pod-priority-preemption/). +- `PodReadinessGates`: Enable the setting of `PodReadinessGate` field for extending + Pod readiness evaluation. + For more details, please see [Pod readiness gate](/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate). +- `ProcMountType`: Enables control over ProcMountType for containers. +- `PVCProtection`: Enable the prevention of a PersistentVolumeClaim (PVC) from + being deleted when it is still used by any Pod. + More details can be found [here](/docs/tasks/administer-cluster/storage-object-in-use-protection/). +- `ResourceLimitsPriorityFunction`: Enable a scheduler priority function that + assigns a lowest possible score of 1 to a node that satisfies at least one of + the input Pod's cpu and memory limits. The intent is to break ties between + nodes with same scores. +- `ResourceQuotaScopeSelectors`: Enable resource quota scope selectors. +- `RotateKubeletClientCertificate`: Enable the rotation of the client TLS certificate on the kubelet. + See [kubelet configuration](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration) for more details. +- `RotateKubeletServerCertificate`: Enable the rotation of the server TLS certificate on the kubelet. + See [kubelet configuration](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration) for more details. +- `RunAsGroup`: Enable control over the primary group ID set on the init processes of containers. +- `RuntimeClass`: Enable the [RuntimeClass](/docs/concepts/containers/runtime-class/) feature for selecting container runtime configurations. +- `ScheduleDaemonSetPods`: Enable DaemonSet Pods to be scheduled by the default scheduler instead of the DaemonSet controller. +- `SCTPSupport`: Enables the usage of SCTP as `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions +- `ServerSideApply`: Enables the [Sever Side Apply (SSA)](/docs/reference/using-api/api-concepts/#server-side-apply) path at the API Server. +- `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider. + A node is eligible for exclusion if annotated with "`alpha.service-controller.kubernetes.io/exclude-balancer`" key. +- `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or + PersistentVolumeClaim objects if they are still being used. +- `StreamingProxyRedirects`: Instructs the API server to intercept (and follow) + redirects from the backend (kubelet) for streaming requests. + Examples of streaming requests include the `exec`, `attach` and `port-forward` requests. +- `SupportIPVSProxyMode`: Enable providing in-cluster service load balancing using IPVS. + See [service proxies](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) for more details. +- `SupportPodPidsLimit`: Enable the support to limiting PIDs in Pods. +- `Sysctls`: Enable support for namespaced kernel parameters (sysctls) that can be set for each pod. + See [sysctls](/docs/tasks/administer-cluster/sysctl-cluster/) for more details. 
+- `TaintBasedEvictions`: Enable evicting pods from nodes based on taints on nodes and tolerations on Pods.
+  See [taints and tolerations](/docs/concepts/configuration/taint-and-toleration/) for more details.
+- `TaintNodesByCondition`: Enable automatic tainting of nodes based on [node conditions](/docs/concepts/architecture/nodes/#condition).
+- `TokenRequest`: Enable the `TokenRequest` endpoint on service account resources.
+- `TokenRequestProjection`: Enable the injection of service account tokens into
+  a Pod through the [`projected` volume](/docs/concepts/storage/volumes/#projected).
+- `TTLAfterFinished`: Allow a [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) to clean up resources after they finish execution.
+- `VolumeScheduling`: Enable volume topology aware scheduling and make the
+  PersistentVolumeClaim (PVC) binding aware of scheduling decisions. It also
+  enables the usage of the [`local`](/docs/concepts/storage/volumes/#local) volume
+  type when used together with the `PersistentLocalVolumes` feature gate.
+- `VolumeSnapshotDataSource`: Enable volume snapshot data source support.
+- `VolumeSubpathEnvExpansion`: Enable the `subPathExpr` field for expanding environment variables into a `subPath`.
+- `WindowsGMSA`: Enables passing of GMSA credential specs from pods to container runtimes.
+
+{{% /capture %}}
diff --git a/content/en/docs/reference/command-line-tools-reference/federation-apiserver_BACKUP_76806.md b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_BACKUP_76806.md new file mode 100644 index 0000000000000..46222dedee6c5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_BACKUP_76806.md @@ -0,0 +1,154 @@ +--- +title: federation-apiserver +notitle: true +--- + +## federation-apiserver + + + +### Synopsis + + +The Kubernetes federation API server validates and configures data +for the api objects which include pods, services, replicationcontrollers, and +others. The API Server services REST operations and provides the frontend to the +cluster's shared state through which all other components interact. + +``` +federation-apiserver [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Requires the 'AdvancedAuditing' feature gate. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.
+ --audit-policy-file string Path to the file that defines the audit policy configuration. Requires the 'AdvancedAuditing' feature gate. With AdvancedAuditing, a profile is required to enable auditing. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and sending to the webhook. Only used in batch mode. (default 10000) + --audit-webhook-batch-initial-backoff duration The amount of time to wait before retrying the first failed requests. Only used in batch mode. (default 10s) + --audit-webhook-batch-max-size int The maximum size of a batch sent to the webhook. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force sending the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-qps float32 Maximum average number of requests per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. Requires the 'AdvancedAuditing' feature gate. + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the webhook to buffer and send events asynchronously. Known modes are batch,blocking. (default "batch") + --authentication-token-webhook-cache-ttl duration The duration to cache responses from the webhook token authenticator. (default 2m0s) + --authentication-token-webhook-config-file string File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens. + --authorization-mode string Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default "AlwaysAllow") + --authorization-policy-file string File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --basic-auth-file string If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication. + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0). (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. 
(default "/var/run/kubernetes") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. + --default-watch-cache-size int Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set. (default 100) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --deserialization-cache-size int Number of deserialized json objects to cache in memory. + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list. Comma-delimited list of admission plugins: Initializers, MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones. Comma-delimited list of admission plugins: Initializers, MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-bootstrap-token-auth Enable to allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrapping authentication. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-swagger-ui Enables swagger ui on the apiserver at /swagger-ui + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry") + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated. + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --experimental-keystone-ca-file string If set, the Keystone server's certificate will be verified by one of the authorities in the experimental-keystone-ca-file, otherwise the host's root CA set will be used. + --experimental-keystone-url string If passed, activates the keystone authentication plugin. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. 
Options are: +APIListChunking=true|false (BETA - default=true) +APIResponseCompression=true|false (ALPHA - default=false) +Accelerators=true|false (ALPHA - default=false) +AdvancedAuditing=true|false (BETA - default=true) +AllAlpha=true|false (ALPHA - default=false) +AppArmor=true|false (BETA - default=true) +BlockVolume=true|false (ALPHA - default=false) +CPUManager=true|false (BETA - default=true) +CSIPersistentVolume=true|false (ALPHA - default=false) +CustomPodDNS=true|false (ALPHA - default=false) +CustomResourceValidation=true|false (BETA - default=true) +DebugContainers=true|false (ALPHA - default=false) +DevicePlugins=true|false (ALPHA - default=false) +DynamicKubeletConfig=true|false (ALPHA - default=false) +EnableEquivalenceClassCache=true|false (ALPHA - default=false) +ExpandPersistentVolumes=true|false (ALPHA - default=false) +ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false) +ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) +HugePages=true|false (BETA - default=true) +HyperVContainer=true|false (ALPHA - default=false) +Initializers=true|false (ALPHA - default=false) +LocalStorageCapacityIsolation=true|false (ALPHA - default=false) +LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) +MountContainers=true|false (ALPHA - default=false) +MountPropagation=true|false (ALPHA - default=false) +PVCProtection=true|false (ALPHA - default=false) +PersistentLocalVolumes=true|false (ALPHA - default=false) +PodPriority=true|false (ALPHA - default=false) +PodShareProcessNamespace=true|false (ALPHA - default=false) +ResourceLimitsPriorityFunction=true|false (ALPHA - default=false) +RotateKubeletClientCertificate=true|false (BETA - default=true) +RotateKubeletServerCertificate=true|false (ALPHA - default=false) +ServiceNodeExclusion=true|false (ALPHA - default=false) +ServiceProxyAllowExternalIPs=true|false (DEPRECATED - default=false) +StreamingProxyRedirects=true|false (BETA - default=true) +SupportIPVSProxyMode=true|false (BETA - default=false) +SupportPodPidsLimit=true|false (ALPHA - default=false) +TaintBasedEvictions=true|false (ALPHA - default=false) +TaintNodesByCondition=true|false (ALPHA - default=false) +VolumeScheduling=true|false (ALPHA - default=false) + -h, --help help for federation-apiserver + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --master-service-namespace string DEPRECATED: the namespace from which the kubernetes master services should be injected into pods. (default "default") + --max-mutating-requests-inflight int The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 200) + --max-requests-inflight int The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --oidc-ca-file string If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + --oidc-client-id string The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set. 
+ --oidc-groups-claim string If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details. + --oidc-groups-prefix string If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + --oidc-issuer-url string The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + --oidc-username-claim string The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub") + --oidc-username-prefix string If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --runtime-config mapStringString A set of key=value pairs that describe runtime configuration that may be passed to apiserver. / (or for the core group) key can be used to turn on/off specific api versions. api/all is special key to control all api versions, be careful setting it false, unless you know what you do. api/legacy is deprecated, we will remove it in the future, so stop using it. + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 6443) + --service-account-key-file stringArray File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. If unspecified, --tls-private-key-file is used. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. + --service-account-lookup If true, validate ServiceAccount tokens exist in etcd as part of authentication. (default true) + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default), 'etcd2'. + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. 
(default "application/vnd.kubernetes.protobuf") + --storage-versions string The per-group version to store resources in. Specified in the format "group1/version1,group2/version2,...". In the case where objects are moved from one group to the other, you may specify the format "group1=group2/v1beta1,group3/v1beta1,...". You only need to pass the groups you wish to change from the defaults. It defaults to a list of preferred versions of all registered groups, which is derived from the KUBE_API_VERSIONS environment variable. (default "admissionregistration.k8s.io/v1beta1,apps/v1beta1,authentication.k8s.io/v1,authorization.k8s.io/v1,autoscaling/v1,batch/v1,certificates.k8s.io/v1beta1,componentconfig/v1alpha1,events.k8s.io/v1beta1,extensions/v1beta1,federation/v1beta1,imagepolicy.k8s.io/v1alpha1,networking.k8s.io/v1,policy/v1beta1,rbac.authorization.k8s.io/v1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1,storage.k8s.io/v1,v1") + --target-ram-mb int Memory limit for apiserver in MB (used to configure sizes of caches, etc.) + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used + --tls-min-version string Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants. + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --token-auth-file string If set, the file that will be used to secure the secure port of the API server via token authentication. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. The individual override format: resource[.group]#size, where resource is lowercase plural (no version), group is optional, and size is a number. It takes effect when watch-cache is enabled. 
Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) have system defaults set by heuristics, others default to default-watch-cache-size +``` + +###### Auto generated by spf13/cobra on 1-Dec-2018 diff --git a/content/en/docs/reference/command-line-tools-reference/federation-apiserver_BASE_76806.md b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_BASE_76806.md new file mode 100644 index 0000000000000..c219528332a90 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_BASE_76806.md @@ -0,0 +1,153 @@ +--- +title: federation-apiserver +notitle: true +--- + +## federation-apiserver + + + +### Synopsis + + +The Kubernetes federation API server validates and configures data +for the api objects which include pods, services, replicationcontrollers, and +others. The API Server services REST operations and provides the frontend to the +cluster's shared state through which all other components interact. + +``` +federation-apiserver [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Requires the 'AdvancedAuditing' feature gate. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-policy-file string Path to the file that defines the audit policy configuration. Requires the 'AdvancedAuditing' feature gate. With AdvancedAuditing, a profile is required to enable auditing. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and sending to the webhook. Only used in batch mode. (default 10000) + --audit-webhook-batch-initial-backoff duration The amount of time to wait before retrying the first failed requests. Only used in batch mode. (default 10s) + --audit-webhook-batch-max-size int The maximum size of a batch sent to the webhook. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force sending the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-qps float32 Maximum average number of requests per second. Only used in batch mode. 
(default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. Requires the 'AdvancedAuditing' feature gate. + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the webhook to buffer and send events asynchronously. Known modes are batch,blocking. (default "batch") + --authentication-token-webhook-cache-ttl duration The duration to cache responses from the webhook token authenticator. (default 2m0s) + --authentication-token-webhook-config-file string File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens. + --authorization-mode string Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default "AlwaysAllow") + --authorization-policy-file string File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --basic-auth-file string If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication. + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0). (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "/var/run/kubernetes") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. + --default-watch-cache-size int Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set. (default 100) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --deserialization-cache-size int Number of deserialized json objects to cache in memory. + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list. Comma-delimited list of admission plugins: Initializers, MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. 
+ --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones. Comma-delimited list of admission plugins: Initializers, MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-bootstrap-token-auth Enable to allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrapping authentication. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-swagger-ui Enables swagger ui on the apiserver at /swagger-ui + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry") + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated. + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --experimental-keystone-ca-file string If set, the Keystone server's certificate will be verified by one of the authorities in the experimental-keystone-ca-file, otherwise the host's root CA set will be used. + --experimental-keystone-url string If passed, activates the keystone authentication plugin. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. 
Options are: +APIListChunking=true|false (BETA - default=true) +APIResponseCompression=true|false (ALPHA - default=false) +Accelerators=true|false (ALPHA - default=false) +AdvancedAuditing=true|false (BETA - default=true) +AllAlpha=true|false (ALPHA - default=false) +AppArmor=true|false (BETA - default=true) +BlockVolume=true|false (ALPHA - default=false) +CPUManager=true|false (BETA - default=true) +CSIPersistentVolume=true|false (ALPHA - default=false) +CustomPodDNS=true|false (ALPHA - default=false) +CustomResourceValidation=true|false (BETA - default=true) +DebugContainers=true|false (ALPHA - default=false) +DevicePlugins=true|false (ALPHA - default=false) +DynamicKubeletConfig=true|false (ALPHA - default=false) +EnableEquivalenceClassCache=true|false (ALPHA - default=false) +ExpandPersistentVolumes=true|false (ALPHA - default=false) +ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false) +ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) +HugePages=true|false (BETA - default=true) +HyperVContainer=true|false (ALPHA - default=false) +Initializers=true|false (ALPHA - default=false) +LocalStorageCapacityIsolation=true|false (ALPHA - default=false) +MountContainers=true|false (ALPHA - default=false) +MountPropagation=true|false (ALPHA - default=false) +PVCProtection=true|false (ALPHA - default=false) +PersistentLocalVolumes=true|false (ALPHA - default=false) +PodPriority=true|false (ALPHA - default=false) +PodShareProcessNamespace=true|false (ALPHA - default=false) +ResourceLimitsPriorityFunction=true|false (ALPHA - default=false) +RotateKubeletClientCertificate=true|false (BETA - default=true) +RotateKubeletServerCertificate=true|false (ALPHA - default=false) +ServiceNodeExclusion=true|false (ALPHA - default=false) +ServiceProxyAllowExternalIPs=true|false (DEPRECATED - default=false) +StreamingProxyRedirects=true|false (BETA - default=true) +SupportIPVSProxyMode=true|false (BETA - default=false) +SupportPodPidsLimit=true|false (ALPHA - default=false) +TaintBasedEvictions=true|false (ALPHA - default=false) +TaintNodesByCondition=true|false (ALPHA - default=false) +VolumeScheduling=true|false (ALPHA - default=false) + -h, --help help for federation-apiserver + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --master-service-namespace string DEPRECATED: the namespace from which the kubernetes master services should be injected into pods. (default "default") + --max-mutating-requests-inflight int The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 200) + --max-requests-inflight int The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --oidc-ca-file string If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + --oidc-client-id string The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set. 
+ --oidc-groups-claim string If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details. + --oidc-groups-prefix string If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + --oidc-issuer-url string The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + --oidc-username-claim string The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub") + --oidc-username-prefix string If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --runtime-config mapStringString A set of key=value pairs that describe runtime configuration that may be passed to apiserver. / (or for the core group) key can be used to turn on/off specific api versions. api/all is special key to control all api versions, be careful setting it false, unless you know what you do. api/legacy is deprecated, we will remove it in the future, so stop using it. + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 6443) + --service-account-key-file stringArray File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. If unspecified, --tls-private-key-file is used. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. + --service-account-lookup If true, validate ServiceAccount tokens exist in etcd as part of authentication. (default true) + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default), 'etcd2'. + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. 
(default "application/vnd.kubernetes.protobuf") + --storage-versions string The per-group version to store resources in. Specified in the format "group1/version1,group2/version2,...". In the case where objects are moved from one group to the other, you may specify the format "group1=group2/v1beta1,group3/v1beta1,...". You only need to pass the groups you wish to change from the defaults. It defaults to a list of preferred versions of all registered groups, which is derived from the KUBE_API_VERSIONS environment variable. (default "admissionregistration.k8s.io/v1beta1,apps/v1beta1,authentication.k8s.io/v1,authorization.k8s.io/v1,autoscaling/v1,batch/v1,certificates.k8s.io/v1beta1,componentconfig/v1alpha1,events.k8s.io/v1beta1,extensions/v1beta1,federation/v1beta1,imagepolicy.k8s.io/v1alpha1,networking.k8s.io/v1,policy/v1beta1,rbac.authorization.k8s.io/v1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1,storage.k8s.io/v1,v1") + --target-ram-mb int Memory limit for apiserver in MB (used to configure sizes of caches, etc.) + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used + --tls-min-version string Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants. + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --token-auth-file string If set, the file that will be used to secure the secure port of the API server via token authentication. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. The individual override format: resource[.group]#size, where resource is lowercase plural (no version), group is optional, and size is a number. It takes effect when watch-cache is enabled. 
Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) have system defaults set by heuristics, others default to default-watch-cache-size +``` + +###### Auto generated by spf13/cobra on 1-Dec-2018 diff --git a/content/en/docs/reference/command-line-tools-reference/federation-apiserver_LOCAL_76806.md b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_LOCAL_76806.md new file mode 100644 index 0000000000000..46222dedee6c5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_LOCAL_76806.md @@ -0,0 +1,154 @@ +--- +title: federation-apiserver +notitle: true +--- + +## federation-apiserver + + + +### Synopsis + + +The Kubernetes federation API server validates and configures data +for the api objects which include pods, services, replicationcontrollers, and +others. The API Server services REST operations and provides the frontend to the +cluster's shared state through which all other components interact. + +``` +federation-apiserver [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Requires the 'AdvancedAuditing' feature gate. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-policy-file string Path to the file that defines the audit policy configuration. Requires the 'AdvancedAuditing' feature gate. With AdvancedAuditing, a profile is required to enable auditing. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and sending to the webhook. Only used in batch mode. (default 10000) + --audit-webhook-batch-initial-backoff duration The amount of time to wait before retrying the first failed requests. Only used in batch mode. (default 10s) + --audit-webhook-batch-max-size int The maximum size of a batch sent to the webhook. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force sending the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-qps float32 Maximum average number of requests per second. 
Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. Requires the 'AdvancedAuditing' feature gate. + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the webhook to buffer and send events asynchronously. Known modes are batch,blocking. (default "batch") + --authentication-token-webhook-cache-ttl duration The duration to cache responses from the webhook token authenticator. (default 2m0s) + --authentication-token-webhook-config-file string File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens. + --authorization-mode string Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default "AlwaysAllow") + --authorization-policy-file string File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --basic-auth-file string If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication. + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0). (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "/var/run/kubernetes") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. + --default-watch-cache-size int Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set. (default 100) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --deserialization-cache-size int Number of deserialized json objects to cache in memory. + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list. Comma-delimited list of admission plugins: Initializers, MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. 
+ --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones. Comma-delimited list of admission plugins: Initializers, MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-bootstrap-token-auth Enable to allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrapping authentication. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-swagger-ui Enables swagger ui on the apiserver at /swagger-ui + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry") + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated. + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --experimental-keystone-ca-file string If set, the Keystone server's certificate will be verified by one of the authorities in the experimental-keystone-ca-file, otherwise the host's root CA set will be used. + --experimental-keystone-url string If passed, activates the keystone authentication plugin. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. 
Options are: +APIListChunking=true|false (BETA - default=true) +APIResponseCompression=true|false (ALPHA - default=false) +Accelerators=true|false (ALPHA - default=false) +AdvancedAuditing=true|false (BETA - default=true) +AllAlpha=true|false (ALPHA - default=false) +AppArmor=true|false (BETA - default=true) +BlockVolume=true|false (ALPHA - default=false) +CPUManager=true|false (BETA - default=true) +CSIPersistentVolume=true|false (ALPHA - default=false) +CustomPodDNS=true|false (ALPHA - default=false) +CustomResourceValidation=true|false (BETA - default=true) +DebugContainers=true|false (ALPHA - default=false) +DevicePlugins=true|false (ALPHA - default=false) +DynamicKubeletConfig=true|false (ALPHA - default=false) +EnableEquivalenceClassCache=true|false (ALPHA - default=false) +ExpandPersistentVolumes=true|false (ALPHA - default=false) +ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false) +ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) +HugePages=true|false (BETA - default=true) +HyperVContainer=true|false (ALPHA - default=false) +Initializers=true|false (ALPHA - default=false) +LocalStorageCapacityIsolation=true|false (ALPHA - default=false) +LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) +MountContainers=true|false (ALPHA - default=false) +MountPropagation=true|false (ALPHA - default=false) +PVCProtection=true|false (ALPHA - default=false) +PersistentLocalVolumes=true|false (ALPHA - default=false) +PodPriority=true|false (ALPHA - default=false) +PodShareProcessNamespace=true|false (ALPHA - default=false) +ResourceLimitsPriorityFunction=true|false (ALPHA - default=false) +RotateKubeletClientCertificate=true|false (BETA - default=true) +RotateKubeletServerCertificate=true|false (ALPHA - default=false) +ServiceNodeExclusion=true|false (ALPHA - default=false) +ServiceProxyAllowExternalIPs=true|false (DEPRECATED - default=false) +StreamingProxyRedirects=true|false (BETA - default=true) +SupportIPVSProxyMode=true|false (BETA - default=false) +SupportPodPidsLimit=true|false (ALPHA - default=false) +TaintBasedEvictions=true|false (ALPHA - default=false) +TaintNodesByCondition=true|false (ALPHA - default=false) +VolumeScheduling=true|false (ALPHA - default=false) + -h, --help help for federation-apiserver + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --master-service-namespace string DEPRECATED: the namespace from which the kubernetes master services should be injected into pods. (default "default") + --max-mutating-requests-inflight int The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 200) + --max-requests-inflight int The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --oidc-ca-file string If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + --oidc-client-id string The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set. 
+ --oidc-groups-claim string If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details. + --oidc-groups-prefix string If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + --oidc-issuer-url string The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + --oidc-username-claim string The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub") + --oidc-username-prefix string If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --runtime-config mapStringString A set of key=value pairs that describe runtime configuration that may be passed to apiserver. / (or for the core group) key can be used to turn on/off specific api versions. api/all is special key to control all api versions, be careful setting it false, unless you know what you do. api/legacy is deprecated, we will remove it in the future, so stop using it. + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 6443) + --service-account-key-file stringArray File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. If unspecified, --tls-private-key-file is used. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. + --service-account-lookup If true, validate ServiceAccount tokens exist in etcd as part of authentication. (default true) + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default), 'etcd2'. + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. 
(default "application/vnd.kubernetes.protobuf") + --storage-versions string The per-group version to store resources in. Specified in the format "group1/version1,group2/version2,...". In the case where objects are moved from one group to the other, you may specify the format "group1=group2/v1beta1,group3/v1beta1,...". You only need to pass the groups you wish to change from the defaults. It defaults to a list of preferred versions of all registered groups, which is derived from the KUBE_API_VERSIONS environment variable. (default "admissionregistration.k8s.io/v1beta1,apps/v1beta1,authentication.k8s.io/v1,authorization.k8s.io/v1,autoscaling/v1,batch/v1,certificates.k8s.io/v1beta1,componentconfig/v1alpha1,events.k8s.io/v1beta1,extensions/v1beta1,federation/v1beta1,imagepolicy.k8s.io/v1alpha1,networking.k8s.io/v1,policy/v1beta1,rbac.authorization.k8s.io/v1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1,storage.k8s.io/v1,v1") + --target-ram-mb int Memory limit for apiserver in MB (used to configure sizes of caches, etc.) + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used + --tls-min-version string Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants. + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --token-auth-file string If set, the file that will be used to secure the secure port of the API server via token authentication. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. The individual override format: resource[.group]#size, where resource is lowercase plural (no version), group is optional, and size is a number. It takes effect when watch-cache is enabled. 
Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) have system defaults set by heuristics, others default to default-watch-cache-size +``` + +###### Auto generated by spf13/cobra on 1-Dec-2018 diff --git a/content/en/docs/reference/command-line-tools-reference/federation-apiserver_REMOTE_76806.md b/content/en/docs/reference/command-line-tools-reference/federation-apiserver_REMOTE_76806.md new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index e314bfa6871bd..7629481ed5d03 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -570,7 +570,7 @@ kube-apiserver [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (ALPHA - default=false)
CSIMigration=true|false (ALPHA - default=false)
CSIMigrationAWS=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSINodeInfo=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomResourcePublishOpenAPI=true|false (ALPHA - default=false)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServerSideApply=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StorageVersionHash=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (ALPHA - default=false)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md.orig b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md.orig new file mode 100644 index 0000000000000..7629481ed5d03 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md.orig @@ -0,0 +1,1021 @@ +--- +title: kube-apiserver +notitle: true +--- +## kube-apiserver + + + +### Synopsis + + +The Kubernetes API server validates and configures data +for the api objects which include pods, services, replicationcontrollers, and +others. The API Server services REST operations and provides the frontend to the +cluster's shared state through which all other components interact. + +``` +kube-apiserver [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--admission-control-config-file string
File with admission control configuration.
--advertise-address ip
The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used.
--allow-privileged
If true, allow privileged containers. [default=false]
--alsologtostderr
log to standard error as well as files
--anonymous-auth     Default: true
Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated.
--api-audiences stringSlice
Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL.
--apiserver-count int     Default: 1
The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.)
--audit-dynamic-configuration
Enables dynamic audit configuration. This feature also requires the DynamicAuditing feature flag
--audit-log-batch-buffer-size int     Default: 10000
The size of the buffer to store events before batching and writing. Only used in batch mode.
--audit-log-batch-max-size int     Default: 1
The maximum size of a batch. Only used in batch mode.
--audit-log-batch-max-wait duration
The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode.
--audit-log-batch-throttle-burst int
Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode.
--audit-log-batch-throttle-enable
Whether batching throttling is enabled. Only used in batch mode.
--audit-log-batch-throttle-qps float32
Maximum average number of batches per second. Only used in batch mode.
--audit-log-format string     Default: "json"
Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json.
--audit-log-maxage int
The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.
--audit-log-maxbackup int
The maximum number of old audit log files to retain.
--audit-log-maxsize int
The maximum size in megabytes of the audit log file before it gets rotated.
--audit-log-mode string     Default: "blocking"
Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict.
--audit-log-path string
If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.
--audit-log-truncate-enabled
Whether event and batch truncating is enabled.
--audit-log-truncate-max-batch-size int     Default: 10485760
Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size.
--audit-log-truncate-max-event-size int     Default: 102400
Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded.
--audit-log-version string     Default: "audit.k8s.io/v1"
API group and version used for serializing audit events written to log.
--audit-policy-file string
Path to the file that defines the audit policy configuration.
--audit-webhook-batch-buffer-size int     Default: 10000
The size of the buffer to store events before batching and writing. Only used in batch mode.
--audit-webhook-batch-max-size int     Default: 400
The maximum size of a batch. Only used in batch mode.
--audit-webhook-batch-max-wait duration     Default: 30s
The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode.
--audit-webhook-batch-throttle-burst int     Default: 15
Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode.
--audit-webhook-batch-throttle-enable     Default: true
Whether batching throttling is enabled. Only used in batch mode.
--audit-webhook-batch-throttle-qps float32     Default: 10
Maximum average number of batches per second. Only used in batch mode.
--audit-webhook-config-file string
Path to a kubeconfig formatted file that defines the audit webhook configuration.
--audit-webhook-initial-backoff duration     Default: 10s
The amount of time to wait before retrying the first failed request.
--audit-webhook-mode string     Default: "batch"
Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict.
--audit-webhook-truncate-enabled
Whether event and batch truncating is enabled.
--audit-webhook-truncate-max-batch-size int     Default: 10485760
Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size.
--audit-webhook-truncate-max-event-size int     Default: 102400
Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded.
--audit-webhook-version string     Default: "audit.k8s.io/v1"
API group and version used for serializing audit events written to webhook.
--authentication-token-webhook-cache-ttl duration     Default: 2m0s
The duration to cache responses from the webhook token authenticator.
--authentication-token-webhook-config-file string
File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens.
--authorization-mode stringSlice     Default: [AlwaysAllow]
Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node.
--authorization-policy-file string
File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port.
--authorization-webhook-cache-authorized-ttl duration     Default: 5m0s
The duration to cache 'authorized' responses from the webhook authorizer.
--authorization-webhook-cache-unauthorized-ttl duration     Default: 30s
The duration to cache 'unauthorized' responses from the webhook authorizer.
--authorization-webhook-config-file string
File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port.
--azure-container-registry-config string
Path to the file containing Azure container registry configuration information.
--basic-auth-file string
If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication.
--bind-address ip     Default: 0.0.0.0
The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).
--cert-dir string     Default: "/var/run/kubernetes"
The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.
--client-ca-file string
If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.
--cloud-config string
The path to the cloud provider configuration file. Empty string for no configuration file.
--cloud-provider string
The provider for cloud services. Empty string for no provider.
--cloud-provider-gce-lb-src-cidrs cidrs     Default: 130.211.0.0/22,209.85.152.0/22,209.85.204.0/22,35.191.0.0/16
CIDRs opened in GCE firewall for LB traffic proxy & health checks
--contention-profiling
Enable lock contention profiling, if profiling is enabled
--cors-allowed-origins stringSlice
List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.
--default-not-ready-toleration-seconds int     Default: 300
Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration.
--default-unreachable-toleration-seconds int     Default: 300
Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration.
--default-watch-cache-size int     Default: 100
Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.
--delete-collection-workers int     Default: 1
Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.
--disable-admission-plugins stringSlice
admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, PersistentVolumeClaimResize, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.
--enable-admission-plugins stringSlice
admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, PersistentVolumeClaimResize, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.
--enable-aggregator-routing
Turns on aggregator routing requests to endpoints IP rather than cluster IP.
--enable-bootstrap-token-auth
Enable to allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrapping authentication.
--enable-garbage-collector     Default: true
Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager.
--enable-logs-handler     Default: true
If true, install a /logs handler for the apiserver logs.
--encryption-provider-config string
The file containing configuration for encryption providers to be used for storing secrets in etcd
--endpoint-reconciler-type string     Default: "lease"
Use an endpoint reconciler (master-count, lease, none)
--etcd-cafile string
SSL Certificate Authority file used to secure etcd communication.
--etcd-certfile string
SSL certification file used to secure etcd communication.
--etcd-compaction-interval duration     Default: 5m0s
The interval of compaction requests. If 0, the compaction request from apiserver is disabled.
--etcd-count-metric-poll-period duration     Default: 1m0s
Frequency of polling etcd for number of resources per type. 0 disables the metric collection.
--etcd-keyfile string
SSL key file used to secure etcd communication.
--etcd-prefix string     Default: "/registry"
The prefix to prepend to all resource paths in etcd.
--etcd-servers stringSlice
List of etcd servers to connect with (scheme://ip:port), comma separated.
--etcd-servers-overrides stringSlice
Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated.
--event-ttl duration     Default: 1h0m0s
Amount of time to retain events.
--external-hostname string
The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs).
--feature-gates mapStringBool
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
-h, --help
help for kube-apiserver
--http2-max-streams-per-connection int
The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubelet-certificate-authority string
Path to a cert file for the certificate authority.
--kubelet-client-certificate string
Path to a client cert file for TLS.
--kubelet-client-key string
Path to a client key file for TLS.
--kubelet-https     Default: true
Use https for kubelet connections.
--kubelet-preferred-address-types stringSlice     Default: [Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP]
List of the preferred NodeAddressTypes to use for kubelet connections.
--kubelet-read-only-port uint     Default: 10255
DEPRECATED: kubelet port.
--kubelet-timeout duration     Default: 5s
Timeout for kubelet operations.
--kubernetes-service-node-port int
If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP.
--log-backtrace-at traceLocation     Default: :0
when logging hits line file:N, emit a stack trace
--log-dir string
If non-empty, write log files in this directory
--log-file string
If non-empty, use this log file
--log-flush-frequency duration     Default: 5s
Maximum number of seconds between log flushes
--logtostderr     Default: true
log to standard error instead of files
--master-service-namespace string     Default: "default"
DEPRECATED: the namespace from which the kubernetes master services should be injected into pods.
--max-connection-bytes-per-sec int
If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests.
--max-mutating-requests-inflight int     Default: 200
The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.
--max-requests-inflight int     Default: 400
The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.
--min-request-timeout int     Default: 1800
An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load.
--oidc-ca-file string
If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
--oidc-client-id string
The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
--oidc-groups-claim string
If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
--oidc-groups-prefix string
If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
--oidc-issuer-url string
The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
--oidc-required-claim mapStringString
A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims.
--oidc-signing-algs stringSlice     Default: [RS256]
Comma-separated list of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1.
--oidc-username-claim string     Default: "sub"
The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details.
--oidc-username-prefix string
If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
--profiling     Default: true
Enable profiling via web interface host:port/debug/pprof/
--proxy-client-cert-file string
Client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. It is expected that this cert includes a signature from the CA in the --requestheader-client-ca-file flag. That CA is published in the 'extension-apiserver-authentication' configmap in the kube-system namespace. Components receiving calls from kube-aggregator should use that CA to perform their half of the mutual TLS verification.
--proxy-client-key-file string
Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins.
--request-timeout duration     Default: 1m0s
An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests.
--requestheader-allowed-names stringSlice
List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.
--requestheader-client-ca-file string
Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests.
--requestheader-extra-headers-prefix stringSlice
List of request header prefixes to inspect. X-Remote-Extra- is suggested.
--requestheader-group-headers stringSlice
List of request headers to inspect for groups. X-Remote-Group is suggested.
--requestheader-username-headers stringSlice
List of request headers to inspect for usernames. X-Remote-User is common.
--runtime-config mapStringString
A set of key=value pairs that describe runtime configuration that may be passed to apiserver. <group>/<version> (or <version> for the core group) key can be used to turn on/off specific api versions. api/all is special key to control all api versions, be careful setting it false, unless you know what you do. api/legacy is deprecated, we will remove it in the future, so stop using it.
--secure-port int     Default: 6443
The port on which to serve HTTPS with authentication and authorization. It cannot be switched off with 0.
--service-account-issuer string
Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI.
--service-account-key-file stringArray
File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. If unspecified, --tls-private-key-file is used. Must be specified when --service-account-signing-key is provided
--service-account-lookup     Default: true
If true, validate ServiceAccount tokens exist in etcd as part of authentication.
--service-account-max-token-expiration duration
The maximum validity duration of a token created by the service account token issuer. If an otherwise valid TokenRequest with a validity duration larger than this value is requested, a token will be issued with a validity duration of this value.
--service-account-signing-key-file string
Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. (Requires the 'TokenRequest' feature gate.)
--service-cluster-ip-range ipNet     Default: 10.0.0.0/24
A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
--service-node-port-range portRange     Default: 30000-32767
A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.
--skip-headers
If true, avoid header prefixes in the log messages
--stderrthreshold severity     Default: 2
logs at or above this threshold go to stderr
--storage-backend string
The storage backend for persistence. Options: 'etcd3' (default).
--storage-media-type string     Default: "application/vnd.kubernetes.protobuf"
The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting.
--target-ram-mb int
Memory limit for apiserver in MB (used to configure sizes of caches, etc.)
--tls-cert-file string
File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites stringSlice
Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. Possible values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_RC4_128_SHA
--tls-min-version string
Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12
--tls-private-key-file string
File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey     Default: []
A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com".
--token-auth-file string
If set, the file that will be used to secure the secure port of the API server via token authentication.
-v, --v Level
number for the log level verbosity
--version version[=true]
Print version information and quit
--vmodule moduleSpec
comma-separated list of pattern=N settings for file-filtered logging
--watch-cache     Default: true
Enable watch caching in the apiserver
--watch-cache-sizes stringSlice
Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. It takes effect when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) have system defaults set by heuristics, others default to default-watch-cache-size
+ + + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index da43b5a7790a7..96bf082e5a130 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -343,7 +343,7 @@ kube-controller-manager [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (ALPHA - default=false)
CSIMigration=true|false (ALPHA - default=false)
CSIMigrationAWS=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSINodeInfo=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomResourcePublishOpenAPI=true|false (ALPHA - default=false)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServerSideApply=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StorageVersionHash=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (ALPHA - default=false)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md.orig b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md.orig new file mode 100644 index 0000000000000..96bf082e5a130 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md.orig @@ -0,0 +1,808 @@ +--- +title: kube-controller-manager +notitle: true +--- +## kube-controller-manager + + + +### Synopsis + + +The Kubernetes controller manager is a daemon that embeds +the core control loops shipped with Kubernetes. In applications of robotics and +automation, a control loop is a non-terminating loop that regulates the state of +the system. In Kubernetes, a controller is a control loop that watches the shared +state of the cluster through the apiserver and makes changes attempting to move the +current state towards the desired state. Examples of controllers that ship with +Kubernetes today are the replication controller, endpoints controller, namespace +controller, and serviceaccounts controller. + +``` +kube-controller-manager [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--allocate-node-cidrs
Should CIDRs for Pods be allocated and set on the cloud provider.
--alsologtostderr
log to standard error as well as files
--attach-detach-reconcile-sync-period duration     Default: 1m0s
The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.
--authentication-kubeconfig string
kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenaccessreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster.
--authentication-skip-lookup
If false, the authentication-kubeconfig will be used to lookup missing authentication configuration from the cluster.
--authentication-token-webhook-cache-ttl duration     Default: 10s
The duration to cache responses from the webhook token authenticator.
--authentication-tolerate-lookup-failure
If true, failures to look up missing authentication configuration from the cluster are not considered fatal. Note that this can result in authentication that treats all requests as anonymous.
--authorization-always-allow-paths stringSlice     Default: [/healthz]
A list of HTTP paths to skip during authorization, i.e. these are authorized without contacting the 'core' kubernetes server.
--authorization-kubeconfig string
kubeconfig file pointing at the 'core' kubernetes server with enough rights to create subjectaccessreviews.authorization.k8s.io. This is optional. If empty, all requests not skipped by authorization are forbidden.
--authorization-webhook-cache-authorized-ttl duration     Default: 10s
The duration to cache 'authorized' responses from the webhook authorizer.
--authorization-webhook-cache-unauthorized-ttl duration     Default: 10s
The duration to cache 'unauthorized' responses from the webhook authorizer.
--azure-container-registry-config string
Path to the file containing Azure container registry configuration information.
--bind-address ip     Default: 0.0.0.0
The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).
--cert-dir string
The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.
--cidr-allocator-type string     Default: "RangeAllocator"
Type of CIDR allocator to use
--client-ca-file string
If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.
--cloud-config string
The path to the cloud provider configuration file. Empty string for no configuration file.
--cloud-provider string
The provider for cloud services. Empty string for no provider.
--cluster-cidr string
CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true
--cluster-name string     Default: "kubernetes"
The instance prefix for the cluster.
--cluster-signing-cert-file string     Default: "/etc/kubernetes/ca/ca.pem"
Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates
--cluster-signing-key-file string     Default: "/etc/kubernetes/ca/ca.key"
Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates
--concurrent-deployment-syncs int32     Default: 5
The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load
--concurrent-endpoint-syncs int32     Default: 5
The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load
--concurrent-gc-syncs int32     Default: 20
The number of garbage collector workers that are allowed to sync concurrently.
--concurrent-namespace-syncs int32     Default: 10
The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load
--concurrent-replicaset-syncs int32     Default: 5
The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load
--concurrent-resource-quota-syncs int32     Default: 5
The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load
--concurrent-service-syncs int32     Default: 1
The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load
--concurrent-serviceaccount-token-syncs int32     Default: 5
The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load
--concurrent-ttl-after-finished-syncs int32     Default: 5
The number of TTL-after-finished controller workers that are allowed to sync concurrently.
--concurrent_rc_syncs int32     Default: 5
The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load
--configure-cloud-routes     Default: true
Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.
--contention-profiling
Enable lock contention profiling, if profiling is enabled
--controller-start-interval duration
Interval between starting controller managers.
--controllers stringSlice     Default: [*]
A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.
All controllers: attachdetach, bootstrapsigner, cloud-node-lifecycle, clusterrole-aggregation, cronjob, csrapproving, csrcleaner, csrsigning, daemonset, deployment, disruption, endpoint, garbagecollector, horizontalpodautoscaling, job, namespace, nodeipam, nodelifecycle, persistentvolume-binder, persistentvolume-expander, podgc, pv-protection, pvc-protection, replicaset, replicationcontroller, resourcequota, root-ca-cert-publisher, route, service, serviceaccount, serviceaccount-token, statefulset, tokencleaner, ttl, ttl-after-finished
Disabled-by-default controllers: bootstrapsigner, tokencleaner
--deployment-controller-sync-period duration     Default: 30s
Period for syncing the deployments.
--disable-attach-detach-reconcile-sync
Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.
--enable-dynamic-provisioning     Default: true
Enable dynamic provisioning for environments that support it.
--enable-garbage-collector     Default: true
Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.
--enable-hostpath-provisioner
Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.
--enable-taint-manager     Default: true
WARNING: Beta feature. If set to true, enables NoExecute taints and will evict all non-tolerating Pods running on Nodes tainted with this kind of taint.
--experimental-cluster-signing-duration duration     Default: 8760h0m0s
The duration for which signed certificates will be valid.
--external-cloud-volume-plugin string
The plugin to use when the cloud provider is set to external. Can be empty; should only be set when --cloud-provider is external. Currently used to allow node and volume controllers to work for in-tree cloud providers.
--feature-gates mapStringBool
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
--flex-volume-plugin-dir string     Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.
-h, --help
help for kube-controller-manager
--horizontal-pod-autoscaler-cpu-initialization-period duration     Default: 5m0s
The period after pod start when CPU samples might be skipped.
--horizontal-pod-autoscaler-downscale-stabilization duration     Default: 5m0s
The period for which autoscaler will look backwards and not scale down below any recommendation it made during that period.
--horizontal-pod-autoscaler-initial-readiness-delay duration     Default: 30s
The period after pod start during which readiness changes will be treated as initial readiness.
--horizontal-pod-autoscaler-sync-period duration     Default: 15s
The period for syncing the number of pods in horizontal pod autoscaler.
--horizontal-pod-autoscaler-tolerance float     Default: 0.1
The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.
--http2-max-streams-per-connection int
The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kube-api-burst int32     Default: 30
Burst to use while talking with kubernetes apiserver.
--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"
Content type of requests sent to apiserver.
--kube-api-qps float32     Default: 20
QPS to use while talking with kubernetes apiserver.
--kubeconfig string
Path to kubeconfig file with authorization and master location information.
--large-cluster-size-threshold int32     Default: 50
Number of nodes from which the NodeController treats the cluster as large for eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.
--leader-elect     Default: true
Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration     Default: 15s
The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.
--leader-elect-renew-deadline duration     Default: 10s
The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.
--leader-elect-resource-lock endpoints     Default: "endpoints"
The type of resource object that is used for locking during leader election. Supported options are `endpoints` (default) and `configmaps`.
--leader-elect-retry-period duration     Default: 2s
The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.
--log-backtrace-at traceLocation     Default: :0
when logging hits line file:N, emit a stack trace
--log-dir string
If non-empty, write log files in this directory
--log-file string
If non-empty, use this log file
--log-flush-frequency duration     Default: 5s
Maximum number of seconds between log flushes
--logtostderr     Default: true
log to standard error instead of files
--master string
The address of the Kubernetes API server (overrides any value in kubeconfig).
--min-resync-period duration     Default: 12h0m0s
The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.
--namespace-sync-period duration     Default: 5m0s
The period for syncing namespace life-cycle updates
--node-cidr-mask-size int32     Default: 24
Mask size for node cidr in cluster.
--node-eviction-rate float32     Default: 0.1
Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.
--node-monitor-grace-period duration     Default: 40s
Amount of time which we allow a running Node to be unresponsive before marking it unhealthy. Must be N times more than kubelet's nodeStatusUpdateFrequency, where N is the number of retries allowed for the kubelet to post node status.
--node-monitor-period duration     Default: 5s
The period for syncing NodeStatus in NodeController.
--node-startup-grace-period duration     Default: 1m0s
Amount of time which we allow a starting Node to be unresponsive before marking it unhealthy.
--pod-eviction-timeout duration     Default: 5m0s
The grace period for deleting pods on failed nodes.
--profiling
Enable profiling via web interface host:port/debug/pprof/
--pv-recycler-increment-timeout-nfs int32     Default: 30
The increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod
--pv-recycler-minimum-timeout-hostpath int32     Default: 60
The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-minimum-timeout-nfs int32     Default: 300
The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod
--pv-recycler-pod-template-filepath-hostpath string
The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-pod-template-filepath-nfs string
The file path to a pod definition used as a template for NFS persistent volume recycling
--pv-recycler-timeout-increment-hostpath int32     Default: 30
The increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.
--pvclaimbinder-sync-period duration     Default: 15s
The period for syncing persistent volumes and persistent volume claims
--requestheader-allowed-names stringSlice
List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.
--requestheader-client-ca-file string
Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests.
--requestheader-extra-headers-prefix stringSlice     Default: [x-remote-extra-]
List of request header prefixes to inspect. X-Remote-Extra- is suggested.
--requestheader-group-headers stringSlice     Default: [x-remote-group]
List of request headers to inspect for groups. X-Remote-Group is suggested.
--requestheader-username-headers stringSlice     Default: [x-remote-user]
List of request headers to inspect for usernames. X-Remote-User is common.
--resource-quota-sync-period duration     Default: 5m0s
The period for syncing quota usage status in the system
--root-ca-file string
If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.
--route-reconciliation-period duration     Default: 10s
The period for reconciling routes created for Nodes by cloud provider.
--secondary-node-eviction-rate float32     Default: 0.01
Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.
--secure-port int     Default: 10257
The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all.
--service-account-private-key-file string
Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.
--service-cluster-ip-range string
CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true
--skip-headers
If true, avoid header prefixes in the log messages
--stderrthreshold severity     Default: 2
logs at or above this threshold go to stderr
--terminated-pod-gc-threshold int32     Default: 12500
Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.
--tls-cert-file string
File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites stringSlice
Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. Possible values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_RC4_128_SHA
--tls-min-version string
Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12
--tls-private-key-file string
File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey     Default: []
A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches take precedence over wildcard matches, and explicit domain patterns take precedence over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com".
--unhealthy-zone-threshold float32     Default: 0.55
Fraction of Nodes in a zone which must be not Ready (minimum 3) for the zone to be treated as unhealthy.
--use-service-account-credentials
If true, use individual service account credentials for each controller.
-v, --v Level
number for the log level verbosity
--version version[=true]
Print version information and quit
--vmodule moduleSpec
comma-separated list of pattern=N settings for file-filtered logging
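The flags above are normally combined into a single kube-controller-manager invocation (for example, in a static Pod manifest or a systemd unit). The following is a minimal, illustrative sketch only; the kubeconfig and CA paths, the Pod CIDR, and the chosen controllers and feature gates are placeholders rather than recommended values:

```
# Illustrative sketch only: paths, CIDR, and controller/gate choices are placeholders.
kube-controller-manager \
  --kubeconfig=/etc/kubernetes/controller-manager.conf \
  --leader-elect=true \
  --use-service-account-credentials=true \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.244.0.0/16 \
  --controllers=*,bootstrapsigner,tokencleaner \
  --feature-gates=TTLAfterFinished=true \
  --cluster-signing-cert-file=/etc/kubernetes/ca/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ca/ca.key
```

Here `--controllers=*,bootstrapsigner,tokencleaner` keeps all on-by-default controllers and additionally enables the two disabled-by-default ones listed above; adjust the gate and controller selection to your cluster.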
+ + + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md index a080424984a34..5f88eb917c8f1 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md @@ -111,7 +111,7 @@ kube-proxy [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (ALPHA - default=false)
CSIMigration=true|false (ALPHA - default=false)
CSIMigrationAWS=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSINodeInfo=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomResourcePublishOpenAPI=true|false (ALPHA - default=false)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServerSideApply=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StorageVersionHash=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (ALPHA - default=false)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md.orig b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md.orig new file mode 100644 index 0000000000000..5f88eb917c8f1 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md.orig @@ -0,0 +1,317 @@ +--- +title: kube-proxy +notitle: true +--- +## kube-proxy + + + +### Synopsis + + +The Kubernetes network proxy runs on each node. This +reflects services as defined in the Kubernetes API on each node and can do simple +TCP, UDP, and SCTP stream forwarding or round robin TCP, UDP, and SCTP forwarding across a set of backends. +Service cluster IPs and ports are currently found through Docker-links-compatible +environment variables specifying ports opened by the service proxy. There is an optional +addon that provides cluster DNS for these cluster IPs. The user must create a service +with the apiserver API to configure the proxy. + +``` +kube-proxy [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--azure-container-registry-config string
Path to the file containing Azure container registry configuration information.
--bind-address 0.0.0.0     Default: 0.0.0.0
The IP address for the proxy server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces)
--cleanup
If true, clean up iptables and IPVS rules and exit.
--cleanup-ipvs     Default: true
If true, make kube-proxy clean up IPVS rules before running. Default is true
--cluster-cidr string
The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead
--config string
The path to the configuration file.
--config-sync-period duration     Default: 15m0s
How often configuration from the apiserver is refreshed. Must be greater than 0.
--conntrack-max-per-core int32     Default: 32768
Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).
--conntrack-min int32     Default: 131072
Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).
--conntrack-tcp-timeout-close-wait duration     Default: 1h0m0s
NAT timeout for TCP connections in the CLOSE_WAIT state
--conntrack-tcp-timeout-established duration     Default: 24h0m0s
Idle timeout for established TCP connections (0 to leave as-is)
--feature-gates mapStringBool
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
--healthz-bind-address 0.0.0.0     Default: 0.0.0.0:10256
The IP address for the health check server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces)
--healthz-port int32     Default: 10256
The port to bind the health check server. Use 0 to disable.
-h, --help
help for kube-proxy
--hostname-override string
If non-empty, will use this string as identification instead of the actual hostname.
--iptables-masquerade-bit int32     Default: 14
If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].
--iptables-min-sync-period duration
The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').
--iptables-sync-period duration     Default: 30s
The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
--ipvs-exclude-cidrs stringSlice
A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.
--ipvs-min-sync-period duration
The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').
--ipvs-scheduler string
The ipvs scheduler type when proxy mode is ipvs
--ipvs-sync-period duration     Default: 30s
The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
--kube-api-burst int32     Default: 10
Burst to use while talking with kubernetes apiserver
--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"
Content type of requests sent to apiserver.
--kube-api-qps float32     Default: 5
QPS to use while talking with kubernetes apiserver
--kubeconfig string
Path to kubeconfig file with authorization information (the master location is set by the master flag).
--log-flush-frequency duration     Default: 5s
Maximum number of seconds between log flushes
--masquerade-all
If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this is not commonly needed)
--master string
The address of the Kubernetes API server (overrides any value in kubeconfig)
--metrics-bind-address 0.0.0.0     Default: 127.0.0.1:10249
The IP address for the metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces)
--metrics-port int32     Default: 10249
The port to bind the metrics server. Use 0 to disable.
--nodeport-addresses stringSlice
A string slice of values which specify the addresses to use for NodePorts. Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses.
--oom-score-adj int32     Default: -999
The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
--profiling
If true, enables profiling via web interface on the /debug/pprof handler.
--proxy-mode ProxyMode
Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs' (experimental). If blank, use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, and the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.
--proxy-port-range port-range
Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If unspecified, 0, or 0-0, ports will be randomly chosen.
--udp-timeout duration     Default: 250ms
How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace
--version version[=true]
Print version information and quit
--write-config-to string
If set, write the default configuration values to this file and exit.
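As a rough illustration of how the kube-proxy flags documented above fit together, the invocation below runs the proxy in IPVS mode with a placeholder Pod CIDR; the kubeconfig path, the CIDR, and the choice of `rr` (round-robin) as the IPVS scheduler are assumptions made for the example, not defaults:

```
# Illustrative sketch only: kubeconfig path, CIDR, and scheduler choice are placeholders.
kube-proxy \
  --kubeconfig=/var/lib/kube-proxy/kubeconfig \
  --proxy-mode=ipvs \
  --ipvs-scheduler=rr \
  --ipvs-sync-period=30s \
  --cluster-cidr=10.244.0.0/16 \
  --metrics-bind-address=0.0.0.0
```

In practice most of these values are usually supplied through the file passed to --config rather than on the command line.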
+ + + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md index dda0f80f1b43a..2f61a326f4e77 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md @@ -153,7 +153,7 @@ kube-scheduler [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (ALPHA - default=false)
CSIMigration=true|false (ALPHA - default=false)
CSIMigrationAWS=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSINodeInfo=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomResourcePublishOpenAPI=true|false (ALPHA - default=false)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServerSideApply=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StorageVersionHash=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (ALPHA - default=false)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md.orig b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md.orig new file mode 100644 index 0000000000000..2f61a326f4e77 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md.orig @@ -0,0 +1,464 @@ +--- +title: kube-scheduler +notitle: true +--- +## kube-scheduler + + + +### Synopsis + + +The Kubernetes scheduler is a policy-rich, topology-aware, +workload-specific function that significantly impacts availability, performance, +and capacity. The scheduler needs to take into account individual and collective +resource requirements, quality of service requirements, hardware/software/policy +constraints, affinity and anti-affinity specifications, data locality, inter-workload +interference, deadlines, and so on. Workload-specific requirements will be exposed +through the API as necessary. + +``` +kube-scheduler [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--address string     Default: "0.0.0.0"
DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces). See --bind-address instead.
--algorithm-provider string
DEPRECATED: the scheduling algorithm provider to use, one of: ClusterAutoscalerProvider | DefaultProvider
--alsologtostderr
log to standard error as well as files
--authentication-kubeconfig string
kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenaccessreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster.
--authentication-skip-lookup
If false, the authentication-kubeconfig will be used to look up missing authentication configuration from the cluster.
--authentication-token-webhook-cache-ttl duration     Default: 10s
The duration to cache responses from the webhook token authenticator.
--authentication-tolerate-lookup-failure     Default: true
If true, failures to look up missing authentication configuration from the cluster are not considered fatal. Note that this can result in authentication that treats all requests as anonymous.
--authorization-always-allow-paths stringSlice     Default: [/healthz]
A list of HTTP paths to skip during authorization, i.e. these are authorized without contacting the 'core' kubernetes server.
--authorization-kubeconfig string
kubeconfig file pointing at the 'core' kubernetes server with enough rights to create subjectaccessreviews.authorization.k8s.io. This is optional. If empty, all requests not skipped by authorization are forbidden.
--authorization-webhook-cache-authorized-ttl duration     Default: 10s
The duration to cache 'authorized' responses from the webhook authorizer.
--authorization-webhook-cache-unauthorized-ttl duration     Default: 10s
The duration to cache 'unauthorized' responses from the webhook authorizer.
--azure-container-registry-config string
Path to the file containing Azure container registry configuration information.
--bind-address ip     Default: 0.0.0.0
The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).
--cert-dir string
The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.
--client-ca-file string
If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.
--config string
The path to the configuration file. Flags override values in this file.
--contention-profiling
DEPRECATED: enable lock contention profiling, if profiling is enabled
--feature-gates mapStringBool
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
AttachVolumeLimit=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (ALPHA - default=false)
CSIDriverRegistry=true|false (ALPHA - default=false)
CSINodeInfo=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
CustomPodDNS=true|false (BETA - default=true)
CustomResourceSubresources=true|false (BETA - default=true)
CustomResourceValidation=true|false (BETA - default=true)
CustomResourceWebhookConversion=true|false (ALPHA - default=false)
DebugContainers=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandInUsePersistentVolumes=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (BETA - default=true)
HyperVContainer=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletPodResources=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
NodeLease=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (BETA - default=true)
PodPriority=true|false (BETA - default=true)
PodReadinessGates=true|false (BETA - default=true)
PodShareProcessNamespace=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
ResourceQuotaScopeSelectors=true|false (BETA - default=true)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (ALPHA - default=false)
RuntimeClass=true|false (ALPHA - default=false)
SCTPSupport=true|false (ALPHA - default=false)
ScheduleDaemonSetPods=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (ALPHA - default=false)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (ALPHA - default=false)
VolumeSnapshotDataSource=true|false (ALPHA - default=false)
VolumeSubpathEnvExpansion=true|false (ALPHA - default=false)
-h, --help
help for kube-scheduler
--http2-max-streams-per-connection int
The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kube-api-burst int32     Default: 100
DEPRECATED: burst to use while talking with kubernetes apiserver
--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"
DEPRECATED: content type of requests sent to apiserver.
--kube-api-qps float32     Default: 50
DEPRECATED: QPS to use while talking with kubernetes apiserver
--kubeconfig string
DEPRECATED: path to kubeconfig file with authorization and master location information.
--leader-elect     Default: true
Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration     Default: 15s
The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.
--leader-elect-renew-deadline duration     Default: 10s
The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.
--leader-elect-resource-lock endpoints     Default: "endpoints"
The type of resource object that is used for locking during leader election. Supported options are `endpoints` (default) and `configmaps`.
--leader-elect-retry-period duration     Default: 2s
The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.
--lock-object-name string     Default: "kube-scheduler"
DEPRECATED: define the name of the lock object.
--lock-object-namespace string     Default: "kube-system"
DEPRECATED: define the namespace of the lock object.
--log-backtrace-at traceLocation     Default: :0
when logging hits line file:N, emit a stack trace
--log-dir string
If non-empty, write log files in this directory
--log-file string
If non-empty, use this log file
--log-flush-frequency duration     Default: 5s
Maximum number of seconds between log flushes
--logtostderr     Default: true
log to standard error instead of files
--master string
The address of the Kubernetes API server (overrides any value in kubeconfig)
--policy-config-file string
DEPRECATED: file with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config=true
--policy-configmap string
DEPRECATED: name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config=false. The config must be provided as the value of an element in 'Data' map with the key='policy.cfg'
--policy-configmap-namespace string     Default: "kube-system"
DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty.
--port int     Default: 10251
DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead.
--profiling
DEPRECATED: enable profiling via web interface host:port/debug/pprof/
--requestheader-allowed-names stringSlice
List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.
--requestheader-client-ca-file string
Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests.
--requestheader-extra-headers-prefix stringSlice     Default: [x-remote-extra-]
List of request header prefixes to inspect. X-Remote-Extra- is suggested.
--requestheader-group-headers stringSlice     Default: [x-remote-group]
List of request headers to inspect for groups. X-Remote-Group is suggested.
--requestheader-username-headers stringSlice     Default: [x-remote-user]
List of request headers to inspect for usernames. X-Remote-User is common.
--scheduler-name string     Default: "default-scheduler"
DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's "spec.schedulerName".
--secure-port int     Default: 10259
The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all.
--skip-headers
If true, avoid header prefixes in the log messages
--stderrthreshold severity     Default: 2
logs at or above this threshold go to stderr
--tls-cert-file string
File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites stringSlice
Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. Possible values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_RC4_128_SHA
--tls-min-version string
Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12
--tls-private-key-file string
File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey     Default: []
A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches take precedence over wildcard matches, and explicit domain patterns take precedence over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com".
--use-legacy-policy-config
DEPRECATED: when set to true, the scheduler will ignore the policy ConfigMap and use the policy config file
-v, --v Level
number for the log level verbosity
--version version[=true]
Print version information and quit
--vmodule moduleSpec
comma-separated list of pattern=N settings for file-filtered logging
--write-config-to string
If set, write the configuration values to this file and exit.
+ + + diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index 9778662481ff0..84f0319ad9a6b 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -555,7 +555,7 @@ kubelet [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
Accelerators=true|false
AdvancedAuditing=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllowExtTrafficLocalEndpoints=true|false
AppArmor=true|false (BETA - default=true)
BlockVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CSIPersistentVolume=true|false (ALPHA - default=false)
CustomPodDNS=true|false (ALPHA - default=false)
CustomResourceValidation=true|false (BETA - default=true)
DebugContainers=true|false
DevicePlugins=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (ALPHA - default=false)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (ALPHA - default=false)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (ALPHA - default=false)
KubeletConfigFile=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
MountPropagation=true|false (ALPHA - default=false)
PVCProtection=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (ALPHA - default=false)
PodPriority=true|false (ALPHA - default=false)
ReadOnlyAPIDataVolumes=true|false
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceProxyAllowExternalIPs=true|false
StreamingProxyRedirects=true|false (BETA - default=true)
SupportIPVSProxyMode=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
VolumeScheduling=true|false (ALPHA - default=false)
VolumeSubpath=true|false
+ A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
Accelerators=true|false
AdvancedAuditing=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllowExtTrafficLocalEndpoints=true|false
AppArmor=true|false (BETA - default=true)
BlockVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CSIPersistentVolume=true|false (ALPHA - default=false)
CustomPodDNS=true|false (ALPHA - default=false)
CustomResourceValidation=true|false (BETA - default=true)
DebugContainers=true|false
DevicePlugins=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (ALPHA - default=false)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (ALPHA - default=false)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletConfigFile=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (ALPHA - default=false)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
MountPropagation=true|false (ALPHA - default=false)
PVCProtection=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (ALPHA - default=false)
PodPriority=true|false (ALPHA - default=false)
ReadOnlyAPIDataVolumes=true|false
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceProxyAllowExternalIPs=true|false
StreamingProxyRedirects=true|false (BETA - default=true)
SupportIPVSProxyMode=true|false (ALPHA - default=false)
SupportNodePidsLimit=true|false (BETA - default=true)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
VolumeScheduling=true|false (ALPHA - default=false)
VolumeSubpath=true|false
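
As a minimal, purely illustrative sketch of how one of these gates is turned on, the kubelet can be started with the `--feature-gates` flag; the gate chosen and the config path below are example values, not recommended settings:

```
# Hypothetical invocation: enable an alpha feature gate listed above.
# The config file path is a placeholder; adjust for your node layout.
kubelet --feature-gates=LocalStorageCapacityIsolationFSQuotaMonitoring=true \
        --config=/var/lib/kubelet/config.yaml
```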
diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md.orig b/content/en/docs/reference/command-line-tools-reference/kubelet.md.orig new file mode 100644 index 0000000000000..84f0319ad9a6b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md.orig @@ -0,0 +1,1186 @@ +--- +title: kubelet +notitle: true +--- +## kubelet + + + +### Synopsis + + +The kubelet is the primary "node agent" that runs on each +node. The kubelet works in terms of a PodSpec. A PodSpec is a YAML or JSON object +that describes a pod. The kubelet takes a set of PodSpecs that are provided through +various mechanisms (primarily through the apiserver) and ensures that the containers +described in those PodSpecs are running and healthy. The kubelet doesn't manage +containers which were not created by Kubernetes. + +Other than from a PodSpec from the apiserver, there are three ways that a container +manifest can be provided to the Kubelet. + +File: Path passed as a flag on the command line. Files under this path will be monitored +periodically for updates. The monitoring period is 20s by default and is configurable +via a flag. + +HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This endpoint +is checked every 20 seconds (also configurable with a flag). + +HTTP server: The kubelet can also listen for HTTP and respond to a simple API +(underspec'd currently) to submit a new manifest. + +#### Pod Lifecycle Event Generator (PLEG) + +The Pod Lifecycle Event Generator is a function of the kubelet that creates a list of +the states for all containers and pods then compares it to the previous states of the +containers and pods in a process called Relisting. This allows the PLEG to know which +pods and containers need to be synced. In versions prior to 1.2, this was accomplished +by polling and was CPU intensive. By changing to this method, this significantly reduced +resource utilization allowing for better container density. 
+ + +``` +kubelet [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--address 0.0.0.0
The IP address for the Kubelet to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) (default 0.0.0.0)
--allow-privileged
If true, allow containers to request privileged mode.
--alsologtostderr
log to standard error as well as files
--anonymous-auth
Enables anonymous requests to the Kubelet server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true)
--application-metrics-count-limit int
Max number of application metrics to store (per container) (default 100)
--authentication-token-webhook
Use the TokenReview API to determine authentication for bearer tokens.
--authentication-token-webhook-cache-ttl duration
The duration to cache responses from the webhook token authenticator. (default 2m0s)
--authorization-mode string
Authorization mode for Kubelet server. Valid options are AlwaysAllow or Webhook. Webhook mode uses the SubjectAccessReview API to determine authorization. (default "AlwaysAllow")
--authorization-webhook-cache-authorized-ttl duration
The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s)
--authorization-webhook-cache-unauthorized-ttl duration
The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s)
--azure-container-registry-config string
Path to the file containing Azure container registry configuration information.
--boot-id-file string
Comma-separated list of files to check for boot-id. Use the first one that exists. (default "/proc/sys/kernel/random/boot_id")
--bootstrap-checkpoint-path string
Path to the directory where the checkpoints are stored
--bootstrap-kubeconfig string
Path to a kubeconfig file that will be used to get a client certificate for the kubelet. If the file specified by --kubeconfig does not exist, the bootstrap kubeconfig is used to request a client certificate from the API server. On success, a kubeconfig file referencing the generated client certificate and key is written to the path specified by --kubeconfig. The client certificate and key file will be stored in the directory pointed to by --cert-dir.
--cert-dir string
The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "/var/lib/kubelet/pki")
--cgroup-driver string
Driver that the kubelet uses to manipulate cgroups on the host.
--cgroup-root string
Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.
--cgroups-per-qos
Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
--chaos-chance float
If > 0.0, introduce random client errors and latency. Intended for testing.
--client-ca-file string
If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.
--cloud-config string
The path to the cloud provider configuration file.
--cloud-provider string
The provider for cloud services. Specify empty string for running with no cloud provider.
--cloud-provider-gce-lb-src-cidrs cidrs
CIDRs opened in GCE firewall for LB traffic proxy & health checks (default 130.211.0.0/22,35.191.0.0/16,209.85.152.0/22,209.85.204.0/22)
--cluster-dns stringSlice
Comma-separated list of DNS server IP addresses.
--cluster-domain string
Domain for this cluster.
--cni-bin-dir string
The full path of the directory in which to search for CNI plugin binaries. Default: /opt/cni/bin
--cni-conf-dir string
The full path of the directory in which to search for CNI config files. Default: /etc/cni/net.d
--container-hints string
location of the container hints file (default "/etc/cadvisor/container_hints.json")
--container-runtime string
The container runtime to use. Possible values: 'docker', 'remote', 'rkt(deprecated)'. (default "docker")
--container-runtime-endpoint string
[Experimental] The endpoint of remote runtime service. Currently unix socket is supported on Linux, and tcp is supported on windows.
--containerd string
containerd endpoint (default "unix:///var/run/containerd.sock")
--containerized
Experimental support for running kubelet in a container.
--contention-profiling
Enable lock contention profiling, if profiling is enabled
--cpu-cfs-quota
Enable CPU CFS quota enforcement for containers that specify CPU limits (default true)
--cpu-manager-policy string
CPU Manager policy to use. Possible values: 'none', 'static'. (default "none")
--cpu-manager-reconcile-period NodeStatusUpdateFrequency
CPU Manager reconciliation period. Examples: '10s', or '1m'. If not supplied, defaults to NodeStatusUpdateFrequency (default 10s)
--docker string
docker endpoint (default "unix:///var/run/docker.sock")
--docker-disable-shared-pid
The Container Runtime Interface (CRI) defaults to using a shared PID namespace for containers in a pod when running with Docker 1.13.1 or higher. Setting this flag reverts to the previous behavior of isolated PID namespaces. This ability will be removed in a future Kubernetes release. (default true)
--docker-endpoint string
Use this for the docker endpoint to communicate with (default "unix:///var/run/docker.sock")
--docker-env-metadata-whitelist string
A comma-separated list of environment variable keys that need to be collected for docker containers
--docker-only
Only report docker containers in addition to root stats
--docker-root string
DEPRECATED: docker root is read from docker info (this is a fallback, default: /var/lib/docker) (default "/var/lib/docker")
--docker-tls
use TLS to connect to docker
--docker-tls-ca string
path to trusted CA (default "ca.pem")
--docker-tls-cert string
path to client certificate (default "cert.pem")
--docker-tls-key string
path to private key (default "key.pem")
--dynamic-config-dir string
The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.
--enable-controller-attach-detach
Enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations (default true)
--enable-debugging-handlers
Enables server endpoints for log collection and local running of containers and commands (default true)
--enable-load-reader
Whether to enable cpu load reader
--enable-server
Enable the Kubelet's server (default true)
--enforce-node-allocatable stringSlice
A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptable options are 'pods', 'system-reserved' & 'kube-reserved'. If the latter two options are specified, '--system-reserved-cgroup' & '--kube-reserved-cgroup' must also be set respectively. See /docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (default [pods])
--event-burst int32
Maximum size of a burst of event records; temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0 (default 10)
--event-qps int32
If > 0, limit event creations per second to this value. If 0, unlimited. (default 5)
--event-storage-age-limit string
Max length of time for which to store events (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or "default" and the value is a duration. Default is applied to all non-specified event types (default "default=0")
--event-storage-event-limit string
Max number of events to store (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or "default" and the value is an integer. Default is applied to all non-specified event types (default "default=0")
--eviction-hard mapStringString
A set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a pod eviction. (default imagefs.available<15%,memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%)
--eviction-max-pod-grace-period int32
Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
--eviction-minimum-reclaim mapStringString
A set of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.
--eviction-pressure-transition-period duration
Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. (default 5m0s)
--eviction-soft mapStringString
A set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a pod eviction.
--eviction-soft-grace-period mapStringString
A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.
--exit-on-lock-contention
Whether kubelet should exit upon lock-file contention.
--experimental-allocatable-ignore-eviction
When set to 'true', Hard Eviction Thresholds will be ignored while calculating Node Allocatable. See /docs/tasks/administer-cluster/reserve-compute-resources/ for more details. [default=false]
--experimental-allowed-unsafe-sysctls stringSlice
Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in *). Use these at your own risk.
--experimental-bootstrap-kubeconfig string
deprecated: use --bootstrap-kubeconfig
--experimental-check-node-capabilities-before-mount
[Experimental] if set true, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount
--experimental-kernel-memcg-notification
If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.
--experimental-mounter-path string
[Experimental] Path of mounter binary. Leave empty to use the default mount.
--experimental-qos-reserved mapStringString
A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe how pod resource requests are reserved at the QoS level. Currently only memory is supported. [default=none]
--fail-swap-on
Makes the Kubelet fail to start if swap is enabled on the node.
--feature-gates mapStringBool
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIResponseCompression=true|false (ALPHA - default=false)
Accelerators=true|false
AdvancedAuditing=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllowExtTrafficLocalEndpoints=true|false
AppArmor=true|false (BETA - default=true)
BlockVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CSIPersistentVolume=true|false (ALPHA - default=false)
CustomPodDNS=true|false (ALPHA - default=false)
CustomResourceValidation=true|false (BETA - default=true)
DebugContainers=true|false
DevicePlugins=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (ALPHA - default=false)
EnableEquivalenceClassCache=true|false (ALPHA - default=false)
ExpandPersistentVolumes=true|false (ALPHA - default=false)
ExperimentalCriticalPodAnnotation=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HugePages=true|false (ALPHA - default=false)
Initializers=true|false (ALPHA - default=false)
KubeletConfigFile=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (ALPHA - default=false)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MountContainers=true|false (ALPHA - default=false)
MountPropagation=true|false (ALPHA - default=false)
PVCProtection=true|false (ALPHA - default=false)
PersistentLocalVolumes=true|false (ALPHA - default=false)
PodPriority=true|false (ALPHA - default=false)
ReadOnlyAPIDataVolumes=true|false
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceProxyAllowExternalIPs=true|false
StreamingProxyRedirects=true|false (BETA - default=true)
SupportIPVSProxyMode=true|false (ALPHA - default=false)
SupportNodePidsLimit=true|false (BETA - default=true)
TaintBasedEvictions=true|false (BETA - default=true)
TaintNodesByCondition=true|false (BETA - default=true)
VolumeScheduling=true|false (ALPHA - default=false)
VolumeSubpath=true|false
+
--file-check-frequency duration
Duration between checking config files for new data (default 20s)
--global-housekeeping-interval duration
Interval between global housekeepings (default 1m0s)
--google-json-key string
The Google Cloud Platform Service Account JSON Key to use for authentication.
--hairpin-mode string
How the kubelet should set up hairpin NAT. This allows endpoints of a Service to load balance back to themselves if they try to access their own Service. Valid values are "promiscuous-bridge", "hairpin-veth" and "none". (default "promiscuous-bridge")
--healthz-bind-address 0.0.0.0
The IP address for the healthz server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) (default 127.0.0.1)
--healthz-port int32
The port of the localhost healthz endpoint (set to 0 to disable) (default 10248)
--host-ipc-sources stringSlice
Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace. (default [*])
--host-network-sources stringSlice
Comma-separated list of sources from which the Kubelet allows pods to use the host network. (default [*])
--host-pid-sources stringSlice
Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace. (default [*])
--hostname-override string
If non-empty, will use this string as identification instead of the actual hostname.
--housekeeping-interval duration
Interval between container housekeepings (default 10s)
--http-check-frequency duration
Duration between checking http for new data (default 20s)
--image-gc-high-threshold int32
The percent of disk usage after which image garbage collection is always run. (default 85)
--image-gc-low-threshold int32
The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. (default 80)
--image-pull-progress-deadline duration
If no pulling progress is made before this deadline, the image pulling will be cancelled. (default 1m0s)
--image-service-endpoint string
[Experimental] The endpoint of remote image service. If not specified, it will be the same as container-runtime-endpoint by default. Currently unix socket is supported on Linux, and tcp is supported on windows.
--init-config-dir string
The Kubelet will look in this directory for the init configuration. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this argument to use the built-in default configuration values. Presently, you must also enable the KubeletConfigFile feature gate to pass this flag.
--iptables-drop-bit int32
The bit of the fwmark space to mark packets for dropping. Must be within the range [0, 31]. (default 15)
--iptables-masquerade-bit int32
The bit of the fwmark space to mark packets for SNAT. Must be within the range [0, 31]. Please match this parameter with corresponding parameter in kube-proxy. (default 14)
--kube-api-burst int32
Burst to use while talking with kubernetes apiserver (default 10)
--kube-api-content-type string
Content type of requests sent to apiserver. (default "application/vnd.kubernetes.protobuf")
--kube-api-qps int32
QPS to use while talking with kubernetes apiserver (default 5)
--kube-reserved mapStringString
A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid=1000) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory, pid, and local ephemeral storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]
--kube-reserved-cgroup string
Absolute name of the top level cgroup that is used to manage kubernetes components for which compute resources were reserved via '--kube-reserved' flag. Ex. '/kube-reserved'. [default='']
--kubeconfig string
Path to a kubeconfig file, specifying how to connect to the API server. Providing --kubeconfig enables API server mode, omitting --kubeconfig enables standalone mode.
--kubelet-cgroups string
Optional absolute name of cgroups to create and run the Kubelet in.
--lock-file string
The path to file for kubelet to use as a lock file.
--log-backtrace-at traceLocation
when logging hits line file:N, emit a stack trace (default :0)
--log-cadvisor-usage
Whether to log the usage of the cAdvisor container
--log-dir string
If non-empty, write log files in this directory
--log-flush-frequency duration
Maximum number of seconds between log flushes (default 5s)
--logtostderr
log to standard error instead of files (default true)
--machine-id-file string
Comma-separated list of files to check for machine-id. Use the first one that exists. (default "/etc/machine-id,/var/lib/dbus/machine-id")
--make-iptables-util-chains
If true, kubelet will ensure iptables utility rules are present on host. (default true)
--manifest-url string
URL for accessing the container manifest
--manifest-url-header --manifest-url-header 'a:hello,b:again,c:world' --manifest-url-header 'b:beautiful'
Comma-separated list of HTTP headers to use when accessing the manifest URL. Multiple headers with the same name will be added in the same order provided. This flag can be repeatedly invoked. For example: --manifest-url-header 'a:hello,b:again,c:world' --manifest-url-header 'b:beautiful'
--max-open-files int
Number of files that can be opened by Kubelet process. (default 1000000)
--max-pods int32
Number of Pods that can run on this Kubelet. (default 110)
--minimum-image-ttl-duration duration
Minimum age for an unused image before it is garbage collected.
--network-plugin string
The name of the network plugin to be invoked for various events in kubelet/pod lifecycle
--network-plugin-mtu int32
The MTU to be passed to the network plugin, to override the default. Set to 0 to use the default 1460 MTU.
--node-ip string
IP address of the node. If set, kubelet will use this IP address for the node
--node-labels mapStringString
Labels to add when registering the node in the cluster.
--node-status-update-frequency duration
Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. (default 10s)
--oom-score-adj int32
The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000] (default -999)
--pod-cidr string
The CIDR to use for pod IP addresses, only used in standalone mode.
--pod-infra-container-image string
The image whose network/ipc namespaces containers in each pod will use. (default "k8s.gcr.io/pause:3.1")
--pod-manifest-path string
Path to the directory containing pod manifest files to run, or the path to a single pod manifest file. Files starting with dots will be ignored.
--pods-per-core int32
Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.
--port int32
The port for the Kubelet to serve on. (default 10250)
--protect-kernel-defaults
Default kubelet behaviour for kernel tuning. If set, the kubelet errors if any of the kernel tunables differs from the kubelet defaults.
--provider-id string
Unique identifier for identifying the node in a machine database, i.e. the cloud provider
--read-only-port int32
The read-only port for the Kubelet to serve on with no authentication/authorization (set to 0 to disable) (default 10255)
--really-crash-for-testing
If true, crash when panics occur. Intended for testing.
--register-node
Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. Default=true. (default true)
--register-with-taints []api.Taint
Register the node with the given list of taints (comma separated "key=value:effect"). No-op if register-node is false.
--registry-burst int32
Maximum size of bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0 (default 10)
--registry-qps int32
If > 0, limit registry pull QPS to this value.
--resolv-conf string
Resolver configuration file used as the basis for the container DNS resolution configuration. (default "/etc/resolv.conf")
--root-dir string
Directory path for managing kubelet files (volume mounts, etc.). (default "/var/lib/kubelet")
--rotate-certificates
Auto rotate the kubelet client certificates by requesting new certificates from the kube-apiserver when the certificate expiration approaches.
--rotate-server-certificates
Auto-request and rotate the kubelet serving certificates by requesting new certificates from the kube-apiserver when the certificate expiration approaches. Requires the RotateKubeletServerCertificate feature gate to be enabled, and approval of the submitted CertificateSigningRequest objects.
--runonce
If true, exit after spawning pods from local manifests or remote URLs. Exclusive with --enable-server
--runtime-cgroups string
Optional absolute name of cgroups to create and run the runtime in.
--runtime-request-timeout duration
Timeout of all runtime requests except long-running requests - pull, logs, exec and attach. When the timeout is exceeded, the kubelet cancels the request, throws an error, and retries later. (default 2m0s)
--seccomp-profile-root string
Directory path for seccomp profiles. (default "/var/lib/kubelet/seccomp")
--serialize-image-pulls
Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. (default true)
--stderrthreshold severity
logs at or above this threshold go to stderr (default 2)
--storage-driver-buffer-duration duration
Writes in the storage driver will be buffered for this duration, and committed to the non memory backends as a single transaction (default 1m0s)
--storage-driver-db string
database name (default "cadvisor")
--storage-driver-host string
database host:port (default "localhost:8086")
--storage-driver-password string
database password (default "root")
--storage-driver-secure
use secure connection with database
--storage-driver-table string
table name (default "stats")
--storage-driver-user string
database username (default "root")
--streaming-connection-idle-timeout duration
Maximum time a streaming connection can be idle before the connection is automatically closed. 0 indicates no timeout. Example: '5m' (default 4h0m0s)
--sync-frequency duration
Max period between synchronizing running containers and config (default 1m0s)
--system-cgroups /
Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under /. Empty for no container. Rolling back the flag requires a reboot.
--system-reserved mapStringString
A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid=1000) pairs that describe resources reserved for non-kubernetes components. Currently only cpu, memory, and pid are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]
--system-reserved-cgroup string
Absolute name of the top level cgroup that is used to manage non-kubernetes components for which compute resources were reserved via '--system-reserved' flag. Ex. '/system-reserved'. [default='']
--tls-cert-file string
File containing x509 Certificate used for serving HTTPS (with intermediate certs, if any, concatenated after server cert). If --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to --cert-dir.
--tls-cipher-suites stringSlice
Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. Possible values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_RC4_128_SHA
--tls-private-key-file string
File containing x509 private key matching --tls-cert-file.
-v, --v Level
log level for V logs
--version version[=true]
Print version information and quit
--vmodule moduleSpec
comma-separated list of pattern=N settings for file-filtered logging
--volume-plugin-dir string
The full path of the directory in which to search for additional third party volume plugins (default "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/")
--volume-stats-agg-period duration
Specifies the interval for the kubelet to calculate and cache the volume disk usage for all pods and volumes.
-h, --help
help for kubelet
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md index 3d114781675a5..3758917b6b353 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md @@ -5,7 +5,6 @@ kubeadm: easily bootstrap a secure Kubernetes cluster - ┌──────────────────────────────────────────────────────────┐ │ KUBEADM │ │ Easily bootstrap a secure Kubernetes cluster │ @@ -38,29 +37,8 @@ Example usage: ### Options - - - - - - - - - - - - - - - - - - - - - - -
-h, --help
help for kubeadm
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + -h, --help help for kubeadm + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md index ced9d4b528ee8..4211c0b57e002 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md @@ -3,48 +3,17 @@ Kubeadm experimental sub-commands ### Synopsis - Kubeadm experimental sub-commands ### Options - - - - - - - - - - - - - - - -
-h, --help
help for alpha
- - +``` + -h, --help help for alpha +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md index 8548203f74cc8..8ddf61d005ff0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md @@ -3,48 +3,17 @@ Commands related to handling kubernetes certificates ### Synopsis - Commands related to handling kubernetes certificates ### Options - - - - - - - - - - - - - - - -
-h, --help
help for certs
- - +``` + -h, --help help for certs +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_certificate-key.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_certificate-key.md new file mode 100644 index 0000000000000..c42340618fac5 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_certificate-key.md @@ -0,0 +1,29 @@ + +Generate certificate keys + +### Synopsis + + +This command will print out a secure randomly-generated certificate key that can be used with +the "init" command. + +You can also use "kubeadm init --experimental-upload-certs" without specifying a certificate key and it will +generate and print one for you. + + +``` +kubeadm alpha certs certificate-key [flags] +``` + +### Options + +``` + -h, --help help for certificate-key +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_check-expiration.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_check-expiration.md new file mode 100644 index 0000000000000..3cf14558b0d28 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_check-expiration.md @@ -0,0 +1,25 @@ + +Check certificates expiration for a Kubernetes cluster + +### Synopsis + +Checks expiration for the certificates in the local PKI managed by kubeadm. + +``` +kubeadm alpha certs check-expiration [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for check-expiration +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew.md index f212b9e12be0f..4aba5d96187bf 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew.md @@ -1,9 +1,8 @@ -Renews certificates for a Kubernetes cluster +Renew certificates for a Kubernetes cluster ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ``` @@ -12,43 +11,13 @@ kubeadm alpha certs renew [flags] ### Options - - - - - - - - - - - - - - - -
-h, --help
help for renew
- - +``` + -h, --help help for renew +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md new file mode 100644 index 0000000000000..fafd0c7641aaf --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md @@ -0,0 +1,35 @@ + +Renew the certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself + +### Synopsis + +Renew the certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew admin.conf [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for admin.conf + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md index d94d7f3d34d18..ba6e1bb75a8e5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md @@ -1,10 +1,9 @@ -renew all available certificates +Renew all available certificates ### Synopsis - -Renews all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. +Renew all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. ``` kubeadm alpha certs renew all [flags] @@ -12,85 +11,19 @@ kubeadm alpha certs renew all [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save the certificates
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for all
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--use-api
Use the Kubernetes certificate API to renew certificates
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for all + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md.orig new file mode 100644 index 0000000000000..ba6e1bb75a8e5 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md.orig @@ -0,0 +1,29 @@ + +Renew all available certificates + +### Synopsis + +Renew all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. + +``` +kubeadm alpha certs renew all [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for all + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_BACKUP_17211.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_BACKUP_17211.md new file mode 100644 index 0000000000000..ba6e1bb75a8e5 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_BACKUP_17211.md @@ -0,0 +1,29 @@ + +Renew all available certificates + +### Synopsis + +Renew all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. + +``` +kubeadm alpha certs renew all [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for all + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_BASE_17211.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_BASE_17211.md new file mode 100644 index 0000000000000..60fa27fb569eb --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_BASE_17211.md @@ -0,0 +1,96 @@ + +renew all available certificates + +### Synopsis + + +Renews all known certificates necessary to run the control plan. 
Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. + +``` +kubeadm alpha certs renew all [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save the certificates
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for all
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations are searched for an existing KubeConfig file.
--use-api
Use the Kubernetes certificate API to renew certificates
+ + + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_LOCAL_17211.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_LOCAL_17211.md new file mode 100644 index 0000000000000..ba6e1bb75a8e5 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_LOCAL_17211.md @@ -0,0 +1,29 @@ + +Renew all available certificates + +### Synopsis + +Renew all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. + +``` +kubeadm alpha certs renew all [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for all + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_REMOTE_17211.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_REMOTE_17211.md new file mode 100644 index 0000000000000..d94d7f3d34d18 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all_REMOTE_17211.md @@ -0,0 +1,96 @@ + +renew all available certificates + +### Synopsis + + +Renews all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control. + +``` +kubeadm alpha certs renew all [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save the certificates
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for all
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--use-api
Use the Kubernetes certificate API to renew certificates
+ + + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md index b689ecb1696aa..e7c791ed1b9c8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md @@ -1,12 +1,15 @@ -Generates the client apiserver uses to access etcd +Renew the certificate the apiserver uses to access etcd ### Synopsis +Renew the certificate the apiserver uses to access etcd. -Renews the client apiserver uses to access etcd, and saves them into apiserver-etcd-client.cert and apiserver-etcd-client.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. ``` kubeadm alpha certs renew apiserver-etcd-client [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew apiserver-etcd-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save the certificates
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for apiserver-etcd-client
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--use-api
Use the Kubernetes certificate API to renew certificates
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-etcd-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md.orig new file mode 100644 index 0000000000000..e7c791ed1b9c8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate the apiserver uses to access etcd + +### Synopsis + +Renew the certificate the apiserver uses to access etcd. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew apiserver-etcd-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-etcd-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md index 8a9ef158f91eb..064e0c24db66e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md @@ -1,12 +1,15 @@ -Generates the Client certificate for the API server to connect to kubelet +Renew the certificate for the API server to connect to kubelet ### Synopsis +Renew the certificate for the API server to connect to kubelet. -Renews the Client certificate for the API server to connect to kubelet, and saves them into apiserver-kubelet-client.cert and apiserver-kubelet-client.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. 
+ +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. ``` kubeadm alpha certs renew apiserver-kubelet-client [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew apiserver-kubelet-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-kubelet-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md.orig new file mode 100644 index 0000000000000..064e0c24db66e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate for the API server to connect to kubelet + +### Synopsis + +Renew the certificate for the API server to connect to kubelet. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew apiserver-kubelet-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-kubelet-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md index 7c86a5fc8b7df..92b62d83d0168 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md @@ -1,12 +1,15 @@ -Generates the certificate for serving the Kubernetes API +Renew the certificate for serving the Kubernetes API ### Synopsis +Renew the certificate for serving the Kubernetes API. -Renews the certificate for serving the Kubernetes API, and saves them into apiserver.cert and apiserver.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. 
+ +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. ``` kubeadm alpha certs renew apiserver [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew apiserver [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md.orig new file mode 100644 index 0000000000000..92b62d83d0168 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate for serving the Kubernetes API + +### Synopsis + +Renew the certificate for serving the Kubernetes API. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew apiserver [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md new file mode 100644 index 0000000000000..7339fc04b8629 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md @@ -0,0 +1,35 @@ + +Renew the certificate embedded in the kubeconfig file for the controller manager to use + +### Synopsis + +Renew the certificate embedded in the kubeconfig file for the controller manager to use. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew controller-manager.conf [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. 
+ --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for controller-manager.conf + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md index 2ee20ab6a2a03..f1b796a08b56b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md @@ -1,12 +1,15 @@ -Generates the client certificate for liveness probes to healtcheck etcd +Renew the certificate for liveness probes to healtcheck etcd ### Synopsis +Renew the certificate for liveness probes to healtcheck etcd. -Renews the client certificate for liveness probes to healtcheck etcd, and saves them into etcd/healthcheck-client.cert and etcd/healthcheck-client.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. ``` kubeadm alpha certs renew etcd-healthcheck-client [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew etcd-healthcheck-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-healthcheck-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md.orig new file mode 100644 index 0000000000000..f1b796a08b56b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate for liveness probes to healtcheck etcd + +### Synopsis + +Renew the certificate for liveness probes to healtcheck etcd. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew etcd-healthcheck-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-healthcheck-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md index 7520616c49c82..e5c46d5245c5b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md @@ -1,12 +1,15 @@ -Generates the credentials for etcd nodes to communicate with each other +Renew the certificate for etcd nodes to communicate with each other ### Synopsis +Renew the certificate for etcd nodes to communicate with each other. -Renews the credentials for etcd nodes to communicate with each other, and saves them into etcd/peer.cert and etcd/peer.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. 
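A hedged sketch of the certificate-API alternative described above. The blocking behaviour and the need for manual approval are assumptions about typical kubeadm behaviour on this release, and the CSR name is a placeholder.

```
# Terminal 1: request renewal of the etcd peer certificate through the
# Kubernetes certificate API; this usually waits until the CSR is approved.
sudo kubeadm alpha certs renew etcd-peer --use-api \
  --kubeconfig /etc/kubernetes/admin.conf

# Terminal 2: find and approve the pending request created by kubeadm.
kubectl get csr
kubectl certificate approve <csr-name>   # placeholder; copy the name from the list
```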
+ +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. ``` kubeadm alpha certs renew etcd-peer [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew etcd-peer [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-peer + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md.orig new file mode 100644 index 0000000000000..e5c46d5245c5b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate for etcd nodes to communicate with each other + +### Synopsis + +Renew the certificate for etcd nodes to communicate with each other. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew etcd-peer [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-peer + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md index 58ff0be9f9e09..997a607bd8dcc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md @@ -1,12 +1,15 @@ -Generates the certificate for serving etcd +Renew the certificate for serving etcd ### Synopsis +Renew the certificate for serving etcd. -Renews the certificate for serving etcd, and saves them into etcd/server.cert and etcd/server.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. 
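The restart step mentioned above is not performed by kubeadm itself. A minimal sketch, assuming the control plane runs as kubelet-managed static Pods with manifests under /etc/kubernetes/manifests:

```
# Moving a static-pod manifest out of the manifests directory and back forces
# the kubelet to recreate that component, which reloads the renewed certificate.
sudo mv /etc/kubernetes/manifests/etcd.yaml /tmp/
sleep 20
sudo mv /tmp/etcd.yaml /etc/kubernetes/manifests/
```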
``` kubeadm alpha certs renew etcd-server [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew etcd-server [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-server + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md.orig new file mode 100644 index 0000000000000..997a607bd8dcc --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate for serving etcd + +### Synopsis + +Renew the certificate for serving etcd. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew etcd-server [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-server + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md index d92b81b008443..2945b45b2f733 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md @@ -1,12 +1,15 @@ -Generates the client for the front proxy +Renew the certificate for the front proxy client ### Synopsis +Renew the certificate for the front proxy client. -Renews the client for the front proxy, and saves them into front-proxy-client.cert and front-proxy-client.key files. +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. -Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them. +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. 
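Renewals are not limited to one certificate at a time. The `check-expiration` and `renew all` subcommands used below are assumptions not shown in the pages above; confirm they exist on your kubeadm version with `kubeadm alpha certs --help` before relying on them.

```
# Inspect certificate expiration dates, then renew every kubeadm-managed
# certificate from the local CA in one pass (both subcommands are assumed to
# be available in this kubeadm release; verify with --help first).
sudo kubeadm alpha certs check-expiration
sudo kubeadm alpha certs renew all --cert-dir /etc/kubernetes/pki
```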
``` kubeadm alpha certs renew front-proxy-client [flags] @@ -14,85 +17,19 @@ kubeadm alpha certs renew front-proxy-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for front-proxy-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md.orig new file mode 100644 index 0000000000000..2945b45b2f733 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md.orig @@ -0,0 +1,35 @@ + +Renew the certificate for the front proxy client + +### Synopsis + +Renew the certificate for the front proxy client. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew front-proxy-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for front-proxy-client + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md new file mode 100644 index 0000000000000..58dc3900a95c1 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md @@ -0,0 +1,35 @@ + +Renew the certificate embedded in the kubeconfig file for the scheduler manager to use + +### Synopsis + +Renew the certificate embedded in the kubeconfig file for the scheduler manager to use. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates, there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request. + +After renewal, in order to make changes effective, is is required to restart control-plane components and eventually re-distribute the renewed certificate in case the file is used elsewhere. + +``` +kubeadm alpha certs renew scheduler.conf [flags] +``` + +### Options + +``` + --cert-dir string The path where to save the certificates (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. 
+ --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for scheduler.conf + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --use-api Use the Kubernetes certificate API to renew certificates +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md index 24b0f4d901330..0481bb1d2a8ff 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md @@ -3,50 +3,19 @@ Kubeconfig file utilities ### Synopsis - Kubeconfig file utilities. Alpha Disclaimer: this command is currently alpha. ### Options - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for kubeconfig +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md index c12f50d3cf718..5bcc47139c2ef 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md @@ -1,10 +1,9 @@ -Outputs a kubeconfig file for an additional user +Output a kubeconfig file for an additional user ### Synopsis - -Outputs a kubeconfig file for an additional user. +Output a kubeconfig file for an additional user. Alpha Disclaimer: this command is currently alpha. @@ -15,91 +14,25 @@ kubeadm alpha kubeconfig user [flags] ### Examples ``` - # Outputs a kubeconfig file for an additional user named foo + # Output a kubeconfig file for an additional user named foo kubeadm alpha kubeconfig user --client-name=foo ``` ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
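As an illustrative follow-on to the `kubeadm alpha kubeconfig user` example above (the user name, organization, and RBAC setup are assumptions): the command writes the kubeconfig to standard output, so it is typically redirected to a file and then used with kubectl.

```
# Create a kubeconfig for user "foo" in the "developers" organization and try
# it out; the user can only do what existing RBAC bindings allow.
kubeadm alpha kubeconfig user --client-name=foo --org=developers > foo.kubeconfig
kubectl --kubeconfig=foo.kubeconfig get nodes
```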
- - +``` + --apiserver-advertise-address string The IP address the API server is accessible on + --apiserver-bind-port int32 The port the API server is accessible on (default 6443) + --cert-dir string The path where certificates are stored (default "/etc/kubernetes/pki") + --client-name string The name of user. It will be used as the CN if client certificates are created + -h, --help help for user + --org strings The orgnizations of the client certificate. It will be used as the O if client certificates are created + --token string The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md index 5479cf594e084..d975aae9633c3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md @@ -3,48 +3,17 @@ Commands related to handling the kubelet ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ### Options - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for kubelet +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md index be9d4fcc26935..0040242ff2a8c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md @@ -3,48 +3,17 @@ Utilities for kubelet configuration ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ### Options - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for config +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md index 4d14958bafe2b..ca6e808504e33 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md @@ -1,10 +1,9 @@ -Downloads the kubelet configuration from the cluster ConfigMap kubelet-config-1.X, where X is the minor version of the kubelet. +Download the kubelet configuration from the cluster ConfigMap kubelet-config-1.X, where X is the minor version of the kubelet ### Synopsis - -Downloads the kubelet configuration from a ConfigMap of the form "kubelet-config-1.X" in the cluster, where X is the minor version of the kubelet. Either kubeadm autodetects the kubelet version by exec-ing "kubelet --version" or respects the --kubelet-version parameter. +Download the kubelet configuration from a ConfigMap of the form "kubelet-config-1.X" in the cluster, where X is the minor version of the kubelet. Either kubeadm autodetects the kubelet version by exec-ing "kubelet --version" or respects the --kubelet-version parameter. Alpha Disclaimer: this command is currently alpha. @@ -15,66 +14,24 @@ kubeadm alpha kubelet config download [flags] ### Examples ``` - # Downloads the kubelet configuration from the ConfigMap in the cluster. Autodetects the kubelet version. + # Download the kubelet configuration from the ConfigMap in the cluster. Autodetect the kubelet version. kubeadm alpha phase kubelet config download - # Downloads the kubelet configuration from the ConfigMap in the cluster. Uses a specific desired kubelet version. + # Download the kubelet configuration from the ConfigMap in the cluster. Use a specific desired kubelet version. kubeadm alpha phase kubelet config download --kubelet-version 1.14.0 ``` ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for download + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubelet-version string The desired version for the kubelet. Defaults to being autodetected from 'kubelet --version'. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md.orig new file mode 100644 index 0000000000000..ca6e808504e33 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_download.md.orig @@ -0,0 +1,37 @@ + +Download the kubelet configuration from the cluster ConfigMap kubelet-config-1.X, where X is the minor version of the kubelet + +### Synopsis + +Download the kubelet configuration from a ConfigMap of the form "kubelet-config-1.X" in the cluster, where X is the minor version of the kubelet. Either kubeadm autodetects the kubelet version by exec-ing "kubelet --version" or respects the --kubelet-version parameter. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm alpha kubelet config download [flags] +``` + +### Examples + +``` + # Download the kubelet configuration from the ConfigMap in the cluster. Autodetect the kubelet version. + kubeadm alpha phase kubelet config download + + # Download the kubelet configuration from the ConfigMap in the cluster. Use a specific desired kubelet version. + kubeadm alpha phase kubelet config download --kubelet-version 1.14.0 +``` + +### Options + +``` + -h, --help help for download + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubelet-version string The desired version for the kubelet. Defaults to being autodetected from 'kubelet --version'. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md index dccd55aa3a45b..3d772d05b97d6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md @@ -1,10 +1,9 @@ -EXPERIMENTAL: Enables or updates dynamic kubelet configuration for a Node +EXPERIMENTAL: Enable or update dynamic kubelet configuration for a Node ### Synopsis - -Enables or updates dynamic kubelet configuration for a Node, against the kubelet-config-1.X ConfigMap in the cluster, where X is the minor version of the desired kubelet version. +Enable or update dynamic kubelet configuration for a Node, against the kubelet-config-1.X ConfigMap in the cluster, where X is the minor version of the desired kubelet version. WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it may have surprising side-effects at this stage. @@ -17,7 +16,7 @@ kubeadm alpha kubelet config enable-dynamic [flags] ### Examples ``` - # Enables dynamic kubelet configuration for a Node. + # Enable dynamic kubelet configuration for a Node. kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version 1.14.0 WARNING: This feature is still experimental, and disabled by default. 
Enable only if you know what you are doing, as it @@ -26,64 +25,16 @@ kubeadm alpha kubelet config enable-dynamic [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for enable-dynamic + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubelet-version string The desired version for the kubelet + --node-name string Name of the node that should enable the dynamic kubelet configuration +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md.orig new file mode 100644 index 0000000000000..3d772d05b97d6 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md.orig @@ -0,0 +1,40 @@ + +EXPERIMENTAL: Enable or update dynamic kubelet configuration for a Node + +### Synopsis + +Enable or update dynamic kubelet configuration for a Node, against the kubelet-config-1.X ConfigMap in the cluster, where X is the minor version of the desired kubelet version. + +WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it may have surprising side-effects at this stage. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm alpha kubelet config enable-dynamic [flags] +``` + +### Examples + +``` + # Enable dynamic kubelet configuration for a Node. + kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version 1.14.0 + + WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it + may have surprising side-effects at this stage. +``` + +### Options + +``` + -h, --help help for enable-dynamic + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubelet-version string The desired version for the kubelet + --node-name string Name of the node that should enable the dynamic kubelet configuration +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md index a4543c9ee780f..95951d4bbef03 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md @@ -1,50 +1,19 @@ -Makes a kubeadm cluster self-hosted +Make a kubeadm cluster self-hosted ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ### Options - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for selfhosting +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md index 4943a77ad140a..eaa946f2c1465 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md @@ -1,10 +1,9 @@ -Converts a static Pod-hosted control plane into a self-hosted one +Convert a static Pod-hosted control plane into a self-hosted one ### Synopsis - -Converts static Pod files for control plane components into self-hosted DaemonSets configured via the Kubernetes API. +Convert static Pod files for control plane components into self-hosted DaemonSets configured via the Kubernetes API. See the documentation for self-hosting limitations. @@ -17,85 +16,25 @@ kubeadm alpha selfhosting pivot [flags] ### Examples ``` - # Converts a static Pod-hosted control plane into a self-hosted one. + # Convert a static Pod-hosted control plane into a self-hosted one. kubeadm alpha phase self-hosting convert-from-staticpods ``` ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --cert-dir string The path where certificates are stored (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -f, --force Pivot the cluster without prompting for confirmation + -h, --help help for pivot + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + -s, --store-certs-in-secrets Enable storing certs in secrets +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md.orig new file mode 100644 index 0000000000000..eaa946f2c1465 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md.orig @@ -0,0 +1,40 @@ + +Convert a static Pod-hosted control plane into a self-hosted one + +### Synopsis + +Convert static Pod files for control plane components into self-hosted DaemonSets configured via the Kubernetes API. + +See the documentation for self-hosting limitations. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm alpha selfhosting pivot [flags] +``` + +### Examples + +``` + # Convert a static Pod-hosted control plane into a self-hosted one. + + kubeadm alpha phase self-hosting convert-from-staticpods +``` + +### Options + +``` + --cert-dir string The path where certificates are stored (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -f, --force Pivot the cluster without prompting for confirmation + -h, --help help for pivot + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + -s, --store-certs-in-secrets Enable storing certs in secrets +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md index b45976913969f..4618befd9bd23 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md @@ -1,10 +1,9 @@ -Output shell completion code for the specified shell (bash or zsh). +Output shell completion code for the specified shell (bash or zsh) ### Synopsis - Output shell completion code for the specified shell (bash or zsh). The shell code must be evaluated to provide interactive completion of kubeadm commands. This can be done by sourcing it from @@ -50,43 +49,13 @@ source <(kubeadm completion zsh) ### Options - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for completion +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md index b8fc65c8d1d1f..bc085de08a9e7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md @@ -1,10 +1,9 @@ -Manage configuration for a kubeadm cluster persisted in a ConfigMap in the cluster. +Manage configuration for a kubeadm cluster persisted in a ConfigMap in the cluster ### Synopsis - There is a ConfigMap in the kube-system namespace called "kubeadm-config" that kubeadm uses to store internal configuration about the cluster. kubeadm CLI v1.8.0+ automatically creates this ConfigMap with the config used with 'kubeadm init', but if you initialized your cluster using kubeadm v1.7.x or lower, you must use the 'config upload' command to create this @@ -17,50 +16,14 @@ kubeadm config [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + -h, --help help for config + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md.orig new file mode 100644 index 0000000000000..bc085de08a9e7 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md.orig @@ -0,0 +1,29 @@ + +Manage configuration for a kubeadm cluster persisted in a ConfigMap in the cluster + +### Synopsis + + +There is a ConfigMap in the kube-system namespace called "kubeadm-config" that kubeadm uses to store internal configuration about the +cluster. kubeadm CLI v1.8.0+ automatically creates this ConfigMap with the config used with 'kubeadm init', but if you +initialized your cluster using kubeadm v1.7.x or lower, you must use the 'config upload' command to create this +ConfigMap. This is required so that 'kubeadm upgrade' can configure your upgraded cluster correctly. + + +``` +kubeadm config [flags] +``` + +### Options + +``` + -h, --help help for config + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md index b282ef6b5fef2..6c9be5ac3cb4b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md @@ -1,10 +1,9 @@ -Interact with container images used by kubeadm. +Interact with container images used by kubeadm ### Synopsis - -Interact with container images used by kubeadm. +Interact with container images used by kubeadm ``` kubeadm config images [flags] @@ -12,50 +11,14 @@ kubeadm config images [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for images
- - +``` + -h, --help help for images +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md.orig new file mode 100644 index 0000000000000..6c9be5ac3cb4b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md.orig @@ -0,0 +1,24 @@ + +Interact with container images used by kubeadm + +### Synopsis + +Interact with container images used by kubeadm + +``` +kubeadm config images [flags] +``` + +### Options + +``` + -h, --help help for images +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md index 04e6f39e1340b..28a2f0a7dcb39 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md @@ -1,10 +1,9 @@ -Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized. +Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized ### Synopsis - -Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized. +Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized ``` kubeadm config images list [flags] @@ -12,71 +11,18 @@ kubeadm config images list [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to kubeadm config file.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for list
-   --kubernetes-version string     Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
- - +``` + --config string Path to kubeadm config file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for list + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
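A quick usage sketch for `kubeadm config images list` as documented above (the version and registry values are illustrative):

```bash
# List the control plane images kubeadm would use for a given Kubernetes version.
kubeadm config images list --kubernetes-version v1.15.0

# Same list, assuming images are mirrored to a private registry (hypothetical registry name).
kubeadm config images list --kubernetes-version v1.15.0 --image-repository registry.example.com/k8s
```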
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md.orig new file mode 100644 index 0000000000000..28a2f0a7dcb39 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md.orig @@ -0,0 +1,28 @@ + +Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized + +### Synopsis + +Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized + +``` +kubeadm config images list [flags] +``` + +### Options + +``` + --config string Path to kubeadm config file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for list + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md index a5a034e17075a..611524df9c2b8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md @@ -1,10 +1,9 @@ -Pull images used by kubeadm. +Pull images used by kubeadm ### Synopsis - -Pull images used by kubeadm. +Pull images used by kubeadm ``` kubeadm config images pull [flags] @@ -12,78 +11,19 @@ kubeadm config images pull [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to kubeadm config file.
-   --cri-socket string
-   Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for pull
-   --kubernetes-version string     Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
- - +``` + --config string Path to kubeadm config file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for pull + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
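A hedged sketch of pre-pulling images with the flags listed above (the CRI socket path is an assumption for a containerd setup, not something stated in this diff):

```bash
# Pre-pull all control plane images before running 'kubeadm init'.
kubeadm config images pull --kubernetes-version v1.15.0

# If more than one CRI is installed, point kubeadm at the right socket (illustrative path).
kubeadm config images pull --cri-socket /run/containerd/containerd.sock
```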
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md.orig new file mode 100644 index 0000000000000..611524df9c2b8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md.orig @@ -0,0 +1,29 @@ + +Pull images used by kubeadm + +### Synopsis + +Pull images used by kubeadm + +``` +kubeadm config images pull [flags] +``` + +### Options + +``` + --config string Path to kubeadm config file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for pull + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md index aca9ff8dfc53c..ba1361453ee7b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md @@ -1,17 +1,17 @@ -Read an older version of the kubeadm configuration API types from a file, and output the similar config object for the newer version. +Read an older version of the kubeadm configuration API types from a file, and output the similar config object for the newer version ### Synopsis - This command lets you convert configuration objects of older versions to the latest supported version, locally in the CLI tool without ever touching anything in the cluster. In this version of kubeadm, the following API versions are supported: -- kubeadm.k8s.io/v1alpha3 + - kubeadm.k8s.io/v1beta1 +- kubeadm.k8s.io/v1beta2 -Further, kubeadm can only write out config of version "kubeadm.k8s.io/v1beta1", but read both types. +Further, kubeadm can only write out config of version "kubeadm.k8s.io/v1beta2", but read both types. So regardless of what version you pass to the --old-config parameter here, the API object will be read, deserialized, defaulted, converted, validated, and re-serialized when written to stdout or --new-config if specified. @@ -26,64 +26,16 @@ kubeadm config migrate [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   -h, --help
-   help for migrate
-   --new-config string
-   Path to the resulting equivalent kubeadm config file using the new API version. Optional, if not specified output will be sent to STDOUT.
-   --old-config string
-   Path to the kubeadm config file that is using an old API version and should be converted. This flag is mandatory.
- - +``` + -h, --help help for migrate + --new-config string Path to the resulting equivalent kubeadm config file using the new API version. Optional, if not specified output will be sent to STDOUT. + --old-config string Path to the kubeadm config file that is using an old API version and should be converted. This flag is mandatory. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
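To illustrate the v1beta1-to-v1beta2 conversion described in the migrate hunk above, a minimal sketch (file names are hypothetical):

```bash
# Convert an old kubeadm.k8s.io/v1beta1 config file to the latest supported version (v1beta2).
kubeadm config migrate --old-config old-kubeadm.yaml --new-config new-kubeadm.yaml

# Or print the converted object to stdout by omitting --new-config.
kubeadm config migrate --old-config old-kubeadm.yaml
```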
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md.orig new file mode 100644 index 0000000000000..ba1361453ee7b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md.orig @@ -0,0 +1,41 @@ + +Read an older version of the kubeadm configuration API types from a file, and output the similar config object for the newer version + +### Synopsis + + +This command lets you convert configuration objects of older versions to the latest supported version, +locally in the CLI tool without ever touching anything in the cluster. +In this version of kubeadm, the following API versions are supported: + +- kubeadm.k8s.io/v1beta1 +- kubeadm.k8s.io/v1beta2 + +Further, kubeadm can only write out config of version "kubeadm.k8s.io/v1beta2", but read both types. +So regardless of what version you pass to the --old-config parameter here, the API object will be +read, deserialized, defaulted, converted, validated, and re-serialized when written to stdout or +--new-config if specified. + +In other words, the output of this command is what kubeadm actually would read internally if you +submitted this file to "kubeadm init" + + +``` +kubeadm config migrate [flags] +``` + +### Options + +``` + -h, --help help for migrate + --new-config string Path to the resulting equivalent kubeadm config file using the new API version. Optional, if not specified output will be sent to STDOUT. + --old-config string Path to the kubeadm config file that is using an old API version and should be converted. This flag is mandatory. +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md index 0eb0d43c6ed95..159b87581dca7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md @@ -3,7 +3,6 @@ Print configuration ### Synopsis - This command prints configurations for subcommands provided. ``` @@ -12,50 +11,14 @@ kubeadm config print [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for print
- - +``` + -h, --help help for print +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md.orig new file mode 100644 index 0000000000000..159b87581dca7 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md.orig @@ -0,0 +1,24 @@ + +Print configuration + +### Synopsis + +This command prints configurations for subcommands provided. + +``` +kubeadm config print [flags] +``` + +### Options + +``` + -h, --help help for print +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md index 66fb9d5a08635..239a969a827f8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md @@ -4,7 +4,6 @@ Print default init configuration, that can be used for 'kubeadm init' ### Synopsis - This command prints objects such as the default init configuration that is used for 'kubeadm init'. Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" [] []} in order to pass validation but @@ -17,57 +16,15 @@ kubeadm config print init-defaults [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - -
-   --component-configs stringSlice
-   A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.
-   -h, --help
-   help for init-defaults
- - +``` + --component-configs strings A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed. + -h, --help help for init-defaults +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
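A brief sketch of the init-defaults subcommand shown above (the component config name comes from the flag's documented list):

```bash
# Print the default init configuration objects.
kubeadm config print init-defaults

# Also include the kubelet component config defaults.
kubeadm config print init-defaults --component-configs KubeletConfiguration
```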
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md.orig new file mode 100644 index 0000000000000..239a969a827f8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md.orig @@ -0,0 +1,30 @@ + +Print default init configuration, that can be used for 'kubeadm init' + +### Synopsis + + +This command prints objects such as the default init configuration that is used for 'kubeadm init'. + +Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" [] []} in order to pass validation but +not perform the real computation for creating a token. + + +``` +kubeadm config print init-defaults [flags] +``` + +### Options + +``` + --component-configs strings A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed. + -h, --help help for init-defaults +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md index 1cfcf8c720bff..57d5024988964 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md @@ -4,7 +4,6 @@ Print default join configuration, that can be used for 'kubeadm join' ### Synopsis - This command prints objects such as the default join configuration that is used for 'kubeadm join'. Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" [] []} in order to pass validation but @@ -17,57 +16,15 @@ kubeadm config print join-defaults [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - -
-   --component-configs stringSlice
-   A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.
-   -h, --help
-   help for join-defaults
- - +``` + --component-configs strings A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed. + -h, --help help for join-defaults +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
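And the matching sketch for join defaults (component config name again taken from the documented list):

```bash
# Print the default join configuration, optionally with the kube-proxy component config.
kubeadm config print join-defaults --component-configs KubeProxyConfiguration
```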
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md.orig new file mode 100644 index 0000000000000..57d5024988964 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md.orig @@ -0,0 +1,30 @@ + +Print default join configuration, that can be used for 'kubeadm join' + +### Synopsis + + +This command prints objects such as the default join configuration that is used for 'kubeadm join'. + +Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" [] []} in order to pass validation but +not perform the real computation for creating a token. + + +``` +kubeadm config print join-defaults [flags] +``` + +### Options + +``` + --component-configs strings A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed. + -h, --help help for join-defaults +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. 
+``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload.md.orig similarity index 100% rename from content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload.md rename to content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload.md.orig diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-file.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-file.md.orig similarity index 100% rename from content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-file.md rename to content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-file.md.orig diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-flags.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-flags.md.orig similarity index 100% rename from content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-flags.md rename to content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_upload_from-flags.md.orig diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md index cbc723c1ae2fa..a311347b32b6f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md @@ -1,10 +1,9 @@ -View the kubeadm configuration stored inside the cluster. +View the kubeadm configuration stored inside the cluster ### Synopsis - Using this command, you can view the ConfigMap in the cluster where the configuration for kubeadm is located. The configuration is located in the "kube-system" namespace in the "kubeadm-config" ConfigMap. @@ -16,50 +15,14 @@ kubeadm config view [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for view
- - +``` + -h, --help help for view +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - -
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
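A minimal sketch for viewing the stored configuration (the kubeconfig path is simply the documented default):

```bash
# Show the kubeadm ClusterConfiguration stored in the kubeadm-config ConfigMap.
kubeadm config view --kubeconfig /etc/kubernetes/admin.conf
```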
- - +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md.orig new file mode 100644 index 0000000000000..a311347b32b6f --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md.orig @@ -0,0 +1,28 @@ + +View the kubeadm configuration stored inside the cluster + +### Synopsis + + +Using this command, you can view the ConfigMap in the cluster where the configuration for kubeadm is located. + +The configuration is located in the "kube-system" namespace in the "kubeadm-config" ConfigMap. + + +``` +kubeadm config view [flags] +``` + +### Options + +``` + -h, --help help for view +``` + +### Options inherited from parent commands + +``` + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md index b5666cdfdc720..c7faf7064008d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md @@ -1,47 +1,46 @@ -Run this command in order to set up the Kubernetes control plane. +Run this command in order to set up the Kubernetes control plane ### Synopsis - -Run this command in order to set up the Kubernetes control plane. 
+Run this command in order to set up the Kubernetes control plane The "init" command executes the following phases: ``` preflight Run pre-flight checks -kubelet-start Writes kubelet settings and (re)starts the kubelet +kubelet-start Write kubelet settings and (re)start the kubelet certs Certificate generation - /ca Generates the self-signed Kubernetes CA to provision identities for other Kubernetes components - /apiserver Generates the certificate for serving the Kubernetes API - /apiserver-kubelet-client Generates the Client certificate for the API server to connect to kubelet - /front-proxy-ca Generates the self-signed CA to provision identities for front proxy - /front-proxy-client Generates the client for the front proxy - /etcd-ca Generates the self-signed CA to provision identities for etcd - /etcd-server Generates the certificate for serving etcd - /apiserver-etcd-client Generates the client apiserver uses to access etcd - /etcd-peer Generates the credentials for etcd nodes to communicate with each other - /etcd-healthcheck-client Generates the client certificate for liveness probes to healtcheck etcd - /sa Generates a private key for signing service account tokens along with its public key -kubeconfig Generates all kubeconfig files necessary to establish the control plane and the admin kubeconfig file - /admin Generates a kubeconfig file for the admin to use and for kubeadm itself - /kubelet Generates a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes - /controller-manager Generates a kubeconfig file for the controller manager to use - /scheduler Generates a kubeconfig file for the scheduler to use -control-plane Generates all static Pod manifest files necessary to establish the control plane + /etcd-ca Generate the self-signed CA to provision identities for etcd + /apiserver-etcd-client Generate the certificate the apiserver uses to access etcd + /etcd-healthcheck-client Generate the certificate for liveness probes to healtcheck etcd + /etcd-server Generate the certificate for serving etcd + /etcd-peer Generate the certificate for etcd nodes to communicate with each other + /ca Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components + /apiserver Generate the certificate for serving the Kubernetes API + /apiserver-kubelet-client Generate the certificate for the API server to connect to kubelet + /front-proxy-ca Generate the self-signed CA to provision identities for front proxy + /front-proxy-client Generate the certificate for the front proxy client + /sa Generate a private key for signing service account tokens along with its public key +kubeconfig Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file + /admin Generate a kubeconfig file for the admin to use and for kubeadm itself + /kubelet Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes + /controller-manager Generate a kubeconfig file for the controller manager to use + /scheduler Generate a kubeconfig file for the scheduler to use +control-plane Generate all static Pod manifest files necessary to establish the control plane /apiserver Generates the kube-apiserver static Pod manifest /controller-manager Generates the kube-controller-manager static Pod manifest /scheduler Generates the kube-scheduler static Pod manifest -etcd Generates static Pod manifest file for local etcd. - /local Generates the static Pod manifest file for a local, single-node local etcd instance. 
-upload-config Uploads the kubeadm and kubelet configuration to a ConfigMap - /kubeadm Uploads the kubeadm ClusterConfiguration to a ConfigMap - /kubelet Uploads the kubelet component config to a ConfigMap +etcd Generate static Pod manifest file for local etcd + /local Generate the static Pod manifest file for a local, single-node local etcd instance +upload-config Upload the kubeadm and kubelet configuration to a ConfigMap + /kubeadm Upload the kubeadm ClusterConfiguration to a ConfigMap + /kubelet Upload the kubelet component config to a ConfigMap upload-certs Upload certificates to kubeadm-certs mark-control-plane Mark a node as a control-plane bootstrap-token Generates bootstrap tokens used to join a node to a cluster -addon Installs required addons for passing Conformance tests - /coredns Installs the CoreDNS addon to a Kubernetes cluster - /kube-proxy Installs the kube-proxy addon to a Kubernetes cluster +addon Install required addons for passing Conformance tests + /coredns Install the CoreDNS addon to a Kubernetes cluster + /kube-proxy Install the kube-proxy addon to a Kubernetes cluster ``` @@ -51,197 +50,35 @@ kubeadm init [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32     Default: 6443
-   Port for the API Server to bind to.
-   --apiserver-cert-extra-sans stringSlice
-   Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
-   --cert-dir string     Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --certificate-key string
-   Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.
-   --config string
-   Path to a kubeadm configuration file.
-   --cri-socket string
-   Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
-   --dry-run
-   Don't apply any changes; just output what would be done.
-   --experimental-upload-certs
-   Upload control-plane certificates to the kubeadm-certs Secret.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for init
-   --ignore-preflight-errors stringSlice
-   A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
-   --image-repository string     Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubernetes-version string     Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --node-name string
-   Specify the node name.
-   --pod-network-cidr string
-   Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
-   --service-cidr string     Default: "10.96.0.0/12"
-   Use alternative range of IP address for service VIPs.
-   --service-dns-domain string     Default: "cluster.local"
-   Use alternative domain for services, e.g. "myorg.internal".
-   --skip-certificate-key-print
-   Don't print the key used to encrypt the control-plane certificates.
-   --skip-phases stringSlice
-   List of phases to be skipped
-   --skip-token-print
-   Skip printing of the default bootstrap token generated by 'kubeadm init'.
-   --token string
-   The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}\.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef
-   --token-ttl duration     Default: 24h0m0s
-   The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --apiserver-cert-extra-sans strings Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names. + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --certificate-key string Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. + --config string Path to a kubeadm configuration file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --dry-run Don't apply any changes; just output what would be done. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for init + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --node-name string Specify the node name. + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") + --service-dns-domain string Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local") + --skip-certificate-key-print Don't print the key used to encrypt the control-plane certificates. + --skip-phases strings List of phases to be skipped + --skip-token-print Skip printing of the default bootstrap token generated by 'kubeadm init'. + --token string The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}\.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef + --token-ttl duration The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire (default 24h0m0s) + --upload-certs Upload control-plane certificates to the kubeadm-certs Secret. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
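For orientation, a hedged `kubeadm init` invocation built only from flags listed above (the CIDR and version values are illustrative, not prescribed by this diff):

```bash
# Initialize a control-plane node, pre-selecting a pod network CIDR and uploading
# control-plane certificates so additional control-plane nodes can join later.
kubeadm init \
  --kubernetes-version v1.15.0 \
  --pod-network-cidr 10.244.0.0/16 \
  --upload-certs
```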
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md.orig new file mode 100644 index 0000000000000..c7faf7064008d --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md.orig @@ -0,0 +1,84 @@ + +Run this command in order to set up the Kubernetes control plane + +### Synopsis + +Run this command in order to set up the Kubernetes control plane + +The "init" command executes the following phases: +``` +preflight Run pre-flight checks +kubelet-start Write kubelet settings and (re)start the kubelet +certs Certificate generation + /etcd-ca Generate the self-signed CA to provision identities for etcd + /apiserver-etcd-client Generate the certificate the apiserver uses to access etcd + /etcd-healthcheck-client Generate the certificate for liveness probes to healtcheck etcd + /etcd-server Generate the certificate for serving etcd + /etcd-peer Generate the certificate for etcd nodes to communicate with each other + /ca Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components + /apiserver Generate the certificate for serving the Kubernetes API + /apiserver-kubelet-client Generate the certificate for the API server to connect to kubelet + /front-proxy-ca Generate the self-signed CA to provision identities for front proxy + /front-proxy-client Generate the certificate for the front proxy client + /sa Generate a private key for signing service account tokens along with its public key +kubeconfig Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file + /admin Generate a kubeconfig file for the admin to use and for kubeadm itself + /kubelet Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes + /controller-manager Generate a kubeconfig file for the controller manager to use + /scheduler Generate a kubeconfig file for the scheduler to use +control-plane Generate all static Pod manifest files necessary to establish the control plane + /apiserver Generates the kube-apiserver static Pod manifest + /controller-manager Generates the kube-controller-manager static Pod manifest + /scheduler Generates the kube-scheduler static Pod manifest +etcd Generate static Pod manifest file for local etcd + /local Generate the static Pod manifest file for a local, single-node local etcd instance +upload-config Upload the kubeadm and kubelet configuration to a ConfigMap + /kubeadm Upload the kubeadm ClusterConfiguration to a ConfigMap + /kubelet Upload the kubelet component config to a ConfigMap +upload-certs Upload certificates to kubeadm-certs +mark-control-plane Mark a node as a control-plane +bootstrap-token Generates bootstrap tokens used to join a node to a cluster +addon Install required addons for passing Conformance tests + /coredns Install the CoreDNS addon to a Kubernetes cluster + /kube-proxy Install the kube-proxy addon to a Kubernetes cluster +``` + + +``` +kubeadm init [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. 
(default 6443) + --apiserver-cert-extra-sans strings Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names. + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --certificate-key string Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. + --config string Path to a kubeadm configuration file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --dry-run Don't apply any changes; just output what would be done. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for init + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --node-name string Specify the node name. + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") + --service-dns-domain string Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local") + --skip-certificate-key-print Don't print the key used to encrypt the control-plane certificates. + --skip-phases strings List of phases to be skipped + --skip-token-print Skip printing of the default bootstrap token generated by 'kubeadm init'. + --token string The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}\.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef + --token-ttl duration The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire (default 24h0m0s) + --upload-certs Upload control-plane certificates to the kubeadm-certs Secret. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md index b2946caf1fa02..c469017dfd905 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md @@ -1,50 +1,19 @@ -use this command to invoke single phase of the init workflow +Use this command to invoke single phase of the init workflow ### Synopsis - -use this command to invoke single phase of the init workflow +Use this command to invoke single phase of the init workflow ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for phase
- - +``` + -h, --help help for phase +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md index 1ddc02f451112..53aea81144903 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md @@ -1,9 +1,8 @@ -Installs required addons for passing Conformance tests +Install required addons for passing Conformance tests ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ``` @@ -12,43 +11,13 @@ kubeadm init phase addon [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for addon
- - +``` + -h, --help help for addon +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md index 55e2e3b62f322..f27dd956996bb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md @@ -1,10 +1,9 @@ -Installs all the addons +Install all the addons ### Synopsis - -Installs all the addons +Install all the addons ``` kubeadm init phase addon all [flags] @@ -12,113 +11,23 @@ kubeadm init phase addon all [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32     Default: 6443
-   Port for the API Server to bind to.
-   --config string
-   Path to a kubeadm configuration file.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for all
-   --image-repository string     Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --kubernetes-version string     Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --pod-network-cidr string
-   Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
-   --service-cidr string     Default: "10.96.0.0/12"
-   Use alternative range of IP address for service VIPs.
-   --service-dns-domain string     Default: "cluster.local"
-   Use alternative domain for services, e.g. "myorg.internal".
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --config string Path to a kubeadm configuration file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for all + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") + --service-dns-domain string Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
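A sketch of re-running the addon phase on an existing control-plane node, using only the defaults documented above (kubeconfig path is the documented default):

```bash
# (Re)install both the CoreDNS and kube-proxy addons using the cluster's stored defaults.
kubeadm init phase addon all --kubeconfig /etc/kubernetes/admin.conf
```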
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md.orig new file mode 100644 index 0000000000000..f27dd956996bb --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md.orig @@ -0,0 +1,33 @@ + +Install all the addons + +### Synopsis + +Install all the addons + +``` +kubeadm init phase addon all [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --config string Path to a kubeadm configuration file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for all + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") + --service-dns-domain string Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md index 514d33219ec74..1e7c742f97931 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md @@ -1,10 +1,9 @@ -Installs the CoreDNS addon to a Kubernetes cluster +Install the CoreDNS addon to a Kubernetes cluster ### Synopsis - -Installs the CoreDNS addon components via the API server. Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. +Install the CoreDNS addon components via the API server. Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. ``` kubeadm init phase addon coredns [flags] @@ -12,92 +11,20 @@ kubeadm init phase addon coredns [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to a kubeadm configuration file.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for coredns
-   --image-repository string     Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --kubernetes-version string     Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --service-cidr string     Default: "10.96.0.0/12"
-   Use alternative range of IP address for service VIPs.
-   --service-dns-domain string     Default: "cluster.local"
-   Use alternative domain for services, e.g. "myorg.internal".
+```
+      --config string   Path to a kubeadm configuration file.
+      --feature-gates string   A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release.
+  -h, --help   help for coredns
+      --image-repository string   Choose a container registry to pull control plane images from (default "k8s.gcr.io")
+      --kubeconfig string   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf")
+      --kubernetes-version string   Choose a specific Kubernetes version for the control plane. (default "stable-1")
+      --service-cidr string   Use alternative range of IP address for service VIPs. (default "10.96.0.0/12")
+      --service-dns-domain string   Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local")
+```

### Options inherited from parent commands

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
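A minimal sketch of deploying only the CoreDNS addon with the flags documented above; the version string and DNS domain are illustrative defaults, not requirements:

```
# Re-deploy CoreDNS after the control plane is up
kubeadm init phase addon coredns \
  --kubernetes-version stable-1 \
  --service-dns-domain cluster.local
```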
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md
index cacd3899032a3..2af54fcf9d0fa 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md
@@ -1,10 +1,9 @@
-Installs the kube-proxy addon to a Kubernetes cluster
+Install the kube-proxy addon to a Kubernetes cluster

### Synopsis

-
-Installs the kube-proxy addon components via the API server.
+Install the kube-proxy addon components via the API server.

```
kubeadm init phase addon kube-proxy [flags]
```
@@ -12,92 +11,20 @@ kubeadm init phase addon kube-proxy [flags]

### Options

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.
--config string
Path to a kubeadm configuration file.
-h, --help
help for kube-proxy
--image-repository string     Default: "k8s.gcr.io"
Choose a container registry to pull control plane images from
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.
--pod-network-cidr string
Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
+```
+      --apiserver-advertise-address string   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
+      --apiserver-bind-port int32   Port for the API Server to bind to. (default 6443)
+      --config string   Path to a kubeadm configuration file.
+  -h, --help   help for kube-proxy
+      --image-repository string   Choose a container registry to pull control plane images from (default "k8s.gcr.io")
+      --kubeconfig string   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf")
+      --kubernetes-version string   Choose a specific Kubernetes version for the control plane. (default "stable-1")
+      --pod-network-cidr string   Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
+```

### Options inherited from parent commands

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
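An illustrative invocation of this phase using only the flags listed above; the kubeconfig path and pod CIDR are assumed example values:

```
# Re-deploy the kube-proxy addon against an existing cluster
kubeadm init phase addon kube-proxy \
  --kubeconfig /etc/kubernetes/admin.conf \
  --pod-network-cidr 10.244.0.0/16
```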
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md
index 6c3e12bc40725..da037aa1248e3 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md
@@ -3,7 +3,6 @@ Generates bootstrap tokens used to join a node to a cluster

### Synopsis

-
Bootstrap tokens are used for establishing bidirectional trust between a node joining the cluster and the control-plane node.

This command makes all the configurations required to make bootstrap tokens work and then creates an initial token.

```
kubeadm init phase bootstrap-token [flags]
```
@@ -15,71 +14,23 @@ kubeadm init phase bootstrap-token [flags]

### Examples

```
-  # Makes all the bootstrap token configurations and creates an initial token, functionally
+  # Make all the bootstrap token configurations and create an initial token, functionally
   # equivalent to what is generated by kubeadm init.
   kubeadm init phase bootstrap-token
```

### Options

--config string
Path to a kubeadm configuration file.
-h, --help
help for bootstrap-token
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--skip-token-print
Skip printing of the default bootstrap token generated by 'kubeadm init'.
+```
+      --config string   Path to a kubeadm configuration file.
+  -h, --help   help for bootstrap-token
+      --kubeconfig string   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf")
+      --skip-token-print   Skip printing of the default bootstrap token generated by 'kubeadm init'.
+```

### Options inherited from parent commands

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
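A short sketch of two common ways to run this phase with the flags documented above; the configuration file path is an assumed example, not a required location:

```
# Create the bootstrap-token configuration without echoing the generated token
kubeadm init phase bootstrap-token --skip-token-print

# Or drive the phase from an existing kubeadm configuration file
kubeadm init phase bootstrap-token --config /etc/kubernetes/kubeadm-config.yaml
```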
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md
index 538a7a1b33934..0c7c04d9ef3b1 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md
@@ -3,7 +3,6 @@ Certificate generation

### Synopsis

-
This command is not meant to be run on its own. See list of available subcommands.

```
kubeadm init phase certs [flags]
```
@@ -12,43 +11,13 @@ kubeadm init phase certs [flags]

### Options

-h, --help
help for certs
+```
+  -h, --help   help for certs
+```

### Options inherited from parent commands

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md
index db2cb1800ccd7..6798a92d1ee04 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md
@@ -1,10 +1,9 @@
-Generates all certificates
+Generate all certificates

### Synopsis

-
-Generates all certificates
+Generate all certificates

```
kubeadm init phase certs all [flags]
```
@@ -12,85 +11,19 @@ kubeadm init phase certs all [flags]

### Options

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-cert-extra-sans stringSlice
Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
-h, --help
help for all
--service-cidr string     Default: "10.96.0.0/12"
Use alternative range of IP address for service VIPs.
--service-dns-domain string     Default: "cluster.local"
Use alternative domain for services, e.g. "myorg.internal".
+```
+      --apiserver-advertise-address string   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
+      --apiserver-cert-extra-sans strings   Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
+      --cert-dir string   The path where to save and store the certificates. (default "/etc/kubernetes/pki")
+      --config string   Path to a kubeadm configuration file.
+  -h, --help   help for all
+      --service-cidr string   Use alternative range of IP address for service VIPs. (default "10.96.0.0/12")
+      --service-dns-domain string   Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local")
+```

### Options inherited from parent commands

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
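As a rough example of this phase with an extra API server SAN, which is the most common reason to run it by hand; the DNS name and IP address below are assumed placeholder values:

```
# Generate every control plane certificate, adding a SAN for an external load balancer
kubeadm init phase certs all \
  --apiserver-cert-extra-sans lb.example.com,192.0.2.10 \
  --cert-dir /etc/kubernetes/pki
```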
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md
index 3fe97b8a52acf..03af7712c04e7 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md
@@ -1,10 +1,9 @@
-Generates the client apiserver uses to access etcd
+Generate the certificate the apiserver uses to access etcd

### Synopsis

-
-Generates the client apiserver uses to access etcd, and saves them into apiserver-etcd-client.cert and apiserver-etcd-client.key files.
+Generate the certificate the apiserver uses to access etcd, and save them into apiserver-etcd-client.cert and apiserver-etcd-client.key files.

If both files already exist, kubeadm skips the generation step and existing files will be used.

Alpha Disclaimer: this command is currently alpha.

```
kubeadm init phase certs apiserver-etcd-client [flags]
```
@@ -16,71 +15,17 @@ kubeadm init phase certs apiserver-etcd-client [flags]

### Options

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for apiserver-etcd-client
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-etcd-client +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md.orig new file mode 100644 index 0000000000000..03af7712c04e7 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md.orig @@ -0,0 +1,31 @@ + +Generate the certificate the apiserver uses to access etcd + +### Synopsis + +Generate the certificate the apiserver uses to access etcd, and save them into apiserver-etcd-client.cert and apiserver-etcd-client.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs apiserver-etcd-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-etcd-client +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md index 0844fbefd411a..3ef086552157e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md @@ -1,10 +1,9 @@ -Generates the Client certificate for the API server to connect to kubelet +Generate the certificate for the API server to connect to kubelet ### Synopsis - -Generates the Client certificate for the API server to connect to kubelet, and saves them into apiserver-kubelet-client.cert and apiserver-kubelet-client.key files. +Generate the certificate for the API server to connect to kubelet, and save them into apiserver-kubelet-client.cert and apiserver-kubelet-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. @@ -16,71 +15,17 @@ kubeadm init phase certs apiserver-kubelet-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for apiserver-kubelet-client
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-kubelet-client +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md.orig new file mode 100644 index 0000000000000..3ef086552157e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md.orig @@ -0,0 +1,31 @@ + +Generate the certificate for the API server to connect to kubelet + +### Synopsis + +Generate the certificate for the API server to connect to kubelet, and save them into apiserver-kubelet-client.cert and apiserver-kubelet-client.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs apiserver-kubelet-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for apiserver-kubelet-client +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md index 6722f88789687..9562ffd8fedaa 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md @@ -1,10 +1,9 @@ -Generates the certificate for serving the Kubernetes API +Generate the certificate for serving the Kubernetes API ### Synopsis - -Generates the certificate for serving the Kubernetes API, and saves them into apiserver.cert and apiserver.key files. +Generate the certificate for serving the Kubernetes API, and save them into apiserver.cert and apiserver.key files. Default SANs are kubernetes, kubernetes.default, kubernetes.default.svc, kubernetes.default.svc.cluster.local, 10.96.0.1, 127.0.0.1 @@ -18,99 +17,21 @@ kubeadm init phase certs apiserver [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-cert-extra-sans stringSlice
Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for apiserver
--service-cidr string     Default: "10.96.0.0/12"
Use alternative range of IP address for service VIPs.
--service-dns-domain string     Default: "cluster.local"
Use alternative domain for services, e.g. "myorg.internal".
+```
+      --apiserver-advertise-address string   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
+      --apiserver-cert-extra-sans strings   Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
+      --cert-dir string   The path where to save and store the certificates. (default "/etc/kubernetes/pki")
+      --config string   Path to a kubeadm configuration file.
+      --csr-dir string   The path to output the CSRs and private keys to
+      --csr-only   Create CSRs instead of generating certificates
+  -h, --help   help for apiserver
+      --service-cidr string   Use alternative range of IP address for service VIPs. (default "10.96.0.0/12")
+      --service-dns-domain string   Use alternative domain for services, e.g. "myorg.internal". (default "cluster.local")
+```

### Options inherited from parent commands

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
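A minimal sketch of using the CSR-related flags documented above, for setups where the serving certificate is signed by an external CA; the output directory and SAN are assumed example values:

```
# Emit a CSR and private key for the API server certificate instead of a signed certificate
kubeadm init phase certs apiserver \
  --csr-only \
  --csr-dir /etc/kubernetes/pki/csr \
  --apiserver-cert-extra-sans lb.example.com
```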
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md
index 46ceaf2d5927d..cc5d479e7bddb 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md
@@ -1,10 +1,9 @@
-Generates the self-signed Kubernetes CA to provision identities for other Kubernetes components
+Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components

### Synopsis

-
-Generates the self-signed Kubernetes CA to provision identities for other Kubernetes components, and saves them into ca.cert and ca.key files.
+Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components, and save them into ca.cert and ca.key files.

If both files already exist, kubeadm skips the generation step and existing files will be used.

Alpha Disclaimer: this command is currently alpha.

```
kubeadm init phase certs ca [flags]
```
@@ -16,57 +15,15 @@ kubeadm init phase certs ca [flags]

### Options

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
-h, --help
help for ca
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for ca +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md.orig new file mode 100644 index 0000000000000..cc5d479e7bddb --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md.orig @@ -0,0 +1,29 @@ + +Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components + +### Synopsis + +Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components, and save them into ca.cert and ca.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs ca [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for ca +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md index 31b68305cfd36..48345f6b01fa2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md @@ -1,10 +1,9 @@ -Generates the self-signed CA to provision identities for etcd +Generate the self-signed CA to provision identities for etcd ### Synopsis - -Generates the self-signed CA to provision identities for etcd, and saves them into etcd/ca.cert and etcd/ca.key files. +Generate the self-signed CA to provision identities for etcd, and save them into etcd/ca.cert and etcd/ca.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. @@ -16,57 +15,15 @@ kubeadm init phase certs etcd-ca [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
-h, --help
help for etcd-ca
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for etcd-ca +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md.orig new file mode 100644 index 0000000000000..48345f6b01fa2 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md.orig @@ -0,0 +1,29 @@ + +Generate the self-signed CA to provision identities for etcd + +### Synopsis + +Generate the self-signed CA to provision identities for etcd, and save them into etcd/ca.cert and etcd/ca.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs etcd-ca [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for etcd-ca +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md index d522c84d86e44..c4428459b747b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md @@ -1,10 +1,9 @@ -Generates the client certificate for liveness probes to healtcheck etcd +Generate the certificate for liveness probes to healtcheck etcd ### Synopsis - -Generates the client certificate for liveness probes to healtcheck etcd, and saves them into etcd/healthcheck-client.cert and etcd/healthcheck-client.key files. +Generate the certificate for liveness probes to healtcheck etcd, and save them into etcd/healthcheck-client.cert and etcd/healthcheck-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. @@ -16,71 +15,17 @@ kubeadm init phase certs etcd-healthcheck-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for etcd-healthcheck-client
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-healthcheck-client +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md.orig new file mode 100644 index 0000000000000..c4428459b747b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md.orig @@ -0,0 +1,31 @@ + +Generate the certificate for liveness probes to healtcheck etcd + +### Synopsis + +Generate the certificate for liveness probes to healtcheck etcd, and save them into etcd/healthcheck-client.cert and etcd/healthcheck-client.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs etcd-healthcheck-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-healthcheck-client +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md index e3b5d8fd70942..fd9f355a6abe4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md @@ -1,10 +1,9 @@ -Generates the credentials for etcd nodes to communicate with each other +Generate the certificate for etcd nodes to communicate with each other ### Synopsis - -Generates the credentials for etcd nodes to communicate with each other, and saves them into etcd/peer.cert and etcd/peer.key files. +Generate the certificate for etcd nodes to communicate with each other, and save them into etcd/peer.cert and etcd/peer.key files. Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 @@ -18,71 +17,17 @@ kubeadm init phase certs etcd-peer [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for etcd-peer
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-peer +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md.orig new file mode 100644 index 0000000000000..fd9f355a6abe4 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md.orig @@ -0,0 +1,33 @@ + +Generate the certificate for etcd nodes to communicate with each other + +### Synopsis + +Generate the certificate for etcd nodes to communicate with each other, and save them into etcd/peer.cert and etcd/peer.key files. + +Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs etcd-peer [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-peer +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md index 74712f32573f3..19489e9f16cdc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md @@ -1,10 +1,9 @@ -Generates the certificate for serving etcd +Generate the certificate for serving etcd ### Synopsis - -Generates the certificate for serving etcd, and saves them into etcd/server.cert and etcd/server.key files. +Generate the certificate for serving etcd, and save them into etcd/server.cert and etcd/server.key files. Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 @@ -18,71 +17,17 @@ kubeadm init phase certs etcd-server [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for etcd-server
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-server +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md.orig new file mode 100644 index 0000000000000..19489e9f16cdc --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md.orig @@ -0,0 +1,33 @@ + +Generate the certificate for serving etcd + +### Synopsis + +Generate the certificate for serving etcd, and save them into etcd/server.cert and etcd/server.key files. + +Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs etcd-server [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for etcd-server +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md index ed8006d13bebf..dcf4e16da259d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md @@ -1,10 +1,9 @@ -Generates the self-signed CA to provision identities for front proxy +Generate the self-signed CA to provision identities for front proxy ### Synopsis - -Generates the self-signed CA to provision identities for front proxy, and saves them into front-proxy-ca.cert and front-proxy-ca.key files. +Generate the self-signed CA to provision identities for front proxy, and save them into front-proxy-ca.cert and front-proxy-ca.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. @@ -16,57 +15,15 @@ kubeadm init phase certs front-proxy-ca [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
-h, --help
help for front-proxy-ca
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for front-proxy-ca +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md.orig new file mode 100644 index 0000000000000..dcf4e16da259d --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md.orig @@ -0,0 +1,29 @@ + +Generate the self-signed CA to provision identities for front proxy + +### Synopsis + +Generate the self-signed CA to provision identities for front proxy, and save them into front-proxy-ca.cert and front-proxy-ca.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs front-proxy-ca [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for front-proxy-ca +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md index 947313cf7afb6..8594356941b57 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md @@ -1,10 +1,9 @@ -Generates the client for the front proxy +Generate the certificate for the front proxy client ### Synopsis - -Generates the client for the front proxy, and saves them into front-proxy-client.cert and front-proxy-client.key files. +Generate the certificate for the front proxy client, and save them into front-proxy-client.cert and front-proxy-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. @@ -16,71 +15,17 @@ kubeadm init phase certs front-proxy-client [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
--config string
Path to a kubeadm configuration file.
--csr-dir string
The path to output the CSRs and private keys to
--csr-only
Create CSRs instead of generating certificates
-h, --help
help for front-proxy-client
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for front-proxy-client +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md.orig new file mode 100644 index 0000000000000..8594356941b57 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md.orig @@ -0,0 +1,31 @@ + +Generate the certificate for the front proxy client + +### Synopsis + +Generate the certificate for the front proxy client, and save them into front-proxy-client.cert and front-proxy-client.key files. + +If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs front-proxy-client [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --csr-dir string The path to output the CSRs and private keys to + --csr-only Create CSRs instead of generating certificates + -h, --help help for front-proxy-client +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md index c2d12b4ec67a3..d820c1db4a9a1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md @@ -1,10 +1,9 @@ -Generates a private key for signing service account tokens along with its public key +Generate a private key for signing service account tokens along with its public key ### Synopsis - -Generates the private key for signing service account tokens along with its public key, and saves them into sa.key and sa.pub files. If both files already exist, kubeadm skips the generation step and existing files will be used. +Generate the private key for signing service account tokens along with its public key, and save them into sa.key and sa.pub files. If both files already exist, kubeadm skips the generation step and existing files will be used. Alpha Disclaimer: this command is currently alpha. @@ -14,50 +13,14 @@ kubeadm init phase certs sa [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.
-h, --help
help for sa
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + -h, --help help for sa +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md.orig new file mode 100644 index 0000000000000..d820c1db4a9a1 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md.orig @@ -0,0 +1,26 @@ + +Generate a private key for signing service account tokens along with its public key + +### Synopsis + +Generate the private key for signing service account tokens along with its public key, and save them into sa.key and sa.pub files. If both files already exist, kubeadm skips the generation step and existing files will be used. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm init phase certs sa [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + -h, --help help for sa +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md index 38cc40ed905ca..e3b47df73fd31 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md @@ -1,9 +1,8 @@ -Generates all static Pod manifest files necessary to establish the control plane +Generate all static Pod manifest files necessary to establish the control plane ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ``` @@ -12,43 +11,13 @@ kubeadm init phase control-plane [flags] ### Options - - - - - - - - - - - - - - - -
-h, --help
help for control-plane
- - +``` + -h, --help help for control-plane +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md index 5b1ec24cf1545..d5a229d47e4cb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md @@ -1,10 +1,9 @@ -Generates all static Pod manifest files +Generate all static Pod manifest files ### Synopsis - -Generates all static Pod manifest files +Generate all static Pod manifest files ``` kubeadm init phase control-plane all [flags] @@ -23,127 +22,25 @@ kubeadm init phase control-plane all [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --apiserver-extra-args mapStringString
-   A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value>
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   --controller-manager-extra-args mapStringString
-   A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value>
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for all
-   --image-repository string  Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubernetes-version string  Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --pod-network-cidr string
-   Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
-   --scheduler-extra-args mapStringString
-   A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value>
-   --service-cidr string  Default: "10.96.0.0/12"
-   Use alternative range of IP address for service VIPs.
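To make the option list above concrete, here is a hypothetical invocation of this phase; the advertise address, pod CIDR, and version below are illustrative placeholder values, not defaults taken from this page:

```
kubeadm init phase control-plane all \
  --apiserver-advertise-address 192.168.0.10 \
  --pod-network-cidr 10.244.0.0/16 \
  --kubernetes-version v1.15.0
```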
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --apiserver-extra-args mapStringString A set of extra flags to pass to the API Server or override default ones in form of = + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --controller-manager-extra-args mapStringString A set of extra flags to pass to the Controller Manager or override default ones in form of = + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for all + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. + --scheduler-extra-args mapStringString A set of extra flags to pass to the Scheduler or override default ones in form of = + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md.orig new file mode 100644 index 0000000000000..d5a229d47e4cb --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md.orig @@ -0,0 +1,46 @@ + +Generate all static Pod manifest files + +### Synopsis + +Generate all static Pod manifest files + +``` +kubeadm init phase control-plane all [flags] +``` + +### Examples + +``` + # Generates all static Pod manifest files for control plane components, + # functionally equivalent to what is generated by kubeadm init. + kubeadm init phase control-plane all + + # Generates all static Pod manifest files using options read from a configuration file. + kubeadm init phase control-plane all --config config.yaml +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --apiserver-extra-args mapStringString A set of extra flags to pass to the API Server or override default ones in form of = + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --controller-manager-extra-args mapStringString A set of extra flags to pass to the Controller Manager or override default ones in form of = + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for all + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. + --scheduler-extra-args mapStringString A set of extra flags to pass to the Scheduler or override default ones in form of = + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md index 4f008fa915f72..a73f6979debb4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md @@ -3,7 +3,6 @@ Generates the kube-apiserver static Pod manifest ### Synopsis - Generates the kube-apiserver static Pod manifest ``` @@ -12,106 +11,22 @@ kubeadm init phase control-plane apiserver [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --apiserver-extra-args mapStringString
-   A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value>
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for apiserver
-   --image-repository string  Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubernetes-version string  Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --service-cidr string  Default: "10.96.0.0/12"
-   Use alternative range of IP address for service VIPs.
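As a sketch of how the `--apiserver-extra-args` map described above might be used, the following passes one extra flag to the API Server; the `audit-log-path` flag name and its value are assumed examples, not taken from this page:

```
kubeadm init phase control-plane apiserver \
  --apiserver-extra-args audit-log-path=/var/log/kubernetes/audit.log
```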
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --apiserver-extra-args mapStringString A set of extra flags to pass to the API Server or override default ones in form of = + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for apiserver + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md.orig new file mode 100644 index 0000000000000..a73f6979debb4 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md.orig @@ -0,0 +1,32 @@ + +Generates the kube-apiserver static Pod manifest + +### Synopsis + +Generates the kube-apiserver static Pod manifest + +``` +kubeadm init phase control-plane apiserver [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --apiserver-extra-args mapStringString A set of extra flags to pass to the API Server or override default ones in form of = + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for apiserver + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --service-cidr string Use alternative range of IP address for service VIPs. (default "10.96.0.0/12") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md index b5d7578202a37..a12e82f3c96ea 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md @@ -3,7 +3,6 @@ Generates the kube-controller-manager static Pod manifest ### Synopsis - Generates the kube-controller-manager static Pod manifest ``` @@ -12,85 +11,19 @@ kubeadm init phase control-plane controller-manager [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   --controller-manager-extra-args mapStringString
-   A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value>
-   -h, --help
-   help for controller-manager
-   --image-repository string  Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubernetes-version string  Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --pod-network-cidr string
-   Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --controller-manager-extra-args mapStringString A set of extra flags to pass to the Controller Manager or override default ones in form of = + -h, --help help for controller-manager + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md.orig new file mode 100644 index 0000000000000..a12e82f3c96ea --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md.orig @@ -0,0 +1,29 @@ + +Generates the kube-controller-manager static Pod manifest + +### Synopsis + +Generates the kube-controller-manager static Pod manifest + +``` +kubeadm init phase control-plane controller-manager [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + --controller-manager-extra-args mapStringString A set of extra flags to pass to the Controller Manager or override default ones in form of = + -h, --help help for controller-manager + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --pod-network-cidr string Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md index 9a05d613295a3..13219cef3662c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md @@ -3,7 +3,6 @@ Generates the kube-scheduler static Pod manifest ### Synopsis - Generates the kube-scheduler static Pod manifest ``` @@ -12,78 +11,18 @@ kubeadm init phase control-plane scheduler [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for scheduler
-   --image-repository string  Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
-   --kubernetes-version string  Default: "stable-1"
-   Choose a specific Kubernetes version for the control plane.
-   --scheduler-extra-args mapStringString
-   A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value>
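A hypothetical use of the `--scheduler-extra-args` map described above; the `address` flag and its value are assumed examples of a kube-scheduler flag, not taken from this page:

```
kubeadm init phase control-plane scheduler \
  --scheduler-extra-args address=127.0.0.1
```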
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for scheduler + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --scheduler-extra-args mapStringString A set of extra flags to pass to the Scheduler or override default ones in form of = +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md.orig new file mode 100644 index 0000000000000..13219cef3662c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md.orig @@ -0,0 +1,28 @@ + +Generates the kube-scheduler static Pod manifest + +### Synopsis + +Generates the kube-scheduler static Pod manifest + +``` +kubeadm init phase control-plane scheduler [flags] +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for scheduler + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") + --kubernetes-version string Choose a specific Kubernetes version for the control plane. (default "stable-1") + --scheduler-extra-args mapStringString A set of extra flags to pass to the Scheduler or override default ones in form of = +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md index fa2a91f055085..e6c93bc7e2ecc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md @@ -1,9 +1,8 @@ -Generates static Pod manifest file for local etcd. +Generate static Pod manifest file for local etcd ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ``` @@ -12,43 +11,13 @@ kubeadm init phase etcd [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for etcd
- - +``` + -h, --help help for etcd +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md index 6b6e2e613b0eb..c07f3d39ca345 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md @@ -1,10 +1,9 @@ -Generates the static Pod manifest file for a local, single-node local etcd instance. +Generate the static Pod manifest file for a local, single-node local etcd instance ### Synopsis - -Generates the static Pod manifest file for a local, single-node local etcd instance. +Generate the static Pod manifest file for a local, single-node local etcd instance ``` kubeadm init phase etcd local [flags] @@ -24,64 +23,16 @@ kubeadm init phase etcd local [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for local
-   --image-repository string  Default: "k8s.gcr.io"
-   Choose a container registry to pull control plane images from
- - +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for local + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md.orig new file mode 100644 index 0000000000000..c07f3d39ca345 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md.orig @@ -0,0 +1,38 @@ + +Generate the static Pod manifest file for a local, single-node local etcd instance + +### Synopsis + +Generate the static Pod manifest file for a local, single-node local etcd instance + +``` +kubeadm init phase etcd local [flags] +``` + +### Examples + +``` + # Generates the static Pod manifest file for etcd, functionally + # equivalent to what is generated by kubeadm init. + kubeadm init phase etcd local + + # Generates the static Pod manifest file for etcd using options + # read from a configuration file. + kubeadm init phase etcd local --config config.yaml +``` + +### Options + +``` + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for local + --image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md index 8931be52e7964..e903238a9effb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md @@ -1,9 +1,8 @@ -Generates all kubeconfig files necessary to establish the control plane and the admin kubeconfig file +Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ``` @@ -12,43 +11,13 @@ kubeadm init phase kubeconfig [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for kubeconfig
- - +``` + -h, --help help for kubeconfig +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md index dfb534b06a864..4173da018fb03 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md @@ -1,10 +1,9 @@ -Generates a kubeconfig file for the admin to use and for kubeadm itself +Generate a kubeconfig file for the admin to use and for kubeadm itself ### Synopsis - -Generates the kubeconfig file for the admin and for kubeadm itself, and saves it to admin.conf file. +Generate the kubeconfig file for the admin and for kubeadm itself, and save it to admin.conf file. ``` kubeadm init phase kubeconfig admin [flags] @@ -12,78 +11,18 @@ kubeadm init phase kubeconfig admin [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for admin
-   --kubeconfig-dir string  Default: "/etc/kubernetes"
-   The path where to save the kubeconfig file.
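For illustration, a possible invocation of this phase using the flags listed above; the advertise address is a placeholder, and the kubeconfig directory shown is simply the documented default:

```
kubeadm init phase kubeconfig admin \
  --apiserver-advertise-address 192.168.0.10 \
  --kubeconfig-dir /etc/kubernetes
```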
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for admin + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md.orig new file mode 100644 index 0000000000000..4173da018fb03 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md.orig @@ -0,0 +1,28 @@ + +Generate a kubeconfig file for the admin to use and for kubeadm itself + +### Synopsis + +Generate the kubeconfig file for the admin and for kubeadm itself, and save it to admin.conf file. + +``` +kubeadm init phase kubeconfig admin [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for admin + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md index 49fd712d254bd..5182f62548269 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md @@ -1,10 +1,9 @@ -Generates all kubeconfig files +Generate all kubeconfig files ### Synopsis - -Generates all kubeconfig files +Generate all kubeconfig files ``` kubeadm init phase kubeconfig all [flags] @@ -12,85 +11,19 @@ kubeadm init phase kubeconfig all [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for all
-   --kubeconfig-dir string  Default: "/etc/kubernetes"
-   The path where to save the kubeconfig file.
-   --node-name string
-   Specify the node name.
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for all + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") + --node-name string Specify the node name. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md.orig new file mode 100644 index 0000000000000..5182f62548269 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md.orig @@ -0,0 +1,29 @@ + +Generate all kubeconfig files + +### Synopsis + +Generate all kubeconfig files + +``` +kubeadm init phase kubeconfig all [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for all + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md index de6b6f09dfa30..5e6514887abf0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md @@ -1,10 +1,9 @@ -Generates a kubeconfig file for the controller manager to use +Generate a kubeconfig file for the controller manager to use ### Synopsis - -Generates the kubeconfig file for the controller manager to use and saves it to controller-manager.conf file +Generate the kubeconfig file for the controller manager to use and save it to controller-manager.conf file ``` kubeadm init phase kubeconfig controller-manager [flags] @@ -12,78 +11,18 @@ kubeadm init phase kubeconfig controller-manager [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for controller-manager
-   --kubeconfig-dir string  Default: "/etc/kubernetes"
-   The path where to save the kubeconfig file.
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for controller-manager + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md.orig new file mode 100644 index 0000000000000..5e6514887abf0 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md.orig @@ -0,0 +1,28 @@ + +Generate a kubeconfig file for the controller manager to use + +### Synopsis + +Generate the kubeconfig file for the controller manager to use and save it to controller-manager.conf file + +``` +kubeadm init phase kubeconfig controller-manager [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for controller-manager + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md index de886a59a54e6..4d08ce4bcfd17 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md @@ -1,10 +1,9 @@ -Generates a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes +Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes ### Synopsis - -Generates the kubeconfig file for the kubelet to use and saves it to kubelet.conf file. +Generate the kubeconfig file for the kubelet to use and save it to kubelet.conf file. Please note that this should only be used for cluster bootstrapping purposes. After your control plane is up, you should request all kubelet credentials from the CSR API. @@ -14,85 +13,19 @@ kubeadm init phase kubeconfig kubelet [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for kubelet
-   --kubeconfig-dir string  Default: "/etc/kubernetes"
-   The path where to save the kubeconfig file.
-   --node-name string
-   Specify the node name.
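A hypothetical invocation using the flags listed above; the address and node name are placeholder values:

```
kubeadm init phase kubeconfig kubelet \
  --apiserver-advertise-address 192.168.0.10 \
  --node-name node-1
```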
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for kubelet + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") + --node-name string Specify the node name. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md.orig new file mode 100644 index 0000000000000..4d08ce4bcfd17 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md.orig @@ -0,0 +1,31 @@ + +Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes + +### Synopsis + +Generate the kubeconfig file for the kubelet to use and save it to kubelet.conf file. + +Please note that this should only be used for cluster bootstrapping purposes. After your control plane is up, you should request all kubelet credentials from the CSR API. + +``` +kubeadm init phase kubeconfig kubelet [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for kubelet + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md index 0a6b5e56e14e4..72784a950809d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md @@ -1,10 +1,9 @@ -Generates a kubeconfig file for the scheduler to use +Generate a kubeconfig file for the scheduler to use ### Synopsis - -Generates the kubeconfig file for the scheduler to use and saves it to scheduler.conf file. +Generate the kubeconfig file for the scheduler to use and save it to scheduler.conf file. ``` kubeadm init phase kubeconfig scheduler [flags] @@ -12,78 +11,18 @@ kubeadm init phase kubeconfig scheduler [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --apiserver-advertise-address string
-   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
-   --apiserver-bind-port int32  Default: 6443
-   Port for the API Server to bind to.
-   --cert-dir string  Default: "/etc/kubernetes/pki"
-   The path where to save and store the certificates.
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for scheduler
-   --kubeconfig-dir string  Default: "/etc/kubernetes"
-   The path where to save the kubeconfig file.
- - +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for scheduler + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md.orig new file mode 100644 index 0000000000000..72784a950809d --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md.orig @@ -0,0 +1,28 @@ + +Generate a kubeconfig file for the scheduler to use + +### Synopsis + +Generate the kubeconfig file for the scheduler to use and save it to scheduler.conf file. + +``` +kubeadm init phase kubeconfig scheduler [flags] +``` + +### Options + +``` + --apiserver-advertise-address string The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 Port for the API Server to bind to. (default 6443) + --cert-dir string The path where to save and store the certificates. (default "/etc/kubernetes/pki") + --config string Path to a kubeadm configuration file. + -h, --help help for scheduler + --kubeconfig-dir string The path where to save the kubeconfig file. (default "/etc/kubernetes") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md index a8fcfb5619fef..4ee94fe1d0a87 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md @@ -1,10 +1,9 @@ -Writes kubelet settings and (re)starts the kubelet +Write kubelet settings and (re)start the kubelet ### Synopsis - -Writes a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)starts kubelet. +Write a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)start kubelet. ``` kubeadm init phase kubelet-start [flags] @@ -19,64 +18,16 @@ kubeadm init phase kubelet-start [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to a kubeadm configuration file.
-   --cri-socket string
-   Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
-   -h, --help
-   help for kubelet-start
-   --node-name string
-   Specify the node name.
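A hypothetical example of pointing this phase at a non-default CRI socket, as described above; the containerd socket path and node name are assumed values:

```
kubeadm init phase kubelet-start \
  --cri-socket /run/containerd/containerd.sock \
  --node-name node-1
```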
- - +``` + --config string Path to a kubeadm configuration file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + -h, --help help for kubelet-start + --node-name string Specify the node name. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md.orig new file mode 100644 index 0000000000000..4ee94fe1d0a87 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md.orig @@ -0,0 +1,33 @@ + +Write kubelet settings and (re)start the kubelet + +### Synopsis + +Write a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)start kubelet. + +``` +kubeadm init phase kubelet-start [flags] +``` + +### Examples + +``` + # Writes a dynamic environment file with kubelet flags from a InitConfiguration file. + kubeadm init phase kubelet-start --config config.yaml +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + -h, --help help for kubelet-start + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md index ca7a77f76b11b..f30046cc55b7f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md @@ -3,7 +3,6 @@ Mark a node as a control-plane ### Synopsis - Mark a node as a control-plane ``` @@ -22,57 +21,15 @@ kubeadm init phase mark-control-plane [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for mark-control-plane
-   --node-name string
-   Specify the node name.
- - +``` + --config string Path to a kubeadm configuration file. + -h, --help help for mark-control-plane + --node-name string Specify the node name. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md.orig new file mode 100644 index 0000000000000..f30046cc55b7f --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md.orig @@ -0,0 +1,35 @@ + +Mark a node as a control-plane + +### Synopsis + +Mark a node as a control-plane + +``` +kubeadm init phase mark-control-plane [flags] +``` + +### Examples + +``` + # Applies control-plane label and taint to the current node, functionally equivalent to what executed by kubeadm init. + kubeadm init phase mark-control-plane --config config.yml + + # Applies control-plane label and taint to a specific node + kubeadm init phase mark-control-plane --node-name myNode +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + -h, --help help for mark-control-plane + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md index a306194ae65b0..499c88dfcd440 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md @@ -3,7 +3,6 @@ Run pre-flight checks ### Synopsis - Run pre-flight checks for kubeadm init. ``` @@ -19,57 +18,15 @@ kubeadm init phase preflight [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for preflight
-   --ignore-preflight-errors stringSlice
-   A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
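Using the example values quoted in the flag description above, a possible invocation that downgrades those specific check failures to warnings:

```
kubeadm init phase preflight \
  --ignore-preflight-errors IsPrivilegedUser,Swap
```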
- - +``` + --config string Path to a kubeadm configuration file. + -h, --help help for preflight + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md.orig new file mode 100644 index 0000000000000..499c88dfcd440 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md.orig @@ -0,0 +1,32 @@ + +Run pre-flight checks + +### Synopsis + +Run pre-flight checks for kubeadm init. + +``` +kubeadm init phase preflight [flags] +``` + +### Examples + +``` + # Run pre-flight checks for kubeadm init using a config file. + kubeadm init phase preflight --config kubeadm-config.yml +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + -h, --help help for preflight + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md index bddd578751fcf..6db96943a062e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md @@ -12,52 +12,13 @@ kubeadm init phase upload-certs [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --certificate-key string
-   Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.
-   --config string
-   Path to a kubeadm configuration file.
-   --experimental-upload-certs
-   Upload control-plane certificates to the kubeadm-certs Secret.
-   -h, --help
-   help for upload-certs
-   --skip-certificate-key-print
-   Don't print the key used to encrypt the control-plane certificates.
- - +``` + --certificate-key string Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. + --config string Path to a kubeadm configuration file. + -h, --help help for upload-certs + --skip-certificate-key-print Don't print the key used to encrypt the control-plane certificates. + --upload-certs Upload control-plane certificates to the kubeadm-certs Secret. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md.orig new file mode 100644 index 0000000000000..6db96943a062e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md.orig @@ -0,0 +1,43 @@ + +Upload certificates to kubeadm-certs + +### Synopsis + + +This command is not meant to be run on its own. See list of available subcommands. + +``` +kubeadm init phase upload-certs [flags] +``` + +### Options + +``` + --certificate-key string Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. + --config string Path to a kubeadm configuration file. + -h, --help help for upload-certs + --skip-certificate-key-print Don't print the key used to encrypt the control-plane certificates. + --upload-certs Upload control-plane certificates to the kubeadm-certs Secret. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+   --rootfs string
+   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md index 0f20e04b16fc0..c44ed6054818c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md @@ -1,9 +1,8 @@ -Uploads the kubeadm and kubelet configuration to a ConfigMap +Upload the kubeadm and kubelet configuration to a ConfigMap ### Synopsis - This command is not meant to be run on its own. See list of available subcommands. ``` @@ -12,43 +11,13 @@ kubeadm init phase upload-config [flags] ### Options - - - - - - - - - - - - - - - -
-   -h, --help
-   help for upload-config
- - +``` + -h, --help help for upload-config +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md index 992005b10d66c..d6ccc063d36b1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md @@ -1,10 +1,9 @@ -Uploads all configuration to a config map +Upload all configuration to a config map ### Synopsis - -Uploads all configuration to a config map +Upload all configuration to a config map ``` kubeadm init phase upload-config all [flags] @@ -12,57 +11,15 @@ kubeadm init phase upload-config all [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   --config string
-   Path to a kubeadm configuration file.
-   -h, --help
-   help for all
-   --kubeconfig string  Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
- - +``` + --config string Path to a kubeadm configuration file. + -h, --help help for all + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md.orig new file mode 100644 index 0000000000000..d6ccc063d36b1 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md.orig @@ -0,0 +1,25 @@ + +Upload all configuration to a config map + +### Synopsis + +Upload all configuration to a config map + +``` +kubeadm init phase upload-config all [flags] +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + -h, --help help for all + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md index 5c6d8f2eeb558..cec14b629f0e8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md @@ -1,10 +1,9 @@ -Uploads the kubeadm ClusterConfiguration to a ConfigMap +Upload the kubeadm ClusterConfiguration to a ConfigMap ### Synopsis - -Uploads the kubeadm ClusterConfiguration to a ConfigMap called kubeadm-config in the kube-system namespace. This enables correct configuration of system components and a seamless user experience when upgrading. +Upload the kubeadm ClusterConfiguration to a ConfigMap called kubeadm-config in the kube-system namespace. This enables correct configuration of system components and a seamless user experience when upgrading. Alternatively, you can use kubeadm config. @@ -15,63 +14,21 @@ kubeadm init phase upload-config kubeadm [flags] ### Examples ``` - # uploads the configuration of your cluster + # upload the configuration of your cluster kubeadm init phase upload-config --config=myConfig.yaml ``` ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --config string Path to a kubeadm configuration file. + -h, --help help for kubeadm + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
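A sketch of running and inspecting the `upload-config kubeadm` phase above, reusing the page's own `myConfig.yaml` example and the default admin kubeconfig listed in the options (`/etc/kubernetes/admin.conf`):

```
# Upload only the ClusterConfiguration to the kubeadm-config ConfigMap.
kubeadm init phase upload-config kubeadm --config=myConfig.yaml

# Inspect what was uploaded.
kubectl --kubeconfig /etc/kubernetes/admin.conf \
  -n kube-system get configmap kubeadm-config -o yaml
```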
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md.orig new file mode 100644 index 0000000000000..cec14b629f0e8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md.orig @@ -0,0 +1,34 @@ + +Upload the kubeadm ClusterConfiguration to a ConfigMap + +### Synopsis + +Upload the kubeadm ClusterConfiguration to a ConfigMap called kubeadm-config in the kube-system namespace. This enables correct configuration of system components and a seamless user experience when upgrading. + +Alternatively, you can use kubeadm config. + +``` +kubeadm init phase upload-config kubeadm [flags] +``` + +### Examples + +``` + # upload the configuration of your cluster + kubeadm init phase upload-config --config=myConfig.yaml +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + -h, --help help for kubeadm + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md index f9c14aacfcb3f..549703c9702c8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md @@ -1,10 +1,9 @@ -Uploads the kubelet component config to a ConfigMap +Upload the kubelet component config to a ConfigMap ### Synopsis - -Uploads kubelet configuration extracted from the kubeadm InitConfiguration object to a ConfigMap of the form kubelet-config-1.X in the cluster, where X is the minor version of the current (API Server) Kubernetes version. +Upload kubelet configuration extracted from the kubeadm InitConfiguration object to a ConfigMap of the form kubelet-config-1.X in the cluster, where X is the minor version of the current (API Server) Kubernetes version. ``` kubeadm init phase upload-config kubelet [flags] @@ -13,63 +12,21 @@ kubeadm init phase upload-config kubelet [flags] ### Examples ``` - # Uploads the kubelet configuration from the kubeadm Config file to a ConfigMap in the cluster. + # Upload the kubelet configuration from the kubeadm Config file to a ConfigMap in the cluster. kubeadm init phase upload-config kubelet --config kubeadm.yaml ``` ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - +``` + --config string Path to a kubeadm configuration file. + -h, --help help for kubelet + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
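A sketch for the kubelet variant above; as the synopsis explains, the ConfigMap name tracks the control plane's minor version, so for this release it is `kubelet-config-1.15`:

```
# Upload the kubelet configuration extracted from the kubeadm InitConfiguration.
kubeadm init phase upload-config kubelet --config kubeadm.yaml

# For a v1.15 control plane the resulting ConfigMap is kubelet-config-1.15.
kubectl -n kube-system get configmap kubelet-config-1.15 -o yaml
```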
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md.orig new file mode 100644 index 0000000000000..549703c9702c8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md.orig @@ -0,0 +1,32 @@ + +Upload the kubelet component config to a ConfigMap + +### Synopsis + +Upload kubelet configuration extracted from the kubeadm InitConfiguration object to a ConfigMap of the form kubelet-config-1.X in the cluster, where X is the minor version of the current (API Server) Kubernetes version. + +``` +kubeadm init phase upload-config kubelet [flags] +``` + +### Examples + +``` + # Upload the kubelet configuration from the kubeadm Config file to a ConfigMap in the cluster. + kubeadm init phase upload-config kubelet --config kubeadm.yaml +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + -h, --help help for kubelet + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md index f6ec6cf998f42..8e8c47444e857 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md @@ -4,7 +4,6 @@ Run this on any machine you wish to join an existing cluster ### Synopsis - When joining a kubeadm initialized cluster, we need to establish bidirectional trust. This is split into discovery (having the Node trust the Kubernetes Control Plane) and TLS bootstrap (having the @@ -50,13 +49,13 @@ Often times the same token is used for both parts. In this case, the The "join [api-server-endpoint]" command executes the following phases: ``` preflight Run join pre-flight checks -control-plane-prepare Prepares the machine for serving a control plane. 
- /download-certs  [EXPERIMENTAL] Downloads certificates shared among control-plane nodes from the kubeadm-certs Secret
- /certs           Generates the certificates for the new control plane components
- /kubeconfig      Generates the kubeconfig for the new control plane components
- /control-plane   Generates the manifests for the new control plane components
-kubelet-start     Writes kubelet settings, certificates and (re)starts the kubelet
-control-plane-join Joins a machine as a control plane instance
+control-plane-prepare Prepare the machine for serving a control plane
+ /download-certs  [EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret
+ /certs           Generate the certificates for the new control plane components
+ /kubeconfig      Generate the kubeconfig for the new control plane components
+ /control-plane   Generate the manifests for the new control plane components
+kubelet-start     Write kubelet settings, certificates and (re)start the kubelet
+control-plane-join Join a machine as a control plane instance
  /etcd             Add a new local etcd member
  /update-status    Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap
  /mark-control-plane Mark a node as a control-plane
@@ -69,148 +68,29 @@ kubeadm join [api-server-endpoint] [flags]

### Options
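The `kubeadm join` synopsis above notes that the CA public key hash "can be calculated using standard tools". A sketch of one common way to do that with openssl, followed by a token-based worker join that pins the hash; the endpoint and token are the placeholder values already used on this page, `/etc/kubernetes/pki/ca.crt` is assumed to be the default kubeadm CA location, and `<hash>` is a hypothetical placeholder:

```
# On a control-plane node: sha256 over the CA certificate's Subject Public Key Info.
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'

# On the joining worker: token-based discovery with the CA key hash pinned.
kubeadm join 1.2.3.4:6443 \
  --token abcdef.1234567890abcdef \
  --discovery-token-ca-cert-hash sha256:<hash>
```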
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for join + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --node-name string Specify the node name. + --skip-phases strings List of phases to be skipped + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
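A sketch of joining a machine as an additional control-plane instance with the flags listed above; note that the hunk keeps both `--control-plane` and the older `--experimental-control-plane` spelling. `<hash>` and `<certificate-key>` are hypothetical placeholders produced on the first control-plane node:

```
kubeadm join 1.2.3.4:6443 \
  --token abcdef.1234567890abcdef \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane \
  --certificate-key <certificate-key>
```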
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md.orig new file mode 100644 index 0000000000000..8e8c47444e857 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md.orig @@ -0,0 +1,96 @@ + +Run this on any machine you wish to join an existing cluster + +### Synopsis + + +When joining a kubeadm initialized cluster, we need to establish +bidirectional trust. This is split into discovery (having the Node +trust the Kubernetes Control Plane) and TLS bootstrap (having the +Kubernetes Control Plane trust the Node). + +There are 2 main schemes for discovery. The first is to use a shared +token along with the IP address of the API server. The second is to +provide a file - a subset of the standard kubeconfig file. This file +can be a local file or downloaded via an HTTPS URL. The forms are +kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443, +kubeadm join --discovery-file path/to/file.conf, or kubeadm join +--discovery-file https://url/file.conf. Only one form can be used. If +the discovery information is loaded from a URL, HTTPS must be used. +Also, in that case the host installed CA bundle is used to verify +the connection. + +If you use a shared token for discovery, you should also pass the +--discovery-token-ca-cert-hash flag to validate the public key of the +root certificate authority (CA) presented by the Kubernetes Control Plane. +The value of this flag is specified as ":", +where the supported hash type is "sha256". The hash is calculated over +the bytes of the Subject Public Key Info (SPKI) object (as in RFC7469). +This value is available in the output of "kubeadm init" or can be +calculated using standard tools. The --discovery-token-ca-cert-hash flag +may be repeated multiple times to allow more than one public key. + +If you cannot know the CA public key hash ahead of time, you can pass +the --discovery-token-unsafe-skip-ca-verification flag to disable this +verification. This weakens the kubeadm security model since other nodes +can potentially impersonate the Kubernetes Control Plane. + +The TLS bootstrap mechanism is also driven via a shared token. This is +used to temporarily authenticate with the Kubernetes Control Plane to submit a +certificate signing request (CSR) for a locally created key pair. By +default, kubeadm will set up the Kubernetes Control Plane to automatically +approve these signing requests. This token is passed in with the +--tls-bootstrap-token abcdef.1234567890abcdef flag. + +Often times the same token is used for both parts. In this case, the +--token flag can be used instead of specifying each token individually. 
+ + +The "join [api-server-endpoint]" command executes the following phases: +``` +preflight Run join pre-flight checks +control-plane-prepare Prepare the machine for serving a control plane + /download-certs [EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret + /certs Generate the certificates for the new control plane components + /kubeconfig Generate the kubeconfig for the new control plane components + /control-plane Generate the manifests for the new control plane components +kubelet-start Write kubelet settings, certificates and (re)start the kubelet +control-plane-join Join a machine as a control plane instance + /etcd Add a new local etcd member + /update-status Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap + /mark-control-plane Mark a node as a control-plane +``` + + +``` +kubeadm join [api-server-endpoint] [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for join + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --node-name string Specify the node name. + --skip-phases strings List of phases to be skipped + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. 
+``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md index 1989f88ee7601..438f6d8fbbc28 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md @@ -1,10 +1,9 @@ -use this command to invoke single phase of the join workflow +Use this command to invoke single phase of the join workflow ### Synopsis - -use this command to invoke single phase of the join workflow +Use this command to invoke single phase of the join workflow ### Options diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md.orig new file mode 100644 index 0000000000000..438f6d8fbbc28 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md.orig @@ -0,0 +1,49 @@ + +Use this command to invoke single phase of the join workflow + +### Synopsis + +Use this command to invoke single phase of the join workflow + +### Options + + + + + + + + + + + + + + + + +
+ + + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md index 468b1eb2ef8bc..8d14b464ba248 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md @@ -1,10 +1,9 @@ -Joins a machine as a control plane instance +Join a machine as a control plane instance ### Synopsis - -Joins a machine as a control plane instance +Join a machine as a control plane instance ``` kubeadm join phase control-plane-join [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md.orig new file mode 100644 index 0000000000000..8d14b464ba248 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md.orig @@ -0,0 +1,60 @@ + +Join a machine as a control plane instance + +### Synopsis + +Join a machine as a control plane instance + +``` +kubeadm join phase control-plane-join [flags] +``` + +### Examples + +``` + # Joins a machine as a control plane instance + kubeadm join phase control-plane-join all +``` + +### Options + + + + + + + + + + + + + + + + +
+ + + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md index 78db629a16a17..c51823d743699 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md @@ -1,10 +1,9 @@ -Joins a machine as a control plane instance +Join a machine as a control plane instance ### Synopsis - -Joins a machine as a control plane instance +Join a machine as a control plane instance ``` kubeadm join phase control-plane-join all [flags] @@ -12,52 +11,14 @@ kubeadm join phase control-plane-join all [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
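A sketch of driving all three control-plane-join subphases from a config file; the file name `kubeadm-join.yaml` is a placeholder:

```
# Runs /etcd, /update-status and /mark-control-plane in sequence.
kubeadm join phase control-plane-join all --config kubeadm-join.yaml
```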
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for all + --node-name string Specify the node name. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md.orig new file mode 100644 index 0000000000000..c51823d743699 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md.orig @@ -0,0 +1,43 @@ + +Join a machine as a control plane instance + +### Synopsis + +Join a machine as a control plane instance + +``` +kubeadm join phase control-plane-join all [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for all + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md index 29d0a33e761f7..efc20a4d07d7c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md @@ -12,52 +12,14 @@ kubeadm join phase control-plane-join etcd [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
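A sketch of running only the etcd subphase, assuming a stacked (local) etcd topology and the same placeholder config file:

```
# Add this node as a new member of the local etcd cluster, leaving the
# remaining control-plane-join subphases to be run separately.
kubeadm join phase control-plane-join etcd --config kubeadm-join.yaml
```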
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for etcd + --node-name string Specify the node name. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md.orig new file mode 100644 index 0000000000000..efc20a4d07d7c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md.orig @@ -0,0 +1,44 @@ + +Add a new local etcd member + +### Synopsis + + +Add a new local etcd member + +``` +kubeadm join phase control-plane-join etcd [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for etcd + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md index 920068acfe924..e2c6753083924 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md @@ -12,45 +12,13 @@ kubeadm join phase control-plane-join mark-control-plane [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
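A sketch of running the marking subphase on its own and then checking the result from a machine with admin credentials; `<node-name>` is a hypothetical placeholder:

```
kubeadm join phase control-plane-join mark-control-plane --node-name <node-name>

# The node should now be reported with the control-plane (master) role.
kubectl get nodes
```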
- - +``` + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for mark-control-plane + --node-name string Specify the node name. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md.orig new file mode 100644 index 0000000000000..e2c6753083924 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md.orig @@ -0,0 +1,43 @@ + +Mark a node as a control-plane + +### Synopsis + + +Mark a node as a control-plane + +``` +kubeadm join phase control-plane-join mark-control-plane [flags] +``` + +### Options + +``` + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for mark-control-plane + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md index 674f1b47ec2ee..34bb0f92b5a8e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md @@ -12,52 +12,14 @@ kubeadm join phase control-plane-join update-status [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
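A sketch of the ClusterStatus registration subphase described above, again with a hypothetical `<node-name>`:

```
kubeadm join phase control-plane-join update-status --node-name <node-name>

# ClusterStatus is maintained in the kubeadm-config ConfigMap in kube-system.
kubectl -n kube-system get configmap kubeadm-config -o yaml
```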
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for update-status + --node-name string Specify the node name. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md.orig new file mode 100644 index 0000000000000..34bb0f92b5a8e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md.orig @@ -0,0 +1,44 @@ + +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap + +### Synopsis + + +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap + +``` +kubeadm join phase control-plane-join update-status [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for update-status + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md index 4512d7b41acdf..7aecb0c8cce62 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md @@ -1,10 +1,9 @@ -Prepares the machine for serving a control plane. +Prepare the machine for serving a control plane ### Synopsis - -Prepares the machine for serving a control plane. +Prepare the machine for serving a control plane ``` kubeadm join phase control-plane-prepare [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md.orig new file mode 100644 index 0000000000000..7aecb0c8cce62 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md.orig @@ -0,0 +1,60 @@ + +Prepare the machine for serving a control plane + +### Synopsis + +Prepare the machine for serving a control plane + +``` +kubeadm join phase control-plane-prepare [flags] +``` + +### Examples + +``` + # Prepares the machine for serving a control plane + kubeadm join phase control-plane-prepare all +``` + +### Options + + + + + + + + + + + + + + + + +
+ + + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md index 2f90d51092bd5..9ef35f347a30a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md @@ -1,10 +1,9 @@ -Prepares the machine for serving a control plane. +Prepare the machine for serving a control plane ### Synopsis - -Prepares the machine for serving a control plane. +Prepare the machine for serving a control plane ``` kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] @@ -12,108 +11,22 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
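A sketch of running every control-plane-prepare subphase in one go against an existing cluster; the endpoint and token reuse this page's placeholders, while `<hash>` and `<certificate-key>` are hypothetical:

```
# Runs /download-certs, /certs, /kubeconfig and /control-plane in sequence.
kubeadm join phase control-plane-prepare all 1.2.3.4:6443 \
  --token abcdef.1234567890abcdef \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane \
  --certificate-key <certificate-key>
```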
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for all + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md.orig new file mode 100644 index 0000000000000..9ef35f347a30a --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md.orig @@ -0,0 +1,51 @@ + +Prepare the machine for serving a control plane + +### Synopsis + +Prepare the machine for serving a control plane + +``` +kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for all + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. 
+ --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md index 3a0228912bdeb..e4199954e592c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md @@ -1,10 +1,9 @@ -Generates the certificates for the new control plane components +Generate the certificates for the new control plane components ### Synopsis - -Generates the certificates for the new control plane components +Generate the certificates for the new control plane components ``` kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] @@ -12,94 +11,20 @@ kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
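A sketch for the certs subphase alone; the PKI path shown is the usual kubeadm default and is an assumption here, not taken from this diff:

```
kubeadm join phase control-plane-prepare certs 1.2.3.4:6443 \
  --token abcdef.1234567890abcdef \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane

# The generated certificates and keys are written under the kubeadm PKI directory.
ls /etc/kubernetes/pki
```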
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for certs + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md.orig new file mode 100644 index 0000000000000..e4199954e592c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md.orig @@ -0,0 +1,49 @@ + +Generate the certificates for the new control plane components + +### Synopsis + +Generate the certificates for the new control plane components + +``` +kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for certs + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md index 5952b70f8adcd..a0d501f530214 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md @@ -1,10 +1,9 @@ -Generates the manifests for the new control plane components +Generate the manifests for the new control plane components ### Synopsis - -Generates the manifests for the new control plane components +Generate the manifests for the new control plane components ``` kubeadm join phase control-plane-prepare control-plane [flags] @@ -12,52 +11,14 @@ kubeadm join phase control-plane-prepare control-plane [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
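A sketch for the manifest-generation subphase, assuming a placeholder config file and the default static Pod manifest directory:

```
kubeadm join phase control-plane-prepare control-plane --config kubeadm-join.yaml

# kube-apiserver, kube-controller-manager and kube-scheduler manifests land here.
ls /etc/kubernetes/manifests
```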
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for control-plane +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md.orig new file mode 100644 index 0000000000000..a0d501f530214 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md.orig @@ -0,0 +1,43 @@ + +Generate the manifests for the new control plane components + +### Synopsis + +Generate the manifests for the new control plane components + +``` +kubeadm join phase control-plane-prepare control-plane [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for control-plane +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md index a250bc0f08e3b..2ca0ddcb1c41c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md @@ -1,10 +1,9 @@ -[EXPERIMENTAL] Downloads certificates shared among control-plane nodes from the kubeadm-certs Secret +[EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret ### Synopsis - -[EXPERIMENTAL] Downloads certificates shared among control-plane nodes from the kubeadm-certs Secret +[EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret ``` kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [flags] @@ -12,87 +11,19 @@ kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [f ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
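A sketch of the download-certs flow. The certificate key is produced on the first control plane; in this release the matching upload step is typically `kubeadm init phase upload-certs --upload-certs` (an assumption, since the upload side is not part of this diff). `<hash>` and `<certificate-key>` are hypothetical placeholders:

```
# On the first control-plane node: upload the shared certificates and print the key.
kubeadm init phase upload-certs --upload-certs

# On the joining node: fetch and decrypt the shared certificates.
kubeadm join phase control-plane-prepare download-certs 1.2.3.4:6443 \
  --token abcdef.1234567890abcdef \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --certificate-key <certificate-key>
```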
- - +``` + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for download-certs + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md.orig new file mode 100644 index 0000000000000..2ca0ddcb1c41c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md.orig @@ -0,0 +1,48 @@ + +[EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret + +### Synopsis + +[EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret + +``` +kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [flags] +``` + +### Options + +``` + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for download-certs + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md index 94d0f5c8a8030..535e4eba2980c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md @@ -1,10 +1,9 @@ -Generates the kubeconfig for the new control plane components +Generate the kubeconfig for the new control plane components ### Synopsis - -Generates the kubeconfig for the new control plane components +Generate the kubeconfig for the new control plane components ``` kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags] @@ -12,87 +11,19 @@ kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--certificate-key string    Use this key to decrypt the certificate secrets uploaded by init.
--config string    Path to kubeadm config file.
--discovery-file string    For file-based discovery, a file or URL from which to load cluster information.
--discovery-token string    For token-based discovery, the token used to validate cluster information fetched from the API server.
--discovery-token-ca-cert-hash stringSlice    For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").
--discovery-token-unsafe-skip-ca-verification    For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.
--experimental-control-plane    Create a new control plane instance on this node
-h, --help    help for kubeconfig
--tls-bootstrap-token string    Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.
--token string    Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.
- - +``` + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for kubeconfig + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md.orig new file mode 100644 index 0000000000000..535e4eba2980c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md.orig @@ -0,0 +1,48 @@ + +Generate the kubeconfig for the new control plane components + +### Synopsis + +Generate the kubeconfig for the new control plane components + +``` +kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags] +``` + +### Options + +``` + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for kubeconfig + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md index a533bf41543c5..68278bd32907d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md @@ -1,10 +1,9 @@ -Writes kubelet settings, certificates and (re)starts the kubelet +Write kubelet settings, certificates and (re)start the kubelet ### Synopsis - -Writes a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)starts kubelet. +Write a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)start kubelet. ``` kubeadm join phase kubelet-start [api-server-endpoint] [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md.orig new file mode 100644 index 0000000000000..68278bd32907d --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md.orig @@ -0,0 +1,116 @@ + +Write kubelet settings, certificates and (re)start the kubelet + +### Synopsis + +Write a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)start kubelet. + +``` +kubeadm join phase kubelet-start [api-server-endpoint] [flags] +``` + +### Options + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--config string    Path to kubeadm config file.
--cri-socket string    Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
--discovery-file string    For file-based discovery, a file or URL from which to load cluster information.
--discovery-token string    For token-based discovery, the token used to validate cluster information fetched from the API server.
--discovery-token-ca-cert-hash stringSlice    For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").
--discovery-token-unsafe-skip-ca-verification    For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.
-h, --help    help for kubelet-start
--node-name string    Specify the node name.
--tls-bootstrap-token string    Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.
--token string    Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.
+ + + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md index 09fbeb55a023e..5570c4f07b22c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md @@ -19,122 +19,24 @@ kubeadm join phase preflight [api-server-endpoint] [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--apiserver-advertise-address string    If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-bind-port int32    Default: 6443    If the node should host a new control plane instance, the port for the API Server to bind to.
--certificate-key string    Use this key to decrypt the certificate secrets uploaded by init.
--config string    Path to kubeadm config file.
--cri-socket string    Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
--discovery-file string    For file-based discovery, a file or URL from which to load cluster information.
--discovery-token string    For token-based discovery, the token used to validate cluster information fetched from the API server.
--discovery-token-ca-cert-hash stringSlice    For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").
--discovery-token-unsafe-skip-ca-verification    For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.
--experimental-control-plane    Create a new control plane instance on this node
-h, --help    help for preflight
--ignore-preflight-errors stringSlice    A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
--node-name string    Specify the node name.
--tls-bootstrap-token string    Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.
--token string    Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.
- - +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for preflight + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` ### Options inherited from parent commands diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md.orig new file mode 100644 index 0000000000000..5570c4f07b22c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md.orig @@ -0,0 +1,61 @@ + +Run join pre-flight checks + +### Synopsis + + +Run pre-flight checks for kubeadm join. + +``` +kubeadm join phase preflight [api-server-endpoint] [flags] +``` + +### Examples + +``` + # Run join pre-flight checks using a config file. + kubeadm join phase preflight --config kubeadm-config.yml +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --control-plane Create a new control plane instance on this node + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. 
+ --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for preflight + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + + + + + + + + + + + + + + + + +
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md index 2ec107141aed7..8ce1e6dfba8ab 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md @@ -1,10 +1,18 @@ -Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join'. +Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join' ### Synopsis +Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join' + +The "reset" command executes the following phases: +``` +preflight Run reset pre-flight checks +update-cluster-status Remove this node from the ClusterStatus object. +remove-etcd-member Remove a local etcd member. +cleanup-node Run cleanup node. +``` -Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join'. ``` kubeadm reset [flags] @@ -12,78 +20,19 @@ kubeadm reset [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string    Default: "/etc/kubernetes/pki"    The path to the directory where the certificates are stored. If specified, clean this directory.
--cri-socket string    Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
-f, --force    Reset the node without prompting for confirmation.
-h, --help    help for reset
--ignore-preflight-errors stringSlice    A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
- - +``` + --cert-dir string The path to the directory where the certificates are stored. If specified, clean this directory. (default "/etc/kubernetes/pki") + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + -f, --force Reset the node without prompting for confirmation. + -h, --help help for reset + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --skip-phases strings List of phases to be skipped +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md.orig new file mode 100644 index 0000000000000..8ce1e6dfba8ab --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md.orig @@ -0,0 +1,38 @@ + +Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join' + +### Synopsis + +Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join' + +The "reset" command executes the following phases: +``` +preflight Run reset pre-flight checks +update-cluster-status Remove this node from the ClusterStatus object. +remove-etcd-member Remove a local etcd member. +cleanup-node Run cleanup node. +``` + + +``` +kubeadm reset [flags] +``` + +### Options + +``` + --cert-dir string The path to the directory where the certificates are stored. If specified, clean this directory. (default "/etc/kubernetes/pki") + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + -f, --force Reset the node without prompting for confirmation. + -h, --help help for reset + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --skip-phases strings List of phases to be skipped +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md new file mode 100644 index 0000000000000..cec28480c045f --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md @@ -0,0 +1,19 @@ + +Use this command to invoke single phase of the reset workflow + +### Synopsis + +Use this command to invoke single phase of the reset workflow + +### Options + +``` + -h, --help help for phase +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md new file mode 100644 index 0000000000000..2d7c4e99268ca --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md @@ -0,0 +1,25 @@ + +Run cleanup node. + +### Synopsis + +Run cleanup node. + +``` +kubeadm reset phase cleanup-node [flags] +``` + +### Options + +``` + --cert-dir string The path to the directory where the certificates are stored. If specified, clean this directory. (default "/etc/kubernetes/pki") + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. 
+ -h, --help help for cleanup-node +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md new file mode 100644 index 0000000000000..e40c9707123f7 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md @@ -0,0 +1,25 @@ + +Run reset pre-flight checks + +### Synopsis + +Run pre-flight checks for kubeadm reset. + +``` +kubeadm reset phase preflight [flags] +``` + +### Options + +``` + -f, --force Reset the node without prompting for confirmation. + -h, --help help for preflight + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md new file mode 100644 index 0000000000000..7642494a5bec2 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md @@ -0,0 +1,24 @@ + +Remove a local etcd member. + +### Synopsis + +Remove a local etcd member for a control plane node. + +``` +kubeadm reset phase remove-etcd-member [flags] +``` + +### Options + +``` + -h, --help help for remove-etcd-member + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md new file mode 100644 index 0000000000000..08b87e73ae125 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md @@ -0,0 +1,23 @@ + +Remove this node from the ClusterStatus object. + +### Synopsis + +Remove this node from the ClusterStatus object if the node is a control plane node. + +``` +kubeadm reset phase update-cluster-status [flags] +``` + +### Options + +``` + -h, --help help for update-cluster-status +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md index 2fe531f9e438e..6899b98296d1b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md @@ -1,10 +1,9 @@ -Manage bootstrap tokens. +Manage bootstrap tokens ### Synopsis - This command manages bootstrap tokens. It is optional and needed only for advanced use cases. 
In short, bootstrap tokens are used for establishing bidirectional trust between a client and a server. @@ -29,54 +28,17 @@ kubeadm token [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--dry-run    Whether to enable dry-run mode or not
-h, --help    help for token
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
- - +``` + --dry-run Whether to enable dry-run mode or not + -h, --help help for token + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` ### Options inherited from parent commands - - - - - - - - - - - - - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +```
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md.orig new file mode 100644 index 0000000000000..6899b98296d1b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md.orig @@ -0,0 +1,47 @@ + +Manage bootstrap tokens + +### Synopsis + + +This command manages bootstrap tokens. It is optional and needed only for advanced use cases. + +In short, bootstrap tokens are used for establishing bidirectional trust between a client and a server. +A bootstrap token can be used when a client (for example a node that is about to join the cluster) needs +to trust the server it is talking to. Then a bootstrap token with the "signing" usage can be used. +bootstrap tokens can also function as a way to allow short-lived authentication to the API Server +(the token serves as a way for the API Server to trust the client), for example for doing the TLS Bootstrap. + +What is a bootstrap token more exactly? + - It is a Secret in the kube-system namespace of type "bootstrap.kubernetes.io/token". + - A bootstrap token must be of the form "[a-z0-9]{6}.[a-z0-9]{16}". The former part is the public token ID, + while the latter is the Token Secret and it must be kept private at all circumstances! + - The name of the Secret must be named "bootstrap-token-(token-id)". + +You can read more about bootstrap tokens here: + https://kubernetes.io/docs/admin/bootstrap-tokens/ + + +``` +kubeadm token [flags] +``` + +### Options + +``` + --dry-run Whether to enable dry-run mode or not + -h, --help help for token + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + + + + + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md index 637e42469f193..6c35a2166bc91 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md @@ -1,10 +1,9 @@ -Create bootstrap tokens on the server. +Create bootstrap tokens on the server ### Synopsis - This command will create a bootstrap token for you. You can specify the usages for this token, the "time to live" and an optional human friendly description. @@ -19,99 +18,21 @@ kubeadm token create [token] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--config string    Path to a kubeadm configuration file.
--description string    A human friendly description of how this token is used.
--groups stringSlice    Default: [system:bootstrappers:kubeadm:default-node-token]    Extra groups that this token will authenticate as when used for authentication. Must match "\\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\\z"
-h, --help    help for create
--print-join-command    Instead of printing only the token, print the full 'kubeadm join' flag needed to join the cluster using the token.
--ttl duration    Default: 24h0m0s    The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire
--usages stringSlice    Default: [signing,authentication]    Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [signing,authentication]
- - +``` + --config string Path to a kubeadm configuration file. + --description string A human friendly description of how this token is used. + --groups strings Extra groups that this token will authenticate as when used for authentication. Must match "\\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\\z" (default [system:bootstrappers:kubeadm:default-node-token]) + -h, --help help for create + --print-join-command Instead of printing only the token, print the full 'kubeadm join' flag needed to join the cluster using the token. + --ttl duration The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire (default 24h0m0s) + --usages strings Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [signing,authentication] (default [signing,authentication]) +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--dry-run    Whether to enable dry-run mode or not
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md.orig new file mode 100644 index 0000000000000..6c35a2166bc91 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md.orig @@ -0,0 +1,38 @@ + +Create bootstrap tokens on the server + +### Synopsis + + +This command will create a bootstrap token for you. +You can specify the usages for this token, the "time to live" and an optional human friendly description. + +The [token] is the actual token to write. +This should be a securely generated random token of the form "[a-z0-9]{6}.[a-z0-9]{16}". +If no [token] is given, kubeadm will generate a random token instead. + + +``` +kubeadm token create [token] +``` + +### Options + +``` + --config string Path to a kubeadm configuration file. + --description string A human friendly description of how this token is used. + --groups strings Extra groups that this token will authenticate as when used for authentication. Must match "\\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\\z" (default [system:bootstrappers:kubeadm:default-node-token]) + -h, --help help for create + --print-join-command Instead of printing only the token, print the full 'kubeadm join' flag needed to join the cluster using the token. + --ttl duration The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire (default 24h0m0s) + --usages strings Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [signing,authentication] (default [signing,authentication]) +``` + +### Options inherited from parent commands + +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md index 57f1fec727084..8dd1908b43ce2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md @@ -1,73 +1,30 @@ -Delete bootstrap tokens on the server. +Delete bootstrap tokens on the server ### Synopsis - -This command will delete a given bootstrap token for you. +This command will delete a list of bootstrap tokens for you. The [token-value] is the full Token of the form "[a-z0-9]{6}.[a-z0-9]{16}" or the Token ID of the form "[a-z0-9]{6}" to delete. ``` -kubeadm token delete [token-value] +kubeadm token delete [token-value] ... ``` ### Options - - - - - - - - - - - - - - - -
-h, --help    help for delete
- - +``` + -h, --help help for delete +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--dry-run    Whether to enable dry-run mode or not
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md.orig new file mode 100644 index 0000000000000..8dd1908b43ce2 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md.orig @@ -0,0 +1,30 @@ + +Delete bootstrap tokens on the server + +### Synopsis + + +This command will delete a list of bootstrap tokens for you. + +The [token-value] is the full Token of the form "[a-z0-9]{6}.[a-z0-9]{16}" or the +Token ID of the form "[a-z0-9]{6}" to delete. + + +``` +kubeadm token delete [token-value] ... +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md index 1ff495dda5460..a49dfb185810c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md @@ -1,10 +1,9 @@ -Generate and print a bootstrap token, but do not create it on the server. +Generate and print a bootstrap token, but do not create it on the server ### Synopsis - This command will print out a randomly-generated bootstrap token that can be used with the "init" and "join" commands. @@ -22,57 +21,15 @@ kubeadm token generate [flags] ### Options - - - - - - - - - - - - - - - -
-h, --help    help for generate
- - +``` + -h, --help help for generate +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--dry-run    Whether to enable dry-run mode or not
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md.orig new file mode 100644 index 0000000000000..a49dfb185810c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md.orig @@ -0,0 +1,35 @@ + +Generate and print a bootstrap token, but do not create it on the server + +### Synopsis + + +This command will print out a randomly-generated bootstrap token that can be used with +the "init" and "join" commands. + +You don't have to use this command in order to generate a token. You can do so +yourself as long as it is in the format "[a-z0-9]{6}.[a-z0-9]{16}". This +command is provided for convenience to generate tokens in the given format. + +You can also use "kubeadm init" without specifying a token and it will +generate and print one for you. + + +``` +kubeadm token generate [flags] +``` + +### Options + +``` + -h, --help help for generate +``` + +### Options inherited from parent commands + +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md index bf2023fc45e66..b4d2699d13f6e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md @@ -1,10 +1,9 @@ -List bootstrap tokens on the server. +List bootstrap tokens on the server ### Synopsis - This command will list all bootstrap tokens for you. @@ -14,57 +13,15 @@ kubeadm token list [flags] ### Options - - - - - - - - - - - - - - - -
-h, --help    help for list
- - +``` + -h, --help help for list +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--dry-run    Whether to enable dry-run mode or not
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md.orig new file mode 100644 index 0000000000000..b4d2699d13f6e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md.orig @@ -0,0 +1,27 @@ + +List bootstrap tokens on the server + +### Synopsis + + +This command will list all bootstrap tokens for you. + + +``` +kubeadm token list [flags] +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --dry-run Whether to enable dry-run mode or not + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md index dd3d5f9d897ec..fd29befad9ea0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md @@ -1,10 +1,9 @@ -Upgrade your cluster smoothly to a newer version with this command. +Upgrade your cluster smoothly to a newer version with this command ### Synopsis - -Upgrade your cluster smoothly to a newer version with this command. +Upgrade your cluster smoothly to a newer version with this command ``` kubeadm upgrade [flags] @@ -12,43 +11,13 @@ kubeadm upgrade [flags] ### Options - - - - - - - - - - - - - - - -
-h, --help    help for upgrade
- - +``` + -h, --help help for upgrade +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md index 6cdbf8e7e4a10..7a0ad92572ac8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md @@ -1,10 +1,9 @@ -Upgrade your Kubernetes cluster to the specified version. +Upgrade your Kubernetes cluster to the specified version ### Synopsis - -Upgrade your Kubernetes cluster to the specified version. +Upgrade your Kubernetes cluster to the specified version ``` kubeadm upgrade apply [version] @@ -12,127 +11,26 @@ kubeadm upgrade apply [version] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--allow-experimental-upgrades    Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.
--allow-release-candidate-upgrades    Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.
--config string    Path to a kubeadm configuration file.
--dry-run    Do not change any state, just output what actions would be performed.
--etcd-upgrade    Default: true    Perform the upgrade of etcd.
--feature-gates string    A set of key=value pairs that describe feature gates for various features. Options are:
-f, --force    Force upgrading although some requirements might not be met. This also implies non-interactive mode.
-h, --help    help for apply
--ignore-preflight-errors stringSlice    A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
--image-pull-timeout duration    Default: 15m0s    The maximum amount of time to wait for the control plane pods to be downloaded.
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--print-config    Specifies whether the configuration file that will be used in the upgrade should be printed or not.
-y, --yes    Perform the upgrade and do not prompt for confirmation (non-interactive mode).
- - +``` + --allow-experimental-upgrades Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes. + --allow-release-candidate-upgrades Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes. + --certificate-renewal Perform the renewal of certificates used by component changed during upgrades. (default true) + --config string Path to a kubeadm configuration file. + --dry-run Do not change any state, just output what actions would be performed. + --etcd-upgrade Perform the upgrade of etcd. (default true) + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -f, --force Force upgrading although some requirements might not be met. This also implies non-interactive mode. + -h, --help help for apply + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --image-pull-timeout duration The maximum amount of time to wait for the control plane pods to be downloaded. (default 15m0s) + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --print-config Specifies whether the configuration file that will be used in the upgrade should be printed or not. + -y, --yes Perform the upgrade and do not prompt for confirmation (non-interactive mode). +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md.orig new file mode 100644 index 0000000000000..7a0ad92572ac8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md.orig @@ -0,0 +1,36 @@ + +Upgrade your Kubernetes cluster to the specified version + +### Synopsis + +Upgrade your Kubernetes cluster to the specified version + +``` +kubeadm upgrade apply [version] +``` + +### Options + +``` + --allow-experimental-upgrades Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes. + --allow-release-candidate-upgrades Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes. + --certificate-renewal Perform the renewal of certificates used by component changed during upgrades. (default true) + --config string Path to a kubeadm configuration file. + --dry-run Do not change any state, just output what actions would be performed. + --etcd-upgrade Perform the upgrade of etcd. (default true) + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -f, --force Force upgrading although some requirements might not be met. This also implies non-interactive mode. + -h, --help help for apply + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --image-pull-timeout duration The maximum amount of time to wait for the control plane pods to be downloaded. (default 15m0s) + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --print-config Specifies whether the configuration file that will be used in the upgrade should be printed or not. + -y, --yes Perform the upgrade and do not prompt for confirmation (non-interactive mode). +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md index 0403034bdfaab..950b91d7eaca5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md @@ -3,7 +3,6 @@ Show what differences would be applied to existing static pod manifests. See als ### Synopsis - Show what differences would be applied to existing static pod manifests. See also: kubeadm upgrade apply --dry-run ``` @@ -12,78 +11,18 @@ kubeadm upgrade diff [version] [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--api-server-manifest string    Default: "/etc/kubernetes/manifests/kube-apiserver.yaml"    path to API server manifest
--config string    Path to a kubeadm configuration file.
-c, --context-lines int    Default: 3    How many lines of context in the diff
--controller-manager-manifest string    Default: "/etc/kubernetes/manifests/kube-controller-manager.yaml"    path to controller manifest
-h, --help    help for diff
--scheduler-manifest string    Default: "/etc/kubernetes/manifests/kube-scheduler.yaml"    path to scheduler manifest
- - +``` + --api-server-manifest string path to API server manifest (default "/etc/kubernetes/manifests/kube-apiserver.yaml") + --config string Path to a kubeadm configuration file. + -c, --context-lines int How many lines of context in the diff (default 3) + --controller-manager-manifest string path to controller manifest (default "/etc/kubernetes/manifests/kube-controller-manager.yaml") + -h, --help help for diff + --scheduler-manifest string path to scheduler manifest (default "/etc/kubernetes/manifests/kube-scheduler.yaml") +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md index 0c76e50de5b8a..0c58916493e76 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md @@ -1,10 +1,16 @@ -Upgrade commands for a node in the cluster. Currently only supports upgrading the configuration, not the kubelet itself. +Upgrade commands for a node in the cluster ### Synopsis +Upgrade commands for a node in the cluster + +The "node" command executes the following phases: +``` +control-plane Upgrade the control plane instance deployed on this node, if any +kubelet-config Upgrade the kubelet configuration for this node +``` -Upgrade commands for a node in the cluster. Currently only supports upgrading the configuration, not the kubelet itself. ``` kubeadm upgrade node [flags] @@ -12,43 +18,17 @@ kubeadm upgrade node [flags] ### Options - - - - - - - - - - - - - - - -
-h, --help    help for node
- - +``` + --dry-run Do not change any state, just output the actions that would be performed. + -h, --help help for node + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --kubelet-version string The *desired* version for the kubelet config after the upgrade. If not specified, the KubernetesVersion from the kubeadm-config ConfigMap will be used + --skip-phases strings List of phases to be skipped +``` ### Options inherited from parent commands - - - - - - - - - - - - - - - -
--rootfs string    [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_config.md.orig similarity index 100% rename from content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_config.md rename to content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_config.md.orig diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_experimental-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_experimental-control-plane.md.orig similarity index 100% rename from content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_experimental-control-plane.md rename to content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_experimental-control-plane.md.orig diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md new file mode 100644 index 0000000000000..872e9f593d9ae --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md @@ -0,0 +1,19 @@ + +Use this command to invoke single phase of the node workflow + +### Synopsis + +Use this command to invoke single phase of the node workflow + +### Options + +``` + -h, --help help for phase +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md new file mode 100644 index 0000000000000..ed0d571997e00 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md @@ -0,0 +1,25 @@ + +Upgrade the control plane instance deployed on this node, if any + +### Synopsis + +Upgrade the control plane instance deployed on this node, if any + +``` +kubeadm upgrade node phase control-plane [flags] +``` + +### Options + +``` + --dry-run Do not change any state, just output the actions that would be performed. + -h, --help help for control-plane + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md new file mode 100644 index 0000000000000..dee8786af7632 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md @@ -0,0 +1,26 @@ + +Upgrade the kubelet configuration for this node + +### Synopsis + +Download the kubelet configuration from a ConfigMap of the form "kubelet-config-1.X" in the cluster, where X is the minor version of the kubelet. 
kubeadm uses the KubernetesVersion field in the kubeadm-config ConfigMap to determine what the desired kubelet version is, but the user can override this by using the --kubelet-version parameter.
+
+```
+kubeadm upgrade node phase kubelet-config [flags]
+```
+
+### Options
+
+```
+      --dry-run                  Do not change any state, just output the actions that would be performed.
+  -h, --help                     help for kubelet-config
+      --kubeconfig string        The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf")
+      --kubelet-version string   The *desired* version for the kubelet config after the upgrade. If not specified, the KubernetesVersion from the kubeadm-config ConfigMap will be used
+```
+
+### Options inherited from parent commands
+
+```
+      --rootfs string   [EXPERIMENTAL] The path to the 'real' host root filesystem.
+```
+
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md
index 3c37a56b662cc..62d73bd99a9cf 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md
@@ -1,10 +1,9 @@
-Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter.
+Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter

### Synopsis

-
-Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter.
+Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter

```
kubeadm upgrade plan [version] [flags]
```

@@ -12,92 +11,20 @@ kubeadm upgrade plan [version] [flags]

### Options
-   --allow-experimental-upgrades
-   Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.
-   --allow-release-candidate-upgrades
-   Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.
-   --config string
-   Path to a kubeadm configuration file.
-   --feature-gates string
-   A set of key=value pairs that describe feature gates for various features. Options are:
-   -h, --help
-   help for plan
-   --ignore-preflight-errors stringSlice
-   A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
-   --kubeconfig string     Default: "/etc/kubernetes/admin.conf"
-   The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-   --print-config
-   Specifies whether the configuration file that will be used in the upgrade should be printed or not.
+```
+      --allow-experimental-upgrades        Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.
+      --allow-release-candidate-upgrades   Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.
+      --config string                      Path to a kubeadm configuration file.
+      --feature-gates string               A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release.
+  -h, --help                               help for plan
+      --ignore-preflight-errors strings    A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
+      --kubeconfig string                  The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf")
+      --print-config                       Specifies whether the configuration file that will be used in the upgrade should be printed or not.
+```

### Options inherited from parent commands
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md.orig b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md.orig new file mode 100644 index 0000000000000..62d73bd99a9cf --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md.orig @@ -0,0 +1,30 @@ + +Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter + +### Synopsis + +Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter + +``` +kubeadm upgrade plan [version] [flags] +``` + +### Options + +``` + --allow-experimental-upgrades Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes. + --allow-release-candidate-upgrades Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes. + --config string Path to a kubeadm configuration file. + --feature-gates string A set of key=value pairs that describe feature gates for various features. No feature gates are available in this release. + -h, --help help for plan + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --kubeconfig string The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. (default "/etc/kubernetes/admin.conf") + --print-config Specifies whether the configuration file that will be used in the upgrade should be printed or not. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md index 9e824f238c491..df927d0759627 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md @@ -3,7 +3,6 @@ Print the version of kubeadm ### Synopsis - Print the version of kubeadm ``` @@ -12,50 +11,14 @@ kubeadm version [flags] ### Options - - - - - - - - - - - - - - - - - - - - - - -
-   -h, --help
-   help for version
-   -o, --output string
-   Output format; available options are 'yaml', 'json' and 'short'
+```
+  -h, --help            help for version
+  -o, --output string   Output format; available options are 'yaml', 'json' and 'short'
+```

### Options inherited from parent commands
-   --rootfs string
-   [EXPERIMENTAL] The path to the 'real' host root filesystem.
- - +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index 067f3aa126cf2..1929de0015f7c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -465,26 +465,6 @@ Deploy the `kube-dns` Deployment and Service: - It's the upstream CoreDNS deployment relatively unmodified - The `kube-dns` ServiceAccount is bound to the privileges in the `system:kube-dns` ClusterRole -### Optional self-hosting - -To enable self hosting on an existing static Pod control-plane use `kubeadm alpha selfhosting pivot`. - -Self hosting basically replaces static Pods for control plane components with DaemonSets; this is achieved by executing -following procedure for API server, scheduler and controller manager static Pods: - -- Load the static Pod specification from disk -- Extract the PodSpec from static Pod manifest file -- Mutate the PodSpec to be compatible with self-hosting, and more in detail: - - Add node selector attribute targeting nodes with `node-role.kubernetes.io/master=""` label, - - Add a toleration for `node-role.kubernetes.io/master:NoSchedule` taint, - - Set `spec.DNSPolicy` to `ClusterFirstWithHostNet` -- Build a new DaemonSet object for the self-hosted component in question. Use the above mentioned PodSpec -- Create the DaemonSet resource in `kube-system` namespace. Wait until the Pods are running. -- Remove the static Pod manifest file. The kubelet will stop the original static Pod-hosted component that was running - -Please note that self hosting is not yet resilient to node restarts; this can be fixed with external checkpointing or with kubelet checkpointing - for the control plane Pods. See [self-hosting](/docs/reference/setup-tools/kubeadm/kubeadm-init/#self-hosting) for more details. - ## kubeadm join phases internal design Similarly to `kubeadm init`, also `kubeadm join` internal workflow consists of a sequence of atomic work tasks to perform. diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index ad59320140614..35e38af3f723f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -14,19 +14,41 @@ weight: 90 ## kubeadm alpha certs renew {#cmd-certs-renew} You can renew all Kubernetes certificates using the `all` subcommand or renew them selectively. +For more details about certificate expiration and renewal see the [certificate management documentation](docs/tasks/administer-cluster/kubeadm/kubeadm-certs). 
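+
+As a quick sketch (assuming these commands are run on a control-plane node using kubeadm's default certificate directory), checking expiration and renewing everything at once might look like this:
+
+```shell
+# Assumes the default kubeadm PKI location (/etc/kubernetes/pki)
+# List the expiration dates of the certificates managed by kubeadm
+kubeadm alpha certs check-expiration
+
+# Renew all of them in one go; the individual subcommands below renew a single certificate
+kubeadm alpha certs renew all
+```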
{{< tabs name="tab-certs-renew" >}} {{< tab name="renew" include="generated/kubeadm_alpha_certs_renew.md" />}} {{< tab name="all" include="generated/kubeadm_alpha_certs_renew_all.md" />}} +{{< tab name="admin.conf" include="generated/kubeadm_alpha_certs_renew_admin.conf.md" />}} {{< tab name="apiserver-etcd-client" include="generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md" />}} {{< tab name="apiserver-kubelet-client" include="generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md" />}} {{< tab name="apiserver" include="generated/kubeadm_alpha_certs_renew_apiserver.md" />}} +{{< tab name="controller-manager.conf" include="generated/kubeadm_alpha_certs_renew_controller-manager.conf.md" />}} {{< tab name="etcd-healthcheck-client" include="generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md" />}} {{< tab name="etcd-peer" include="generated/kubeadm_alpha_certs_renew_etcd-peer.md" />}} {{< tab name="etcd-server" include="generated/kubeadm_alpha_certs_renew_etcd-server.md" />}} {{< tab name="front-proxy-client" include="generated/kubeadm_alpha_certs_renew_front-proxy-client.md" />}} +{{< tab name="scheduler.conf" include="generated/kubeadm_alpha_certs_renew_scheduler.conf.md" />}} {{< /tabs >}} +## kubeadm alpha certs certificate-key {#cmd-certs-certificate-key} + +This command can be used to generate a new control-plane certificate key. +The key can be passed as `--certificate-key` to `kubeadm init` and `kubeadm join` +to enable the automatic copy of certificates when joining additional control-plane nodes. + +{{< tabs name="tab-certs-certificate-key" >}} +{{< tab name="certificate-key" include="generated/kubeadm_alpha_certs_certificate-key.md" />}} +{{< /tabs >}} + +## kubeadm alpha certs check-expiration {#cmd-certs-check-expiration} + +This command checks expiration for the certificates in the local PKI managed by kubeadm. +For more details about certificate expiration and renewal see the [certificate management documentation](docs/tasks/administer-cluster/kubeadm/kubeadm-certs). + +{{< tabs name="tab-certs-check-expiration" >}} +{{< tab name="check-expiration" include="generated/kubeadm_alpha_certs_check-expiration.md" />}} +{{< /tabs >}} ## kubeadm alpha kubeconfig user {#cmd-phase-kubeconfig} @@ -51,7 +73,9 @@ to enable the DynamicKubeletConfiguration feature. ## kubeadm alpha selfhosting pivot {#cmd-selfhosting} -The subcommand `pivot` can be used to conver a static Pod-hosted control plane into a self-hosted one. +The subcommand `pivot` can be used to convert a static Pod-hosted control plane into a self-hosted one. + +[Documentation](/docs/setup/independent/self-hosting) {{< tabs name="selfhosting" >}} {{< tab name="selfhosting" include="generated/kubeadm_alpha_selfhosting.md" />}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index e7ea1eaefe02d..b740d18e9cfd2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -28,12 +28,6 @@ has to be used. 
{{% /capture %}} {{% capture body %}} -## kubeadm config upload from-file {#cmd-config-from-file} -{{< include "generated/kubeadm_config_upload_from-file.md" >}} - -## kubeadm config upload from-flags {#cmd-config-from-flags} -{{< include "generated/kubeadm_config_upload_from-flags.md" >}} - ## kubeadm config view {#cmd-config-view} {{< include "generated/kubeadm_config_view.md" >}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index b5644d854c50d..7cbbfcbddb1c9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -32,7 +32,7 @@ Can be used to create all required certificates by kubeadm. {{< tab name="etcd-server" include="generated/kubeadm_init_phase_certs_etcd-server.md" />}} {{< tab name="front-proxy-ca" include="generated/kubeadm_init_phase_certs_front-proxy-ca.md" />}} {{< tab name="front-proxy-client" include="generated/kubeadm_init_phase_certs_front-proxy-client.md" />}} -{{< tab name="certs_sa" include="generated/kubeadm_init_phase_certs_sa.md" />}} +{{< tab name="sa" include="generated/kubeadm_init_phase_certs_sa.md" />}} {{< /tabs >}} ## kubeadm init phase kubeconfig {#cmd-phase-kubeconfig} @@ -145,18 +145,18 @@ kubeadm config images list/pull --config=someconfig.yaml kubeadm upgrade apply --config=someconfig.yaml ``` -The file has to contain a [`DNS`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1#DNS) field in[`ClusterConfiguration`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1#ClusterConfiguration) +The file has to contain a [`DNS`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#DNS) field in[`ClusterConfiguration`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration) and also a type for the addon - `kube-dns` (default value is `CoreDNS`). ```yaml -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration dns: type: "kube-dns" ``` -For more details on each field in the `v1beta1` configuration you can navigate to our -[API reference pages.] (https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1) +For more details on each field in the `v1beta2` configuration you can navigate to our +[API reference pages.] (https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) ## What's next * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 7b473188ad7d4..e84c50c66e35b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -92,7 +92,7 @@ You can also use `--help` to see the list of sub-phases for a certain parent pha sudo kubeadm init phase control-plane --help ``` -`kubeadm init` also expose a flag called `--skip-phases` that can be used to skip certain phases. The flag accepts a list of phase names and the names can be taken from the above ordered list. +`kubeadm init` also exposes a flag called `--skip-phases` that can be used to skip certain phases. The flag accepts a list of phase names and the names can be taken from the above ordered list. 
An example: @@ -118,12 +118,11 @@ configuration file options. This file is passed in the `--config` option. In Kubernetes 1.11 and later, the default configuration can be printed out using the [kubeadm config print](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -It is **recommended** that you migrate your old `v1alpha3` configuration to `v1beta1` using -the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command, -because `v1alpha3` will be removed in Kubernetes 1.15. +It is **recommended** that you migrate your old `v1beta1` configuration to `v1beta2` using +the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -For more details on each field in the `v1beta1` configuration you can navigate to our -[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1). +For more details on each field in the `v1beta2` configuration you can navigate to our +[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2). ### Adding kube-proxy parameters {#kube-proxy} @@ -155,6 +154,30 @@ Allowed customization are: Please note that the configuration field `kubernetesVersion` or the command line flag `--kubernetes-version` affect the version of the images. +### Uploading control-plane certificates to the cluster + +By adding the flag `--upload-certs` to `kubeadm init` you can temporary upload +the control-plane certificates to a Secret in the cluster. Please note that this Secret +will expire automatically after 2 hours. The certificates are encrypted using +a 32byte key that can be specified using `--certificate-key`. The same key can be used +to download the certificates when additional control-plane nodes are joining, by passing +`--control-plane` and `--certificate-key` to `kubeadm join`. + +The following phase command can be used to re-upload the certificates after expiration: + +``` +kubeadm init phase upload-certs --upload-certs --certificate-key=SOME_VALUE +``` + +If the flag `--certificate-key` is not passed to `kubeadm init` and +`kubeadm init phase upload-certs` a new key will be generated automatically. + +The following command can be used to generate a new key on demand: + +``` +kubeadm alpha certs certificate-key +``` + ### Using custom certificates {#custom-certificates} By default, kubeadm generates all the certificates needed for a cluster to run. @@ -187,8 +210,51 @@ The kubeadm package ships with configuration for how the kubelet should be run. Note that the `kubeadm` CLI command never touches this drop-in file. This drop-in file belongs to the kubeadm deb/rpm package. -To find out more about how kubeadm manages the kubelet have a look at -[this page](/docs/setup/independent/kubelet-integration). +This is what it looks like: + + +``` +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf +--kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating +the KUBELET_KUBEADM_ARGS variable dynamically +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, +#the user should use the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. +# KUBELET_EXTRA_ARGS should be sourced from this file. 
+EnvironmentFile=-/etc/default/kubelet +ExecStart= +ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS +``` + +Here's a breakdown of what/why: + +* `--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf` path to a kubeconfig + file that is used to get client certificates for kubelet during node join. + On success, a kubeconfig file is written to the path specified by `--kubeconfig`. +* `--kubeconfig=/etc/kubernetes/kubelet.conf` points to the kubeconfig file that + tells the kubelet where the API server is. This file also has the kubelet's + credentials. +* `--pod-manifest-path=/etc/kubernetes/manifests` specifies from where to read + static Pod manifests used for starting the control plane. +* `--allow-privileged=true` allows this kubelet to run privileged Pods. +* `--network-plugin=cni` uses CNI networking. +* `--cni-conf-dir=/etc/cni/net.d` specifies where to look for the + [CNI spec file(s)](https://github.com/containernetworking/cni/blob/master/SPEC.md). +* `--cni-bin-dir=/opt/cni/bin` specifies where to look for the actual CNI binaries. +* `--cluster-dns=10.96.0.10` use this cluster-internal DNS server for `nameserver` + entries in Pods' `/etc/resolv.conf`. +* `--cluster-domain=cluster.local` uses this cluster-internal DNS domain for + `search` entries in Pods' `/etc/resolv.conf`. +* `--client-ca-file=/etc/kubernetes/pki/ca.crt` authenticates requests to the Kubelet + API using this CA certificate. +* `--authorization-mode=Webhook` authorizes requests to the Kubelet API by `POST`-ing + a `SubjectAccessReview` to the API server. +* `--rotate-certificates` auto rotate the kubelet client certificates by requesting new + certificates from the `kube-apiserver` when the certificate expiration approaches. +* `--cert-dir`the directory where the TLS certs are located. ### Use kubeadm with CRI runtimes @@ -236,57 +302,6 @@ to the kubelet. Be aware that overriding the hostname can [interfere with cloud providers](https://github.com/kubernetes/website/pull/8873). -### Self-hosting the Kubernetes control plane {#self-hosting} - -As of 1.8, you can experimentally create a _self-hosted_ Kubernetes control -plane. This means that key components such as the API server, controller -manager, and scheduler run as [DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) -configured via the Kubernetes API instead of [static pods](/docs/tasks/administer-cluster/static-pod/) -configured in the kubelet via static files. - -To create a self-hosted cluster see the `kubeadm alpha selfhosting` command. - -#### Caveats - -1. Self-hosting in 1.8 and later has some important limitations. In particular, a - self-hosted cluster _cannot recover from a reboot of the control-plane node_ - without manual intervention. - -1. A self-hosted cluster is not upgradeable using `kubeadm upgrade`. - -1. By default, self-hosted control plane Pods rely on credentials loaded from - [`hostPath`](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) - volumes. Except for initial creation, these credentials are not managed by - kubeadm. - -1. The self-hosted portion of the control plane does not include etcd, - which still runs as a static Pod. - -#### Process - -The self-hosting bootstrap process is documented in the [kubeadm design -document](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting). - -In summary, `kubeadm alpha selfhosting` works as follows: - - 1. 
Waits for this bootstrap static control plane to be running and - healthy. This is identical to the `kubeadm init` process without self-hosting. - - 1. Uses the static control plane Pod manifests to construct a set of - DaemonSet manifests that will run the self-hosted control plane. - It also modifies these manifests where necessary, for example adding new volumes - for secrets. - - 1. Creates DaemonSets in the `kube-system` namespace and waits for the - resulting Pods to be running. - - 1. Once self-hosted Pods are operational, their associated static Pods are deleted - and kubeadm moves on to install the next component. This triggers kubelet to - stop those static Pods. - - 1. When the original static control plane stops, the new self-hosted control - plane is able to bind to listening ports and become active. - ### Running kubeadm without an internet connection For running kubeadm without an internet connection you have to pre-pull the required control-plane images. @@ -318,9 +333,16 @@ know the IP address that the control-plane node will have after it is started. kubeadm token generate ``` -1. Start both the control-plane node and the worker nodes concurrently with this token. - As they come up they should find each other and form the cluster. The same - `--token` argument can be used on both `kubeadm init` and `kubeadm join`. +1. Start both the control-plane node and the worker nodes concurrently with this token. + As they come up they should find each other and form the cluster. The same + `--token` argument can be used on both `kubeadm init` and `kubeadm join`. + +1. Similar can be done for `--certificate-key` when joining additional control-plane + nodes. The key can be generated using: + + ```shell + kubeadm alpha certs certificate-key + ``` Once the cluster is up, you can grab the admin credentials from the control-plane node at `/etc/kubernetes/admin.conf` and use that to talk to the cluster. diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md.orig b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md.orig new file mode 100644 index 0000000000000..e84c50c66e35b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md.orig @@ -0,0 +1,363 @@ +--- +reviewers: +- mikedanese +- luxas +- jbeda +title: kubeadm init +content_template: templates/concept +weight: 20 +--- +{{% capture overview %}} +This command initializes a Kubernetes control-plane node. +{{% /capture %}} + +{{% capture body %}} + +{{< include "generated/kubeadm_init.md" >}} + +### Init workflow {#init-workflow} +`kubeadm init` bootstraps a Kubernetes control-plane node by executing the +following steps: + +1. Runs a series of pre-flight checks to validate the system state + before making changes. Some checks only trigger warnings, others are + considered errors and will exit kubeadm until the problem is corrected or the + user specifies `--ignore-preflight-errors=`. + +1. Generates a self-signed CA (or using an existing one if provided) to set up + identities for each component in the cluster. If the user has provided their + own CA cert and/or key by dropping it in the cert directory configured via `--cert-dir` + (`/etc/kubernetes/pki` by default) this step is skipped as described in the + [Using custom certificates](#custom-certificates) document. + The APIServer certs will have additional SAN entries for any `--apiserver-cert-extra-sans` arguments, lowercased if necessary. + +1. 
Writes kubeconfig files in `/etc/kubernetes/` for + the kubelet, the controller-manager and the scheduler to use to connect to the + API server, each with its own identity, as well as an additional + kubeconfig file for administration named `admin.conf`. + +1. Generates static Pod manifests for the API server, + controller manager and scheduler. In case an external etcd is not provided, + an additional static Pod manifest is generated for etcd. + + Static Pod manifests are written to `/etc/kubernetes/manifests`; the kubelet + watches this directory for Pods to create on startup. + + Once control plane Pods are up and running, the `kubeadm init` sequence can continue. + +1. Apply labels and taints to the control-plane node so that no additional workloads will + run there. + +1. Generates the token that additional nodes can use to register + themselves with a control-plane in the future. Optionally, the user can provide a + token via `--token`, as described in the + [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token/) docs. + +1. Makes all the necessary configurations for allowing node joining with the + [Bootstrap Tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) and + [TLS Bootstrap](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) + mechanism: + + - Write a ConfigMap for making available all the information required + for joining, and set up related RBAC access rules. + + - Let Bootstrap Tokens access the CSR signing API. + + - Configure auto-approval for new CSR requests. + + See [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) for additional info. + +1. Installs a DNS server (CoreDNS) and the kube-proxy addon components via the API server. + In Kubernetes version 1.11 and later CoreDNS is the default DNS server. + To install kube-dns instead of CoreDNS, the DNS addon has to be configured in the kubeadm `ClusterConfiguration`. For more information about the configuration see the section + `Using kubeadm init with a configuration file` below. + Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. + +### Using init phases with kubeadm {#init-phases} + +Kubeadm allows you create a control-plane node in phases. In 1.13 the `kubeadm init phase` command has graduated to GA from it’s previous alpha state under `kubeadm alpha phase`. + +To view the ordered list of phases and sub-phases you can call `kubeadm init --help`. The list will be located at the top of the help screen and each phase will have a description next to it. +Note that by calling `kubeadm init` all of the phases and sub-phases will be executed in this exact order. + +Some phases have unique flags, so if you want to have a look at the list of available options add `--help`, for example: + +```shell +sudo kubeadm init phase control-plane controller-manager --help +``` + +You can also use `--help` to see the list of sub-phases for a certain parent phase: + +```shell +sudo kubeadm init phase control-plane --help +``` + +`kubeadm init` also exposes a flag called `--skip-phases` that can be used to skip certain phases. The flag accepts a list of phase names and the names can be taken from the above ordered list. 
+ +An example: + +```shell +sudo kubeadm init phase control-plane all --config=configfile.yaml +sudo kubeadm init phase etcd local --config=configfile.yaml +# you can now modify the control plane and etcd manifest files +sudo kubeadm init --skip-phases=control-plane,etcd --config=configfile.yaml +``` + +What this example would do is write the manifest files for the control plane and etcd in `/etc/kubernetes/manifests` based on the configuration in `configfile.yaml`. This allows you to modify the files and then skip these phases using `--skip-phases`. By calling the last command you will create a control plane node with the custom manifest files. + +### Using kubeadm init with a configuration file {#config-file} + +{{< caution >}} +The config file is still considered beta and may change in future versions. +{{< /caution >}} + +It's possible to configure `kubeadm init` with a configuration file instead of command +line flags, and some more advanced features may only be available as +configuration file options. This file is passed in the `--config` option. + +In Kubernetes 1.11 and later, the default configuration can be printed out using the +[kubeadm config print](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. + +It is **recommended** that you migrate your old `v1beta1` configuration to `v1beta2` using +the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. + +For more details on each field in the `v1beta2` configuration you can navigate to our +[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2). + +### Adding kube-proxy parameters {#kube-proxy} + +For information about kube-proxy parameters in the kubeadm configuration see: +- [kube-proxy](https://godoc.org/k8s.io/kubernetes/pkg/proxy/apis/config#KubeProxyConfiguration) + +For information about enabling IPVS mode with kubeadm see: +- [IPVS](https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md) + +### Passing custom flags to control plane components {#control-plane-flags} + +For information about passing flags to control plane components see: +- [control-plane-flags](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/) + +### Using custom images {#custom-images} + +By default, kubeadm pulls images from `k8s.gcr.io`, unless +the requested Kubernetes version is a CI version. In this case, +`gcr.io/kubernetes-ci-images` is used. + +You can override this behavior by using [kubeadm with a configuration file](#config-file). +Allowed customization are: + +* To provide an alternative `imageRepository` to be used instead of + `k8s.gcr.io`. +* To set `useHyperKubeImage` to `true` to use the HyperKube image. +* To provide a specific `imageRepository` and `imageTag` for etcd or DNS add-on. + +Please note that the configuration field `kubernetesVersion` or the command line flag +`--kubernetes-version` affect the version of the images. + +### Uploading control-plane certificates to the cluster + +By adding the flag `--upload-certs` to `kubeadm init` you can temporary upload +the control-plane certificates to a Secret in the cluster. Please note that this Secret +will expire automatically after 2 hours. The certificates are encrypted using +a 32byte key that can be specified using `--certificate-key`. The same key can be used +to download the certificates when additional control-plane nodes are joining, by passing +`--control-plane` and `--certificate-key` to `kubeadm join`. 
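+
+For example, a join invocation for an additional control-plane node might look like the following sketch; the address, token, hash and key are placeholders reused from the examples elsewhere on this page:
+
+```shell
+# Placeholders: adjust the endpoint, token, CA hash and certificate key to your cluster
+kubeadm join 1.2.3.4:6443 --token abcdef.1234567890abcdef \
+    --discovery-token-ca-cert-hash sha256:1234..cdef \
+    --control-plane --certificate-key <key-printed-by-kubeadm-init>
+```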
+ +The following phase command can be used to re-upload the certificates after expiration: + +``` +kubeadm init phase upload-certs --upload-certs --certificate-key=SOME_VALUE +``` + +If the flag `--certificate-key` is not passed to `kubeadm init` and +`kubeadm init phase upload-certs` a new key will be generated automatically. + +The following command can be used to generate a new key on demand: + +``` +kubeadm alpha certs certificate-key +``` + +### Using custom certificates {#custom-certificates} + +By default, kubeadm generates all the certificates needed for a cluster to run. +You can override this behavior by providing your own certificates. + +To do so, you must place them in whatever directory is specified by the +`--cert-dir` flag or `CertificatesDir` configuration file key. By default this +is `/etc/kubernetes/pki`. + +If a given certificate and private key pair exists, kubeadm skips the +generation step and existing files are used for the prescribed +use case. This means you can, for example, copy an existing CA into `/etc/kubernetes/pki/ca.crt` +and `/etc/kubernetes/pki/ca.key`, and kubeadm will use this CA for signing the rest +of the certs. + +#### External CA mode {#external-ca-mode} + +It is also possible to provide just the `ca.crt` file and not the +`ca.key` file (this is only available for the root CA file, not other cert pairs). +If all other certificates and kubeconfig files are in place, kubeadm recognizes +this condition and activates the "External CA" mode. kubeadm will proceed without the +CA key on disk. + +Instead, run the controller-manager standalone with `--controllers=csrsigner` and +point to the CA certificate and key. + +### Managing the kubeadm drop-in file for the kubelet {#kubelet-drop-in} + +The kubeadm package ships with configuration for how the kubelet should +be run. Note that the `kubeadm` CLI command never touches this drop-in file. +This drop-in file belongs to the kubeadm deb/rpm package. + +This is what it looks like: + + +``` +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf +--kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating +the KUBELET_KUBEADM_ARGS variable dynamically +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, +#the user should use the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. +# KUBELET_EXTRA_ARGS should be sourced from this file. +EnvironmentFile=-/etc/default/kubelet +ExecStart= +ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS +``` + +Here's a breakdown of what/why: + +* `--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf` path to a kubeconfig + file that is used to get client certificates for kubelet during node join. + On success, a kubeconfig file is written to the path specified by `--kubeconfig`. +* `--kubeconfig=/etc/kubernetes/kubelet.conf` points to the kubeconfig file that + tells the kubelet where the API server is. This file also has the kubelet's + credentials. +* `--pod-manifest-path=/etc/kubernetes/manifests` specifies from where to read + static Pod manifests used for starting the control plane. +* `--allow-privileged=true` allows this kubelet to run privileged Pods. 
+* `--network-plugin=cni` uses CNI networking. +* `--cni-conf-dir=/etc/cni/net.d` specifies where to look for the + [CNI spec file(s)](https://github.com/containernetworking/cni/blob/master/SPEC.md). +* `--cni-bin-dir=/opt/cni/bin` specifies where to look for the actual CNI binaries. +* `--cluster-dns=10.96.0.10` use this cluster-internal DNS server for `nameserver` + entries in Pods' `/etc/resolv.conf`. +* `--cluster-domain=cluster.local` uses this cluster-internal DNS domain for + `search` entries in Pods' `/etc/resolv.conf`. +* `--client-ca-file=/etc/kubernetes/pki/ca.crt` authenticates requests to the Kubelet + API using this CA certificate. +* `--authorization-mode=Webhook` authorizes requests to the Kubelet API by `POST`-ing + a `SubjectAccessReview` to the API server. +* `--rotate-certificates` auto rotate the kubelet client certificates by requesting new + certificates from the `kube-apiserver` when the certificate expiration approaches. +* `--cert-dir`the directory where the TLS certs are located. + +### Use kubeadm with CRI runtimes + +Since v1.6.0, Kubernetes has enabled the use of CRI, Container Runtime Interface, by default. +The container runtime used by default is Docker, which is enabled through the built-in +`dockershim` CRI implementation inside of the `kubelet`. + +Other CRI-based runtimes include: + +- [cri-containerd](https://github.com/containerd/cri-containerd) +- [cri-o](https://github.com/kubernetes-incubator/cri-o) +- [frakti](https://github.com/kubernetes/frakti) +- [rkt](https://github.com/kubernetes-incubator/rktlet) + +Refer to the [CRI installation instructions](/docs/setup/cri) for more information. + +After you have successfully installed `kubeadm` and `kubelet`, execute +these two additional steps: + +1. Install the runtime shim on every node, following the installation + document in the runtime shim project listing above. + +1. Configure kubelet to use the remote CRI runtime. Please remember to change + `RUNTIME_ENDPOINT` to your own value like `/var/run/{your_runtime}.sock`: + +```shell +cat > /etc/systemd/system/kubelet.service.d/20-cri.conf <.<16 + character string>`. More formally, it must match the regex: + `[a-z0-9]{6}\.[a-z0-9]{16}`. + + kubeadm can generate a token for you: + + ```shell + kubeadm token generate + ``` + +1. Start both the control-plane node and the worker nodes concurrently with this token. + As they come up they should find each other and form the cluster. The same + `--token` argument can be used on both `kubeadm init` and `kubeadm join`. + +1. Similar can be done for `--certificate-key` when joining additional control-plane + nodes. The key can be generated using: + + ```shell + kubeadm alpha certs certificate-key + ``` + +Once the cluster is up, you can grab the admin credentials from the control-plane node +at `/etc/kubernetes/admin.conf` and use that to talk to the cluster. + +Note that this style of bootstrap has some relaxed security guarantees because +it does not allow the root CA hash to be validated with +`--discovery-token-ca-cert-hash` (since it's not generated when the nodes are +provisioned). For details, see the [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/). 
+ +{{% /capture %}} + +{{% capture whatsnext %}} +* [kubeadm init phase](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/) to understand more about +`kubeadm init` phases +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) to upgrade a Kubernetes cluster to a newer version +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +{{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index ca171f937a26f..8434a07206f3a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -98,9 +98,12 @@ kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert For control-plane nodes: ```shell -kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert-hash sha256:1234..cdef --experimental-control-plane 1.2.3.4:6443 +kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert-hash sha256:1234..cdef --control-plane 1.2.3.4:6443 ``` +You can also call `join` for a control-plane node with `--certificate-key` to copy certificates to this node, +if the `kubeadm init` command was called with `--upload-certs`. + **Advantages:** - Allows bootstrapping nodes to securely discover a root of trust for the diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset-phase.md new file mode 100644 index 0000000000000..6d17e73c09c42 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset-phase.md @@ -0,0 +1,53 @@ +--- +title: kubeadm reset phase +weight: 90 +--- +In v1.15.0, kubeadm introduces the `kubeadm reset phase` command with the aim of making kubeadm more modular. This modularity enables you to invoke atomic sub-steps of the reset process. +Hence, you can let kubeadm do some parts and fill in yourself where you need customizations. + +`kubeadm reset phase` is consistent with the [kubeadm reset workflow](/docs/reference/setup-tools/kubeadm/kubeadm-reset/#reset-workflow), +and behind the scene both use the same code. + +## kubeadm reset phase {#cmd-reset-phase} + +{{< tabs name="tab-phase" >}} +{{< tab name="phase" include="generated/kubeadm_reset_phase.md" />}} +{{< /tabs >}} + +## kubeadm reset phase preflight {#cmd-reset-phase-preflight} + +Using this phase you can execute preflight checks on a node that is being reset. + +{{< tabs name="tab-preflight" >}} +{{< tab name="preflight" include="generated/kubeadm_reset_phase_preflight.md" />}} +{{< /tabs >}} + +## kubeadm reset phase update-cluster-status {#cmd-reset-phase-update-cluster-status} + +Using this phase you can remove this control-plane node from the ClusterStatus object. + +{{< tabs name="tab-update-cluster-status" >}} +{{< tab name="update-cluster-status" include="generated/kubeadm_reset_phase_update-cluster-status.md" />}} +{{< /tabs >}} + +## kubeadm reset phase remove-etcd-member {#cmd-reset-phase-remove-etcd-member} + +Using this phase you can remove this control-plane node's etcd member from the etcd cluster. 
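+
+Taken together with the other phases described on this page, a full manual reset of a control-plane node might look like the following sketch; worker nodes would skip the two control-plane-only steps:
+
+```shell
+kubeadm reset phase preflight
+kubeadm reset phase update-cluster-status   # control-plane nodes only
+kubeadm reset phase remove-etcd-member      # control-plane nodes only
+kubeadm reset phase cleanup-node
+```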
+ +{{< tabs name="tab-remove-etcd-member" >}} +{{< tab name="remove-etcd-member" include="generated/kubeadm_reset_phase_remove-etcd-member.md" />}} +{{< /tabs >}} + +## kubeadm reset phase cleanup-node {#cmd-reset-phase-cleanup-node} + +Using this phase you can perform cleanup on this node. + +{{< tabs name="tab-cleanup-node" >}} +{{< tab name="cleanup-node" include="generated/kubeadm_reset_phase_cleanup-node.md" />}} +{{< /tabs >}} + +## What's next +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to connect a node to the cluster +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) to try experimental functionality diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md index 94ecaeb09805f..5847e6209dd58 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md @@ -14,7 +14,18 @@ This command reverts any changes made by `kubeadm init` or `kubeadm join`. {{% capture body %}} {{< include "generated/kubeadm_reset.md" >}} -### External etcd clean up! +### Reset workflow {#reset-workflow} + +`kubeadm reset` is responsible for cleaning up a node local file system from files that were created using +the `kubeadm init` or `kubeadm join` commands. For control-plane nodes `reset` also removes the local stacked +etcd member of this node from the etcd cluster and also removes this node's information from the kubeadm +`ClusterStatus` object. `ClusterStatus` is a kubeadm managed Kubernetes API object that holds a list of kube-apiserver endpoints. + +`kubeadm reset phase` can be used to execute the separate phases of the above workflow. +To skip a list of phases you can use the `--skip-phases` flag, which works in a similar way to +the `kubeadm join` and `kubeadm init` phase runners. + +### External etcd clean up `kubeadm reset` will not delete any etcd data if external etcd is used. This means that if you run `kubeadm init` again using the same etcd endpoints, you will see state from previous clusters. diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md new file mode 100644 index 0000000000000..6224a18e0e50e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md @@ -0,0 +1,26 @@ +--- +title: kubeadm upgrade phase +weight: 90 +--- +In v1.15.0, kubeadm introduced preliminary support for `kubeadm upgrade node` phases. +Phases for other `kubeadm upgrade` sub-commands such as `apply`, could be added in the +following releases. + +## kubeadm upgrade node phase {#cmd-node-phase} + +Using this phase you can choose to execute the separate steps of the upgrade of +secondary control-plane or worker nodes. Please note that `kubeadm upgrade apply` still has to +be called on a primary control-plane node. 
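+
+For example, on a secondary control-plane node the two phases might be invoked separately, as in this sketch (the kubelet version shown is only an illustration):
+
+```shell
+# Upgrade the control plane components running on this node
+kubeadm upgrade node phase control-plane
+
+# Then upgrade the kubelet configuration for this node
+# (v1.15.0 is just an example target version)
+kubeadm upgrade node phase kubelet-config --kubelet-version v1.15.0
+```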
+ +{{< tabs name="tab-phase" >}} +{{< tab name="phase" include="generated/kubeadm_upgrade_node_phase.md" />}} +{{< tab name="control-plane" include="generated/kubeadm_upgrade_node_phase_control-plane.md" />}} +{{< tab name="kubelet-config" include="generated/kubeadm_upgrade_node_phase_kubelet-config.md" />}} +{{< /tabs >}} + +## What's next +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to connect a node to the cluster +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) to upgrade a kubeadm node +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) to try experimental functionality diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md index 2e9b9613e5dda..2f389b851ef0b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md @@ -14,15 +14,15 @@ cluster if necessary. {{% /capture %}} {{% capture body %}} + ## kubeadm upgrade guidance Every upgrade process might be a bit different, so we've documented each minor upgrade process individually. For more version-specific upgrade guidance, see the following resources: - * [1.10 to 1.11 upgrades](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11/) - * [1.11 to 1.12 upgrades](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-12/) * [1.12 to 1.13 upgrades](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-13/) * [1.13 to 1.14 upgrades](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14/) + * [1.14 to 1.15 upgrades](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15/) _For older versions, please refer to older documentation sets on the Kubernetes website._ @@ -31,6 +31,11 @@ applied to static pod manifests. To use kube-dns with upgrades in Kubernetes v1.13.0 and later please follow [this guide](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon). +In Kubernetes v1.15.0 and later, `kubeadm upgrade apply` and `kubeadm upgrade node` will also +automatically renew the kubeadm managed certificates on this node, including those stored in kubeconfig files. +To opt-out, it is possible to pass the flag `--certificate-renewal=false`. For more details about certificate +renewal see the [certificate management documentation](docs/tasks/administer-cluster/kubeadm/kubeadm-certs). 
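+
+For instance, an upgrade that keeps the existing certificates could look like this sketch (the target version is only an example):
+
+```shell
+# v1.15.0 is just an example target version
+kubeadm upgrade apply v1.15.0 --certificate-renewal=false
+```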
+ ## kubeadm upgrade plan {#cmd-upgrade-plan} {{< include "generated/kubeadm_upgrade_plan.md" >}} @@ -40,11 +45,8 @@ To use kube-dns with upgrades in Kubernetes v1.13.0 and later please follow [thi ## kubeadm upgrade diff {#cmd-upgrade-diff} {{< include "generated/kubeadm_upgrade_diff.md" >}} -## kubeadm upgrade node config {#cmd-upgrade-node-config} -{{< include "generated/kubeadm_upgrade_node_config.md" >}} - -## kubeadm upgrade node experimental-control-plane {#cmd-experimental-control-plane} -{{< include "generated/kubeadm_upgrade_node_experimental-control-plane.md" >}} +## kubeadm upgrade node {#cmd-upgrade-node} +{{< include "generated/kubeadm_upgrade_node.md" >}} {{% /capture %}} diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index b9aaada90cca7..6d2c4b694eb62 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -87,6 +87,24 @@ For example: ... A given Kubernetes server will only preserve a historical list of changes for a limited time. Clusters using etcd3 preserve changes in the last 5 minutes by default. When the requested watch operations fail because the historical version of that resource is not available, clients must handle the case by recognizing the status code `410 Gone`, clearing their local cache, performing a list operation, and starting the watch from the `resourceVersion` returned by that new list operation. Most client libraries offer some form of standard tool for this logic. (In Go this is called a `Reflector` and is located in the `k8s.io/client-go/cache` package.) +To mitigate the impact of short history window, we introduced a concept of `bookmark` watch event. It is a special kind of event to pass an information that all changes up to a given `resourceVersion` client is requesting has already been send. Object returned in that event is of the type requested by the request, but only `resourceVersion` field is set, e.g.: + + GET /api/v1/namespaces/test/pods?watch=1&resourceVersion=10245&allowWatchBookmarks=true + --- + 200 OK + Transfer-Encoding: chunked + Content-Type: application/json + { + "type": "ADDED", + "object": {"kind": "Pod", "apiVersion": "v1", "metadata": {"resourceVersion": "10596", ...}, ...} + } + ... + { + "type": "BOOKMARK", + "object": {"kind": "Pod", "apiVersion": "v1", "metadata": {"resourceVersion": "12746"} } + } + +`Bookmark` events can be requested by `allowWatchBookmarks=true` option in watch requests, but clients shouldn't assume bookmarks are returned at any specific interval, nor may they assume the server will send any `bookmark` event. As of 1.15 release, it is an Alpha feature. ## Retrieving large results sets in chunks diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index ef4499e7679e8..7a03137669907 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -23,7 +23,7 @@ The `extraArgs` field consist of `key: value` pairs. To override a flag for a co 2. Add the flags to override to the field. For more details on each field in the configuration you can navigate to our -[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1#ClusterConfiguration#ClusterConfiguration). 
+[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration). {{% /capture %}} @@ -35,7 +35,7 @@ For details, see the [reference documentation for kube-apiserver](/docs/referenc Example usage: ```yaml -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.13.0 metadata: @@ -54,7 +54,7 @@ For details, see the [reference documentation for kube-controller-manager](/docs Example usage: ```yaml -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.13.0 metadata: @@ -72,7 +72,7 @@ For details, see the [reference documentation for kube-scheduler](/docs/referenc Example usage: ```yaml -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.13.0 metadata: diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md.orig b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md.orig new file mode 100644 index 0000000000000..7a03137669907 --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md.orig @@ -0,0 +1,87 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Customizing control plane configuration with kubeadm +content_template: templates/concept +weight: 40 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="1.12" state="stable" >}} + +The kubeadm `ClusterConfiguration` object exposes the field `extraArgs` that can override the default flags passed to control plane +components such as the APIServer, ControllerManager and Scheduler. The components are defined using the following fields: + +- `apiServer` +- `controllerManager` +- `scheduler` + +The `extraArgs` field consist of `key: value` pairs. To override a flag for a control plane component: + +1. Add the appropriate fields to your configuration. +2. Add the flags to override to the field. + +For more details on each field in the configuration you can navigate to our +[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration). + +{{% /capture %}} + +{{% capture body %}} + +## APIServer flags + +For details, see the [reference documentation for kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/). + +Example usage: +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: v1.13.0 +metadata: + name: 1.13-sample +apiServer: + extraArgs: + advertise-address: 192.168.0.103 + anonymous-auth: false + enable-admission-plugins: AlwaysPullImages,DefaultStorageClass + audit-log-path: /home/johndoe/audit.log +``` + +## ControllerManager flags + +For details, see the [reference documentation for kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). + +Example usage: +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: v1.13.0 +metadata: + name: 1.13-sample +controllerManager: + extraArgs: + cluster-signing-key-file: /home/johndoe/keys/ca.key + bind-address: 0.0.0.0 + deployment-controller-sync-period: 50 +``` + +## Scheduler flags + +For details, see the [reference documentation for kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/). 
+ +Example usage: +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: v1.13.0 +metadata: + name: 1.13-sample +scheduler: + extraArgs: + address: 0.0.0.0 + config: /home/johndoe/schedconfig.yaml + kubeconfig: /home/johndoe/kubeconfig.yaml +``` + +{{% /capture %}} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index c98c8a680cbae..eaacb748d74d2 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -1,7 +1,7 @@ --- reviewers: - sig-cluster-lifecycle -title: Creating a single master cluster with kubeadm +title: Creating a single control-plane cluster with kubeadm content_template: templates/task weight: 30 --- @@ -33,18 +33,17 @@ installing deb or rpm packages. The responsible SIG for kubeadm, but you may also build them from source for other OSes. -### kubeadm Maturity +### kubeadm maturity | Area | Maturity Level | |---------------------------|--------------- | | Command line UX | GA | | Implementation | GA | -| Config file API | beta | +| Config file API | Beta | | CoreDNS | GA | -| kubeadm alpha subcommands | alpha | -| High availability | alpha | -| DynamicKubeletConfig | alpha | -| Self-hosting | alpha | +| kubeadm alpha subcommands | Alpha | +| High availability | Beta | +| DynamicKubeletConfig | Alpha | kubeadm's overall feature state is **GA**. Some sub-features, like the configuration @@ -70,6 +69,8 @@ timeframe; which also applies to `kubeadm`. | v1.11.x | June 2018 | March 2019   | | v1.12.x | September 2018 | June 2019   | | v1.13.x | December 2018 | September 2019   | +| v1.14.x | March 2019 | December 2019   | +| v1.15.x | June 2019 | March 2020   | {{% /capture %}} @@ -78,7 +79,7 @@ timeframe; which also applies to `kubeadm`. - One or more machines running a deb/rpm-compatible OS, for example Ubuntu or CentOS - 2 GB or more of RAM per machine. Any less leaves little room for your apps. -- 2 CPUs or more on the master +- 2 CPUs or more on the control-plane node - Full network connectivity among all machines in the cluster. A public or private network is fine. @@ -107,9 +108,9 @@ kubeadm to tell it what to do. This crashloop is expected and normal. After you initialize your master, the kubelet runs normally. {{< /note >}} -### Initializing your master +### Initializing your control-plane node -The master is the machine where the control plane components run, including +The control-plane node is the machine where the control plane components run, including etcd (the cluster database) and the API server (which the kubectl CLI communicates with). @@ -240,8 +241,8 @@ export KUBECONFIG=/etc/kubernetes/admin.conf Make a record of the `kubeadm join` command that `kubeadm init` outputs. You need this command to [join nodes to your cluster](#join-nodes). -The token is used for mutual authentication between the master and the joining -nodes. The token included here is secret. Keep it safe, because anyone with this +The token is used for mutual authentication between the control-plane node and the joining +nodes. The token included here is secret. Keep it safe, because anyone with this token can add authenticated nodes to your cluster. These tokens can be listed, created, and deleted with the `kubeadm token` command. 
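For illustration, a minimal sketch of that token lifecycle, run on the control-plane node (the token value below is a placeholder):

```bash
# List the bootstrap tokens currently known to the cluster
kubeadm token list

# Create a new token and print the full join command that uses it
kubeadm token create --print-join-command

# Delete a token that is no longer needed
kubeadm token delete abcdef.0123456789abcdef
```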
See the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm-token/). @@ -430,8 +431,8 @@ If your network is not working or CoreDNS is not in the Running state, checkout ### Control plane node isolation -By default, your cluster will not schedule pods on the master for security -reasons. If you want to be able to schedule pods on the master, e.g. for a +By default, your cluster will not schedule pods on the control-plane node for security +reasons. If you want to be able to schedule pods on the control-plane node, e.g. for a single-machine Kubernetes cluster for development, run: ```bash @@ -447,7 +448,7 @@ taint "node-role.kubernetes.io/master:" not found ``` This will remove the `node-role.kubernetes.io/master` taint from any nodes that -have it, including the master node, meaning that the scheduler will then be able +have it, including the control-plane node, meaning that the scheduler will then be able to schedule pods everywhere. ### Joining your nodes {#join-nodes} @@ -462,7 +463,7 @@ The nodes are where your workloads (containers and pods, etc) run. To add new no kubeadm join --token : --discovery-token-ca-cert-hash sha256: ``` -If you do not have the token, you can get it by running the following command on the master node: +If you do not have the token, you can get it by running the following command on the control-plane node: ``` bash kubeadm token list @@ -479,7 +480,7 @@ TOKEN TTL EXPIRES USAGES DESCRIPTION ``` By default, tokens expire after 24 hours. If you are joining a node to the cluster after the current token has expired, -you can create a new token by running the following command on the master node: +you can create a new token by running the following command on the control-plane node: ``` bash kubeadm token create @@ -491,7 +492,7 @@ The output is similar to this: 5didvk.d09sbcov8ph2amjw ``` -If you don't have the value of `--discovery-token-ca-cert-hash`, you can get it by running the following command chain on the master node: +If you don't have the value of `--discovery-token-ca-cert-hash`, you can get it by running the following command chain on the control-plane node: ``` bash openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \ @@ -524,12 +525,12 @@ Run 'kubectl get nodes' on the master to see this machine join. ``` A few seconds later, you should notice this node in the output from `kubectl get -nodes` when run on the master. +nodes` when run on the control-plane node. -### (Optional) Controlling your cluster from machines other than the master +### (Optional) Controlling your cluster from machines other than the control-plane node In order to get a kubectl on some other computer (e.g. laptop) to talk to your -cluster, you need to copy the administrator kubeconfig file from your master +cluster, you need to copy the administrator kubeconfig file from your control-plane node to your workstation like this: ``` bash @@ -569,7 +570,7 @@ To undo what kubeadm did, you should first [drain the node](/docs/reference/generated/kubectl/kubectl-commands#drain) and make sure that the node is empty before shutting it down. -Talking to the master with the appropriate credentials, run: +Talking to the control-plane node with the appropriate credentials, run: ```bash kubectl drain --delete-local-data --force --ignore-daemonsets @@ -657,18 +658,17 @@ supports your chosen platform. ## Limitations {#limitations} -Please note: kubeadm is a work in progress and these limitations will be -addressed in due course. 
+The cluster created here has a single control-plane node, with a single etcd database +running on it. This means that if the control-plane node fails, your cluster may lose +data and may need to be recreated from scratch. -1. The cluster created here has a single master, with a single etcd database - running on it. This means that if the master fails, your cluster may lose - data and may need to be recreated from scratch. Adding HA support - (multiple etcd servers, multiple API servers, etc) to kubeadm is - still a work-in-progress. +Workarounds: - Workaround: regularly - [back up etcd](https://coreos.com/etcd/docs/latest/admin_guide.html). The - etcd data directory configured by kubeadm is at `/var/lib/etcd` on the master. +* Regularly [back up etcd](https://coreos.com/etcd/docs/latest/admin_guide.html). The + etcd data directory configured by kubeadm is at `/var/lib/etcd` on the control-plane node. + +* Use multiple control-plane nodes by completing the + [HA setup](/docs/setup/independent/ha-topology) instead. ## Troubleshooting {#troubleshooting} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md.orig b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md.orig new file mode 100644 index 0000000000000..eaacb748d74d2 --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md.orig @@ -0,0 +1,675 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Creating a single control-plane cluster with kubeadm +content_template: templates/task +weight: 30 +--- + +{{% capture overview %}} + +**kubeadm** helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. With kubeadm, your cluster should pass [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). Kubeadm also supports other cluster +lifecycle functions, such as upgrades, downgrade, and managing [bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/). + +Because you can install kubeadm on various types of machine (e.g. laptop, server, +Raspberry Pi, etc.), it's well suited for integration with provisioning systems +such as Terraform or Ansible. + +kubeadm's simplicity means it can serve a wide range of use cases: + +- New users can start with kubeadm to try Kubernetes out for the first time. +- Users familiar with Kubernetes can spin up clusters with kubeadm and test their applications. +- Larger projects can include kubeadm as a building block in a more complex system that can also include other installer tools. + +kubeadm is designed to be a simple way for new users to start trying +Kubernetes out, possibly for the first time, a way for existing users to +test their application on and stitch together a cluster easily, and also to be +a building block in other ecosystem and/or installer tool with a larger +scope. + +You can install _kubeadm_ very easily on operating systems that support +installing deb or rpm packages. The responsible SIG for kubeadm, +[SIG Cluster Lifecycle](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle), provides these packages pre-built for you, +but you may also build them from source for other OSes. 
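As a quick post-install sanity check, you can confirm which versions of the tools ended up on the machine (a hedged sketch; exact output varies by distribution and package version):

```bash
# Installed kubeadm version, short form
kubeadm version -o short

# Installed kubelet version
kubelet --version

# Installed kubectl client version
kubectl version --client --short
```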
+ + +### kubeadm maturity + +| Area | Maturity Level | +|---------------------------|--------------- | +| Command line UX | GA | +| Implementation | GA | +| Config file API | Beta | +| CoreDNS | GA | +| kubeadm alpha subcommands | Alpha | +| High availability | Beta | +| DynamicKubeletConfig | Alpha | + + +kubeadm's overall feature state is **GA**. Some sub-features, like the configuration +file API are still under active development. The implementation of creating the cluster +may change slightly as the tool evolves, but the overall implementation should be pretty stable. +Any commands under `kubeadm alpha` are by definition, supported on an alpha level. + + +### Support timeframes + +Kubernetes releases are generally supported for nine months, and during that +period a patch release may be issued from the release branch if a severe bug or +security issue is found. Here are the latest Kubernetes releases and the support +timeframe; which also applies to `kubeadm`. + +| Kubernetes version | Release month | End-of-life-month | +|--------------------|----------------|-------------------| +| v1.6.x | March 2017 | December 2017 | +| v1.7.x | June 2017 | March 2018 | +| v1.8.x | September 2017 | June 2018 | +| v1.9.x | December 2017 | September 2018   | +| v1.10.x | March 2018 | December 2018   | +| v1.11.x | June 2018 | March 2019   | +| v1.12.x | September 2018 | June 2019   | +| v1.13.x | December 2018 | September 2019   | +| v1.14.x | March 2019 | December 2019   | +| v1.15.x | June 2019 | March 2020   | + +{{% /capture %}} + +{{% capture prerequisites %}} + +- One or more machines running a deb/rpm-compatible OS, for example Ubuntu or CentOS +- 2 GB or more of RAM per machine. Any less leaves little room for your + apps. +- 2 CPUs or more on the control-plane node +- Full network connectivity among all machines in the cluster. A public or + private network is fine. + +{{% /capture %}} + +{{% capture steps %}} + +## Objectives + +* Install a single master Kubernetes cluster or [high availability cluster](/docs/setup/production-environment/tools/kubeadm/high-availability/) +* Install a Pod network on the cluster so that your Pods can + talk to each other + +## Instructions + +### Installing kubeadm on your hosts + +See ["Installing kubeadm"](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). + +{{< note >}} +If you have already installed kubeadm, run `apt-get update && +apt-get upgrade` or `yum update` to get the latest version of kubeadm. + +When you upgrade, the kubelet restarts every few seconds as it waits in a crashloop for +kubeadm to tell it what to do. This crashloop is expected and normal. +After you initialize your master, the kubelet runs normally. +{{< /note >}} + +### Initializing your control-plane node + +The control-plane node is the machine where the control plane components run, including +etcd (the cluster database) and the API server (which the kubectl CLI +communicates with). + +1. Choose a pod network add-on, and verify whether it requires any arguments to +be passed to kubeadm initialization. Depending on which +third-party provider you choose, you might need to set the `--pod-network-cidr` to +a provider-specific value. See [Installing a pod network add-on](#pod-network). +1. (Optional) Since version 1.14, kubeadm will try to detect the container runtime on Linux +by using a list of well known domain socket paths. 
To use different container runtime or +if there are more than one installed on the provisioned node, specify the `--cri-socket` +argument to `kubeadm init`. See [Installing runtime](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime). +1. (Optional) Unless otherwise specified, kubeadm uses the network interface associated +with the default gateway to advertise the master's IP. To use a different +network interface, specify the `--apiserver-advertise-address=` argument +to `kubeadm init`. To deploy an IPv6 Kubernetes cluster using IPv6 addressing, you +must specify an IPv6 address, for example `--apiserver-advertise-address=fd00::101` +1. (Optional) Run `kubeadm config images pull` prior to `kubeadm init` to verify +connectivity to gcr.io registries. + +Now run: + +```bash +kubeadm init +``` + +### More information + +For more information about `kubeadm init` arguments, see the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm/). + +For a complete list of configuration options, see the [configuration file documentation](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). + +To customize control plane components, including optional IPv6 assignment to liveness probe for control plane components and etcd server, provide extra arguments to each component as documented in [custom arguments](/docs/admin/kubeadm#custom-args). + +To run `kubeadm init` again, you must first [tear down the cluster](#tear-down). + +If you join a node with a different architecture to your cluster, create a separate +Deployment or DaemonSet for `kube-proxy` and `kube-dns` on the node. This is because the Docker images for these +components do not currently support multi-architecture. + +`kubeadm init` first runs a series of prechecks to ensure that the machine +is ready to run Kubernetes. These prechecks expose warnings and exit on errors. `kubeadm init` +then downloads and installs the cluster control plane components. This may take several minutes. 
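As an illustration only, the optional flags described above might be combined into a single invocation like this (all values are placeholders and must match your environment and chosen network add-on):

```bash
# Hypothetical init invocation combining the optional flags from the list above
sudo kubeadm init \
  --pod-network-cidr=10.244.0.0/16 \
  --apiserver-advertise-address=192.168.0.10 \
  --cri-socket=/var/run/dockershim.sock
```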
+The output should look like: + +```none +[init] Using Kubernetes version: vX.Y.Z +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [kubeadm-master localhost] and IPs [10.138.0.4 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [kubeadm-master localhost] and IPs [10.138.0.4 127.0.0.1 ::1] +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [kubeadm-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.138.0.4] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s +[apiclient] All control plane components are healthy after 31.501735 seconds +[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-X.Y" in namespace kube-system with the configuration for the kubelets in the cluster +[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-master" as an annotation +[mark-control-plane] Marking the node kubeadm-master as control-plane by adding the label "node-role.kubernetes.io/master=''" +[mark-control-plane] Marking the node kubeadm-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes master has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + /docs/concepts/cluster-administration/addons/ + +You can now join any number of machines by running the following on each node +as root: + + kubeadm join : --token --discovery-token-ca-cert-hash sha256: +``` + +To make kubectl work for your non-root user, run these commands, which are +also part of the `kubeadm init` output: + +```bash +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config +``` + +Alternatively, if you are the `root` user, you can run: + +```bash +export KUBECONFIG=/etc/kubernetes/admin.conf +``` + +Make a record of the `kubeadm join` command that `kubeadm init` outputs. You +need this command to [join nodes to your cluster](#join-nodes). + +The token is used for mutual authentication between the control-plane node and the joining +nodes. The token included here is secret. Keep it safe, because anyone with this +token can add authenticated nodes to your cluster. These tokens can be listed, +created, and deleted with the `kubeadm token` command. See the +[kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm-token/). + +### Installing a pod network add-on {#pod-network} + +{{< caution >}} +This section contains important information about installation and deployment order. Read it carefully before proceeding. +{{< /caution >}} + +You must install a pod network add-on so that your pods can communicate with +each other. + +**The network must be deployed before any applications. Also, CoreDNS will not start up before a network is installed. 
+kubeadm only supports Container Network Interface (CNI) based networks (and does not support kubenet).** + +Several projects provide Kubernetes pod networks using CNI, some of which also +support [Network Policy](/docs/concepts/services-networking/networkpolicies/). See the [add-ons page](/docs/concepts/cluster-administration/addons/) for a complete list of available network add-ons. +- IPv6 support was added in [CNI v0.6.0](https://github.com/containernetworking/cni/releases/tag/v0.6.0). +- [CNI bridge](https://github.com/containernetworking/plugins/blob/master/plugins/main/bridge/README.md) and [local-ipam](https://github.com/containernetworking/plugins/blob/master/plugins/ipam/host-local/README.md) are the only supported IPv6 network plugins in Kubernetes version 1.9. + +Note that kubeadm sets up a more secure cluster by default and enforces use of [RBAC](/docs/reference/access-authn-authz/rbac/). +Make sure that your network manifest supports RBAC. + +Also, beware, that your Pod network must not overlap with any of the host networks as this can cause issues. +If you find a collision between your network plugin’s preferred Pod network and some of your host networks, you should think of a suitable CIDR replacement and use that during `kubeadm init` with `--pod-network-cidr` and as a replacement in your network plugin’s YAML. + +You can install a pod network add-on with the following command: + +```bash +kubectl apply -f +``` + +You can install only one pod network per cluster. + +{{< tabs name="tabs-pod-install" >}} +{{% tab name="Choose one..." %}} +Please select one of the tabs to see installation instructions for the respective third-party Pod Network Provider. +{{% /tab %}} + +{{% tab name="Calico" %}} +For more information about using Calico, see [Quickstart for Calico on Kubernetes](https://docs.projectcalico.org/latest/getting-started/kubernetes/), [Installing Calico for policy and networking](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/calico), and other related resources. + +For Calico to work correctly, you need to pass `--pod-network-cidr=192.168.0.0/16` to `kubeadm init` or update the `calico.yml` file to match your Pod network. Note that Calico works on `amd64`, `arm64`, and `ppc64le` only. + +```shell +kubectl apply -f https://docs.projectcalico.org/v3.7/manifests/calico.yaml +``` + +{{% /tab %}} +{{% tab name="Canal" %}} +Canal uses Calico for policy and Flannel for networking. Refer to the Calico documentation for the [official getting started guide](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/flannel). + +For Canal to work correctly, `--pod-network-cidr=10.244.0.0/16` has to be passed to `kubeadm init`. Note that Canal works on `amd64` only. + +```shell +kubectl apply -f https://docs.projectcalico.org/v3.7/manifests/canal.yaml +``` + +{{% /tab %}} + +{{% tab name="Cilium" %}} +For more information about using Cilium with Kubernetes, see [Kubernetes Install guide for Cilium](https://docs.cilium.io/en/stable/kubernetes/). + +For Cilium to work correctly, you must pass `--pod-network-cidr=10.217.0.0/16` to `kubeadm init`. + +These commands will deploy Cilium with its own etcd managed by etcd operator. + +_Note_: If you are running kubeadm in a single node please untaint it so that +etcd-operator pods can be scheduled in the control-plane node. 
+ +```shell +kubectl taint nodes node-role.kubernetes.io/master:NoSchedule- +``` + +To deploy Cilium you just need to run: + +```shell +kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.5/examples/kubernetes/1.14/cilium.yaml +``` + +Once all Cilium pods are marked as `READY`, you start using your cluster. + +```shell +kubectl get pods -n kube-system --selector=k8s-app=cilium +``` +The output is similar to this: +``` +NAME READY STATUS RESTARTS AGE +cilium-drxkl 1/1 Running 0 18m +``` + +{{% /tab %}} +{{% tab name="Flannel" %}} + +For `flannel` to work correctly, you must pass `--pod-network-cidr=10.244.0.0/16` to `kubeadm init`. + +Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` +to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information +please see [here](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). + +Make sure that your firewall rules allow UDP ports 8285 and 8472 traffic for all hosts participating in the overlay network. +see [here +](https://coreos.com/flannel/docs/latest/troubleshooting.html#firewalls). + +Note that `flannel` works on `amd64`, `arm`, `arm64`, `ppc64le` and `s390x` under Linux. +Windows (`amd64`) is claimed as supported in v0.11.0 but the usage is undocumented. + +```shell +kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml +``` + +For more information about `flannel`, see [the CoreOS flannel repository on GitHub +](https://github.com/coreos/flannel). +{{% /tab %}} + +{{% tab name="Kube-router" %}} +Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` +to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information +please see [here](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). + +Kube-router relies on kube-controller-manager to allocate pod CIDR for the nodes. Therefore, use `kubeadm init` with the `--pod-network-cidr` flag. + +Kube-router provides pod networking, network policy, and high-performing IP Virtual Server(IPVS)/Linux Virtual Server(LVS) based service proxy. + +For information on setting up Kubernetes cluster with Kube-router using kubeadm, please see official [setup guide](https://github.com/cloudnativelabs/kube-router/blob/master/docs/kubeadm.md). +{{% /tab %}} + +{{% tab name="Romana" %}} +Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` +to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information +please see [here](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). + +The official Romana set-up guide is [here](https://github.com/romana/romana/tree/master/containerize#using-kubeadm). + +Romana works on `amd64` only. + +```shell +kubectl apply -f https://raw.githubusercontent.com/romana/romana/master/containerize/specs/romana-kubeadm.yml +``` +{{% /tab %}} + +{{% tab name="Weave Net" %}} +Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` +to pass bridged IPv4 traffic to iptables' chains. 
This is a requirement for some CNI plugins to work, for more information +please see [here](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). + +The official Weave Net set-up guide is [here](https://www.weave.works/docs/net/latest/kube-addon/). + +Weave Net works on `amd64`, `arm`, `arm64` and `ppc64le` without any extra action required. +Weave Net sets hairpin mode by default. This allows Pods to access themselves via their Service IP address +if they don't know their PodIP. + +```shell +kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" +``` +{{% /tab %}} + +{{% tab name="JuniperContrail/TungstenFabric" %}} +Provides overlay SDN solution, delivering multicloud networking, hybrid cloud networking, +simultaneous overlay-underlay support, network policy enforcement, network isolation, +service chaining and flexible load balancing. + +There are multiple, flexible ways to install JuniperContrail/TungstenFabric CNI. + +Kindly refer to this quickstart: [TungstenFabric](https://tungstenfabric.github.io/website/) +{{% /tab %}} + +{{% tab name="Contiv-VPP" %}} +[Contiv-VPP](https://contivpp.io/) employs a programmable CNF vSwitch based on [FD.io VPP](https://fd.io/), +offering feature-rich & high-performance cloud-native networking and services. + +It implements k8s services and network policies in the user space (on VPP). + +Please refer to this installation guide: [Contiv-VPP Manual Installation](https://github.com/contiv/vpp/blob/master/docs/setup/MANUAL_INSTALL.md) +{{% /tab %}} + +{{< /tabs >}} + + +Once a pod network has been installed, you can confirm that it is working by +checking that the CoreDNS pod is Running in the output of `kubectl get pods --all-namespaces`. +And once the CoreDNS pod is up and running, you can continue by joining your nodes. + +If your network is not working or CoreDNS is not in the Running state, checkout our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). + +### Control plane node isolation + +By default, your cluster will not schedule pods on the control-plane node for security +reasons. If you want to be able to schedule pods on the control-plane node, e.g. for a +single-machine Kubernetes cluster for development, run: + +```bash +kubectl taint nodes --all node-role.kubernetes.io/master- +``` + +With output looking something like: + +``` +node "test-01" untainted +taint "node-role.kubernetes.io/master:" not found +taint "node-role.kubernetes.io/master:" not found +``` + +This will remove the `node-role.kubernetes.io/master` taint from any nodes that +have it, including the control-plane node, meaning that the scheduler will then be able +to schedule pods everywhere. + +### Joining your nodes {#join-nodes} + +The nodes are where your workloads (containers and pods, etc) run. To add new nodes to your cluster do the following for each machine: + +* SSH to the machine +* Become root (e.g. `sudo su -`) +* Run the command that was output by `kubeadm init`. 
For example: + +``` bash +kubeadm join --token : --discovery-token-ca-cert-hash sha256: +``` + +If you do not have the token, you can get it by running the following command on the control-plane node: + +``` bash +kubeadm token list +``` + +The output is similar to this: + +``` console +TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS +8ewj1p.9r9hcjoqgajrj4gi 23h 2018-06-12T02:51:28Z authentication, The default bootstrap system: + signing token generated by bootstrappers: + 'kubeadm init'. kubeadm: + default-node-token +``` + +By default, tokens expire after 24 hours. If you are joining a node to the cluster after the current token has expired, +you can create a new token by running the following command on the control-plane node: + +``` bash +kubeadm token create +``` + +The output is similar to this: + +``` console +5didvk.d09sbcov8ph2amjw +``` + +If you don't have the value of `--discovery-token-ca-cert-hash`, you can get it by running the following command chain on the control-plane node: + +``` bash +openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \ + openssl dgst -sha256 -hex | sed 's/^.* //' +``` + +The output is similar to this: + +``` console +8cb2de97839780a412b93877f8507ad6c94f73add17d5d7058e91741c9d5ec78 +``` + +{{< note >}} +To specify an IPv6 tuple for `:`, IPv6 address must be enclosed in square brackets, for example: `[fd00::101]:2073`. +{{< /note >}} + +The output should look something like: + +``` +[preflight] Running pre-flight checks + +... (log output of join workflow) ... + +Node join complete: +* Certificate signing request sent to master and response + received. +* Kubelet informed of new secure connection details. + +Run 'kubectl get nodes' on the master to see this machine join. +``` + +A few seconds later, you should notice this node in the output from `kubectl get +nodes` when run on the control-plane node. + +### (Optional) Controlling your cluster from machines other than the control-plane node + +In order to get a kubectl on some other computer (e.g. laptop) to talk to your +cluster, you need to copy the administrator kubeconfig file from your control-plane node +to your workstation like this: + +``` bash +scp root@:/etc/kubernetes/admin.conf . +kubectl --kubeconfig ./admin.conf get nodes +``` + +{{< note >}} +The example above assumes SSH access is enabled for root. If that is not the +case, you can copy the `admin.conf` file to be accessible by some other user +and `scp` using that other user instead. + +The `admin.conf` file gives the user _superuser_ privileges over the cluster. +This file should be used sparingly. For normal users, it's recommended to +generate an unique credential to which you whitelist privileges. You can do +this with the `kubeadm alpha kubeconfig user --client-name ` +command. That command will print out a KubeConfig file to STDOUT which you +should save to a file and distribute to your user. After that, whitelist +privileges by using `kubectl create (cluster)rolebinding`. +{{< /note >}} + +### (Optional) Proxying API Server to localhost + +If you want to connect to the API Server from outside the cluster you can use +`kubectl proxy`: + +```bash +scp root@:/etc/kubernetes/admin.conf . 
+kubectl --kubeconfig ./admin.conf proxy +``` + +You can now access the API Server locally at `http://localhost:8001/api/v1` + +## Tear down {#tear-down} + +To undo what kubeadm did, you should first [drain the +node](/docs/reference/generated/kubectl/kubectl-commands#drain) and make +sure that the node is empty before shutting it down. + +Talking to the control-plane node with the appropriate credentials, run: + +```bash +kubectl drain --delete-local-data --force --ignore-daemonsets +kubectl delete node +``` + +Then, on the node being removed, reset all kubeadm installed state: + +```bash +kubeadm reset +``` + +The reset process does not reset or clean up iptables rules or IPVS tables. If you wish to reset iptables, you must do so manually: + +```bash +iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X +``` + +If you want to reset the IPVS tables, you must run the following command: + +```bash +ipvsadm -C +``` + +If you wish to start over simply run `kubeadm init` or `kubeadm join` with the +appropriate arguments. + +More options and information about the +[`kubeadm reset command`](/docs/reference/setup-tools/kubeadm/kubeadm-reset/). + +## Maintaining a cluster {#lifecycle} + +Instructions for maintaining kubeadm clusters (e.g. upgrades,downgrades, etc.) can be found [here.](/docs/tasks/administer-cluster/kubeadm) + +## Explore other add-ons {#other-addons} + +See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to explore other add-ons, +including tools for logging, monitoring, network policy, visualization & +control of your Kubernetes cluster. + +## What's next {#whats-next} + +* Verify that your cluster is running properly with [Sonobuoy](https://github.com/heptio/sonobuoy) +* Learn about kubeadm's advanced usage in the [kubeadm reference documentation](/docs/reference/setup-tools/kubeadm/kubeadm) +* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). +* Configure log rotation. You can use **logrotate** for that. When using Docker, you can specify log rotation options for Docker daemon, for example `--log-driver=json-file --log-opt=max-size=10m --log-opt=max-file=5`. See [Configure and troubleshoot the Docker daemon](https://docs.docker.com/engine/admin/) for more details. + +## Feedback {#feedback} + +* For bugs, visit [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues) +* For support, visit kubeadm Slack Channel: + [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/) +* General SIG Cluster Lifecycle Development Slack Channel: + [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/) +* SIG Cluster Lifecycle [SIG information](#TODO) +* SIG Cluster Lifecycle Mailing List: + [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) + +## Version skew policy {#version-skew-policy} + +The kubeadm CLI tool of version vX.Y may deploy clusters with a control plane of version vX.Y or vX.(Y-1). +kubeadm CLI vX.Y can also upgrade an existing kubeadm-created cluster of version vX.(Y-1). + +Due to that we can't see into the future, kubeadm CLI vX.Y may or may not be able to deploy vX.(Y+1) clusters. + +Example: kubeadm v1.8 can deploy both v1.7 and v1.8 clusters and upgrade v1.7 kubeadm-created clusters to +v1.8. 
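Before upgrading, it can help to compare the kubeadm CLI version against the running cluster; a hedged sketch of such a check:

```bash
# Version of the kubeadm binary you are about to use
kubeadm version -o short

# Client and server versions reported by kubectl
kubectl version --short
```

Per the policy above, the kubeadm CLI you use should be at the target version and at most one minor version ahead of the cluster it manages.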
+ +These resources provide more information on supported version skew between kubelets and the control plane, and other Kubernetes components: + +* Kubernetes [version and version-skew policy](/docs/setup/release/version-skew-policy/) +* Kubeadm-specific [installation guide](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) + +## kubeadm works on multiple platforms {#multi-platform} + +kubeadm deb/rpm packages and binaries are built for amd64, arm (32-bit), arm64, ppc64le, and s390x +following the [multi-platform +proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/multi-platform.md). + +Multiplatform container images for the control plane and addons are also supported since v1.12. + +Only some of the network providers offer solutions for all platforms. Please consult the list of +network providers above or the documentation from each provider to figure out whether the provider +supports your chosen platform. + +## Limitations {#limitations} + +The cluster created here has a single control-plane node, with a single etcd database +running on it. This means that if the control-plane node fails, your cluster may lose +data and may need to be recreated from scratch. + +Workarounds: + +* Regularly [back up etcd](https://coreos.com/etcd/docs/latest/admin_guide.html). The + etcd data directory configured by kubeadm is at `/var/lib/etcd` on the control-plane node. + +* Use multiple control-plane nodes by completing the + [HA setup](/docs/setup/independent/ha-topology) instead. + +## Troubleshooting {#troubleshooting} + +If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md b/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md index 472d5b0bc9509..23806e3d206fe 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md @@ -43,7 +43,7 @@ plane instance are lost, and redundancy is compromised. You can mitigate this ri You should therefore run a minimum of three stacked control plane nodes for an HA cluster. This is the default topology in kubeadm. A local etcd member is created automatically -on control plane nodes when using `kubeadm init` and `kubeadm join --experimental-control-plane`. +on control plane nodes when using `kubeadm init` and `kubeadm join --control-plane`. ![Stacked etcd topology](/images/kubeadm/kubeadm-ha-topology-stacked-etcd.svg) diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md index 72119ed797dd1..ea63a381a606a 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -19,12 +19,10 @@ control plane nodes and etcd members are separated. Before proceeding, you should carefully consider which approach best meets the needs of your applications and environment. [This comparison topic](/docs/setup/production-environment/tools/kubeadm/ha-topology/) outlines the advantages and disadvantages of each. 
-You should also be aware that setting up HA clusters with kubeadm is still experimental and will be further -simplified in future versions. You might encounter issues with upgrading your clusters, for example. -We encourage you to try either approach, and provide us with feedback in the kubeadm -[issue tracker](https://github.com/kubernetes/kubeadm/issues/new). +If you encounter issues with setting up the HA cluster, please provide us with feedback +in the kubeadm [issue tracker](https://github.com/kubernetes/kubeadm/issues/new). -See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14). +See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15). {{< caution >}} This page does not address running your cluster on a cloud provider. In a cloud @@ -104,7 +102,7 @@ option. Your cluster requirements may need a different configuration. 1. On the first control plane node, create a configuration file called `kubeadm-config.yaml`: - apiVersion: kubeadm.k8s.io/v1beta1 + apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: stable controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" @@ -124,7 +122,7 @@ the `networking` object of `ClusterConfiguration`. 1. Initialize the control plane: ```sh - sudo kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs + sudo kubeadm init --config=kubeadm-config.yaml --upload-certs ``` - The `--experimental-upload-certs` flag is used to upload the certificates that should be shared across all the control-plane instances to the cluster. If instead, you prefer to copy certs across @@ -146,15 +144,27 @@ the `networking` object of `ClusterConfiguration`. ``` - Copy this output to a text file. You will need it later to join control plane and worker nodes to the cluster. - - When `--experimental-upload-certs` is used with `kubeadm init`, the certificates of the primary control plane + - When `--upload-certs` is used with `kubeadm init`, the certificates of the primary control plane are encrypted and uploaded in the `kubeadm-certs` Secret. - To re-upload the certificates and generate a new decryption key, use the following command on a control plane node that is already joined to the cluster: ```sh - sudo kubeadm init phase upload-certs --experimental-upload-certs + sudo kubeadm init phase upload-certs --upload-certs + ``` + + - You can also specify a custom `--certificate-key` during `init` that can later be used by `join`. + To generate such a key you can use the following command: + + ```sh + kubeadm alpha certs certificate-key ``` +{{< note >}} +The `kubeadm init` flags `--config` and `--certificate-key` cannot be mixed, therefore if you want +to use the [kubeadm configuration](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) you must add the `certificateKey` field in the appropriate config locations (under `InitConfiguration` and `JoinConfiguration: controlPlane`). +{{< /note >}} + {{< note >}} The `kubeadm-certs` Secret and decryption key expire after two hours. {{< /note >}} @@ -180,9 +190,11 @@ As stated in the command output, the certificate-key gives access to cluster sen ### Steps for the rest of the control plane nodes -{{< caution >}} -You must join new control plane nodes sequentially, only after the first node has finished initializing. -{{< /caution >}} +{{< note >}} +Since kubeadm version 1.15 you can join multiple control-plane nodes in parallel. 
+Prior to this version, you must join new control plane nodes sequentially, only after +the first node has finished initializing. +{{< /note >}} For each additional control plane node you should: @@ -190,10 +202,10 @@ For each additional control plane node you should: It should look something like this: ```sh - sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --experimental-control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 + sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 ``` - - The `--experimental-control-plane` flag tells `kubeadm join` to create a new control plane. + - The `--control-plane` flag tells `kubeadm join` to create a new control plane. - The `--certificate-key ...` will cause the control plane certificates to be downloaded from the `kubeadm-certs` Secret in the cluster and be decrypted using the given key. @@ -224,7 +236,7 @@ in the kubeadm config file. 1. Create a file called `kubeadm-config.yaml` with the following contents: - apiVersion: kubeadm.k8s.io/v1beta1 + apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: stable controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" @@ -254,7 +266,7 @@ etcd topology this is managed automatically. The following steps are exactly the same as described for stacked etcd setup: -1. Run `sudo kubeadm init --config kubeadm-config.yaml --experimental-upload-certs` on this node. +1. Run `sudo kubeadm init --config kubeadm-config.yaml --upload-certs` on this node. 1. Write the output join commands that are returned to a text file for later use. @@ -286,7 +298,7 @@ sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery ## Manual certificate distribution {#manual-certs} -If you choose to not use `kubeadm init` with the `--experimental-upload-certs` flag this means that +If you choose to not use `kubeadm init` with the `--upload-certs` flag this means that you are going to have to manually copy the certificates from the primary control plane node to the joining control plane nodes. diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md.orig b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md.orig new file mode 100644 index 0000000000000..ea63a381a606a --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md.orig @@ -0,0 +1,379 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Creating Highly Available clusters with kubeadm +content_template: templates/task +weight: 60 +--- + +{{% capture overview %}} + +This page explains two different approaches to setting up a highly available Kubernetes +cluster using kubeadm: + +- With stacked control plane nodes. This approach requires less infrastructure. The etcd members +and control plane nodes are co-located. +- With an external etcd cluster. This approach requires more infrastructure. The +control plane nodes and etcd members are separated. + +Before proceeding, you should carefully consider which approach best meets the needs of your applications +and environment. 
[This comparison topic](/docs/setup/production-environment/tools/kubeadm/ha-topology/) outlines the advantages and disadvantages of each. + +If you encounter issues with setting up the HA cluster, please provide us with feedback +in the kubeadm [issue tracker](https://github.com/kubernetes/kubeadm/issues/new). + +See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15). + +{{< caution >}} +This page does not address running your cluster on a cloud provider. In a cloud +environment, neither approach documented here works with Service objects of type +LoadBalancer, or with dynamic PersistentVolumes. +{{< /caution >}} + +{{% /capture %}} + +{{% capture prerequisites %}} + +For both methods you need this infrastructure: + +- Three machines that meet [kubeadm's minimum requirements](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) for + the masters +- Three machines that meet [kubeadm's minimum + requirements](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) for the workers +- Full network connectivity between all machines in the cluster (public or + private network) +- sudo privileges on all machines +- SSH access from one device to all nodes in the system +- `kubeadm` and `kubelet` installed on all machines. `kubectl` is optional. + +For the external etcd cluster only, you also need: + +- Three additional machines for etcd members + +{{% /capture %}} + +{{% capture steps %}} + +## First steps for both methods + +### Create load balancer for kube-apiserver + +{{< note >}} +There are many configurations for load balancers. The following example is only one +option. Your cluster requirements may need a different configuration. +{{< /note >}} + +1. Create a kube-apiserver load balancer with a name that resolves to DNS. + + - In a cloud environment you should place your control plane nodes behind a TCP + forwarding load balancer. This load balancer distributes traffic to all + healthy control plane nodes in its target list. The health check for + an apiserver is a TCP check on the port the kube-apiserver listens on + (default value `:6443`). + + - It is not recommended to use an IP address directly in a cloud environment. + + - The load balancer must be able to communicate with all control plane nodes + on the apiserver port. It must also allow incoming traffic on its + listening port. + + - [HAProxy](http://www.haproxy.org/) can be used as a load balancer. + + - Make sure the address of the load balancer always matches + the address of kubeadm's `ControlPlaneEndpoint`. + +1. Add the first control plane nodes to the load balancer and test the + connection: + + ```sh + nc -v LOAD_BALANCER_IP PORT + ``` + + - A connection refused error is expected because the apiserver is not yet + running. A timeout, however, means the load balancer cannot communicate + with the control plane node. If a timeout occurs, reconfigure the load + balancer to communicate with the control plane node. + +1. Add the remaining control plane nodes to the load balancer target group. + +## Stacked control plane and etcd nodes + +### Steps for the first control plane node + +1. On the first control plane node, create a configuration file called `kubeadm-config.yaml`: + + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + kubernetesVersion: stable + controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" + + - `kubernetesVersion` should be set to the Kubernetes version to use. 
This + example uses `stable`. + - `controlPlaneEndpoint` should match the address or DNS and port of the load balancer. + - It's recommended that the versions of kubeadm, kubelet, kubectl and Kubernetes match. + +{{< note >}} +Some CNI network plugins like Calico require a CIDR such as `192.168.0.0/16` and +some like Weave do not. See the [CNI network documentation](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network). +To add a pod CIDR set the `podSubnet: 192.168.0.0/16` field under +the `networking` object of `ClusterConfiguration`. +{{< /note >}} + +1. Initialize the control plane: + + ```sh + sudo kubeadm init --config=kubeadm-config.yaml --upload-certs + ``` + - The `--experimental-upload-certs` flag is used to upload the certificates that should be shared + across all the control-plane instances to the cluster. If instead, you prefer to copy certs across + control-plane nodes manually or using automation tools, please remove this flag and refer to [Manual + certificate distribution](#manual-certs) section bellow. + + After the command completes you should see something like so: + + ```sh + ... + You can now join any number of control-plane node by running the following command on each as a root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --experimental-control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 + + Please note that the certificate-key gives access to cluster sensitive data, keep it secret! + As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward. + + Then you can join any number of worker nodes by running the following on each as root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 + ``` + + - Copy this output to a text file. You will need it later to join control plane and worker nodes to the cluster. + - When `--upload-certs` is used with `kubeadm init`, the certificates of the primary control plane + are encrypted and uploaded in the `kubeadm-certs` Secret. + - To re-upload the certificates and generate a new decryption key, use the following command on a control plane + node that is already joined to the cluster: + + ```sh + sudo kubeadm init phase upload-certs --upload-certs + ``` + + - You can also specify a custom `--certificate-key` during `init` that can later be used by `join`. + To generate such a key you can use the following command: + + ```sh + kubeadm alpha certs certificate-key + ``` + +{{< note >}} +The `kubeadm init` flags `--config` and `--certificate-key` cannot be mixed, therefore if you want +to use the [kubeadm configuration](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) you must add the `certificateKey` field in the appropriate config locations (under `InitConfiguration` and `JoinConfiguration: controlPlane`). +{{< /note >}} + +{{< note >}} +The `kubeadm-certs` Secret and decryption key expire after two hours. +{{< /note >}} + +{{< caution >}} +As stated in the command output, the certificate-key gives access to cluster sensitive data, keep it secret! +{{< /caution >}} + +1. 
Apply the CNI plugin of your choice: + [Follow these instructions](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network) to install the CNI provider. Make sure the configuration corresponds to the Pod CIDR specified in the kubeadm configuration file if applicable. + + In this example we are using Weave Net: + + ```sh + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` + +1. Type the following and watch the pods of the control plane components get started: + + ```sh + kubectl get pod -n kube-system -w + ``` + +### Steps for the rest of the control plane nodes + +{{< note >}} +Since kubeadm version 1.15 you can join multiple control-plane nodes in parallel. +Prior to this version, you must join new control plane nodes sequentially, only after +the first node has finished initializing. +{{< /note >}} + +For each additional control plane node you should: + +1. Execute the join command that was previously given to you by the `kubeadm init` output on the first node. + It should look something like this: + + ```sh + sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 + ``` + + - The `--control-plane` flag tells `kubeadm join` to create a new control plane. + - The `--certificate-key ...` will cause the control plane certificates to be downloaded + from the `kubeadm-certs` Secret in the cluster and be decrypted using the given key. + +## External etcd nodes + +Setting up a cluster with external etcd nodes is similar to the procedure used for stacked etcd +with the exception that you should setup etcd first, and you should pass the etcd information +in the kubeadm config file. + +### Set up the etcd cluster + +1. Follow [these instructions](/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) to set up the etcd cluster. + +1. Setup SSH as described [here](#manual-certs). + +1. Copy the following files from any etcd node in the cluster to the first control plane node: + + ```sh + export CONTROL_PLANE="ubuntu@10.0.0.7" + scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/apiserver-etcd-client.key "${CONTROL_PLANE}": + ``` + + - Replace the value of `CONTROL_PLANE` with the `user@host` of the first control plane machine. + +### Set up the first control plane node + +1. Create a file called `kubeadm-config.yaml` with the following contents: + + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + kubernetesVersion: stable + controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" + etcd: + external: + endpoints: + - https://ETCD_0_IP:2379 + - https://ETCD_1_IP:2379 + - https://ETCD_2_IP:2379 + caFile: /etc/kubernetes/pki/etcd/ca.crt + certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt + keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key + +{{< note >}} +The difference between stacked etcd and external etcd here is that we are using +the `external` field for `etcd` in the kubeadm config. In the case of the stacked +etcd topology this is managed automatically. 
+{{< /note >}} + + - Replace the following variables in the config template with the appropriate values for your cluster: + + - `LOAD_BALANCER_DNS` + - `LOAD_BALANCER_PORT` + - `ETCD_0_IP` + - `ETCD_1_IP` + - `ETCD_2_IP` + +The following steps are exactly the same as described for stacked etcd setup: + +1. Run `sudo kubeadm init --config kubeadm-config.yaml --upload-certs` on this node. + +1. Write the output join commands that are returned to a text file for later use. + +1. Apply the CNI plugin of your choice. The given example is for Weave Net: + + ```sh + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` + +### Steps for the rest of the control plane nodes + +The steps are the same as for the stacked etcd setup: + +- Make sure the first control plane node is fully initialized. +- Join each control plane node with the join command you saved to a text file. It's recommended +to join the control plane nodes one at a time. +- Don't forget that the decryption key from `--certificate-key` expires after two hours, by default. + +## Common tasks after bootstrapping control plane + +### Install workers + +Worker nodes can be joined to the cluster with the command you stored previously +as the output from the `kubeadm init` command: + +```sh +sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 +``` + +## Manual certificate distribution {#manual-certs} + +If you choose to not use `kubeadm init` with the `--upload-certs` flag this means that +you are going to have to manually copy the certificates from the primary control plane node to the +joining control plane nodes. + +There are many ways to do this. In the following example we are using `ssh` and `scp`: + +SSH is required if you want to control all nodes from a single machine. + +1. Enable ssh-agent on your main device that has access to all other nodes in + the system: + + ``` + eval $(ssh-agent) + ``` + +1. Add your SSH identity to the session: + + ``` + ssh-add ~/.ssh/path_to_private_key + ``` + +1. SSH between nodes to check that the connection is working correctly. + + - When you SSH to any node, make sure to add the `-A` flag: + + ``` + ssh -A 10.0.0.7 + ``` + + - When using sudo on any node, make sure to preserve the environment so SSH + forwarding works: + + ``` + sudo -E -s + ``` + +1. After configuring SSH on all the nodes you should run the following script on the first control plane node after + running `kubeadm init`. This script will copy the certificates from the first control plane node to the other + control plane nodes: + + In the following example, replace `CONTROL_PLANE_IPS` with the IP addresses of the + other control plane nodes. + ```sh + USER=ubuntu # customizable + CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" + for host in ${CONTROL_PLANE_IPS}; do + scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt + scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key + done + ``` + +{{< caution >}} +Copy only the certificates in the above list. 
kubeadm will take care of generating the rest of the certificates +with the required SANs for the joining control-plane instances. If you copy all the certificates by mistake, +the creation of additional nodes could fail due to a lack of required SANs. +{{< /caution >}} + +1. Then on each joining control plane node you have to run the following script before running `kubeadm join`. + This script will move the previously copied certificates from the home directory to `/etc/kubernetes/pki`: + + ```sh + USER=ubuntu # customizable + mkdir -p /etc/kubernetes/pki/etcd + mv /home/${USER}/ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/ca.key /etc/kubernetes/pki/ + mv /home/${USER}/sa.pub /etc/kubernetes/pki/ + mv /home/${USER}/sa.key /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ + mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt + mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key + ``` +{{% /capture %}} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index bf55fba35d3e1..b7315a74d76a2 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -53,7 +53,7 @@ route, we recommend you add IP route(s) so Kubernetes cluster addresses go via t ## Check required ports -### Master node(s) +### Control-plane node(s) | Protocol | Direction | Port Range | Purpose | Used By | |----------|-----------|------------|-------------------------|---------------------------| @@ -75,7 +75,7 @@ route, we recommend you add IP route(s) so Kubernetes cluster addresses go via t Any port numbers marked with * are overridable, so you will need to ensure any custom ports you provide are also open. -Although etcd ports are included in master nodes, you can also host your own +Although etcd ports are included in control-plane nodes, you can also host your own etcd cluster externally or on custom ports. The pod network plugin you use (see below) may also require certain ports to be @@ -202,7 +202,7 @@ systemctl enable --now kubelet Install CNI plugins (required for most pod network): ```bash -CNI_VERSION="v0.6.0" +CNI_VERSION="v0.7.5" mkdir -p /opt/cni/bin curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz ``` @@ -210,7 +210,7 @@ curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_ Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI)) ```bash -CRICTL_VERSION="v1.11.1" +CRICTL_VERSION="v1.12.0" mkdir -p /opt/bin curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz ``` @@ -242,7 +242,7 @@ systemctl enable --now kubelet The kubelet is now restarting every few seconds, as it waits in a crashloop for kubeadm to tell it what to do. -## Configure cgroup driver used by kubelet on Master Node +## Configure cgroup driver used by kubelet on control-plane node When using Docker, kubeadm will automatically detect the cgroup driver for the kubelet and set it in the `/var/lib/kubelet/kubeadm-flags.env` file during runtime. 
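For a quick check of what kubeadm detects here, you can ask Docker for its cgroup driver and, after `kubeadm init` or `kubeadm join` has run, look at the generated flags file. This is only a sketch; the exact contents of `kubeadm-flags.env` differ between environments:

```bash
# Show the cgroup driver Docker is configured with (typically "cgroupfs" or "systemd")
docker info | grep -i "cgroup driver"

# Inspect the flags kubeadm wrote for the kubelet, including the detected cgroup driver
cat /var/lib/kubelet/kubeadm-flags.env
```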
@@ -267,6 +267,9 @@ systemctl daemon-reload systemctl restart kubelet ``` +The automatic detection of cgroup driver for other container runtimes +like CRI-O and containerd is work in progress. + ## Troubleshooting diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md.orig b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md.orig new file mode 100644 index 0000000000000..b7315a74d76a2 --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md.orig @@ -0,0 +1,282 @@ +--- +title: Installing kubeadm +content_template: templates/task +weight: 10 +card: + name: setup + weight: 20 + title: Install the kubeadm setup tool +--- + +{{% capture overview %}} + +This page shows how to install the `kubeadm` toolbox. +For information how to create a cluster with kubeadm once you have performed this installation process, see the [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) page. + +{{% /capture %}} + +{{% capture prerequisites %}} + +* One or more machines running one of: + - Ubuntu 16.04+ + - Debian 9 + - CentOS 7 + - RHEL 7 + - Fedora 25/26 (best-effort) + - HypriotOS v1.0.1+ + - Container Linux (tested with 1800.6.0) +* 2 GB or more of RAM per machine (any less will leave little room for your apps) +* 2 CPUs or more +* Full network connectivity between all machines in the cluster (public or private network is fine) +* Unique hostname, MAC address, and product_uuid for every node. See [here](#verify-the-mac-address-and-product-uuid-are-unique-for-every-node) for more details. +* Certain ports are open on your machines. See [here](#check-required-ports) for more details. +* Swap disabled. You **MUST** disable swap in order for the kubelet to work properly. + +{{% /capture %}} + +{{% capture steps %}} + +## Verify the MAC address and product_uuid are unique for every node + +* You can get the MAC address of the network interfaces using the command `ip link` or `ifconfig -a` +* The product_uuid can be checked by using the command `sudo cat /sys/class/dmi/id/product_uuid` + +It is very likely that hardware devices will have unique addresses, although some virtual machines may have +identical values. Kubernetes uses these values to uniquely identify the nodes in the cluster. +If these values are not unique to each node, the installation process +may [fail](https://github.com/kubernetes/kubeadm/issues/31). + +## Check network adapters + +If you have more than one network adapter, and your Kubernetes components are not reachable on the default +route, we recommend you add IP route(s) so Kubernetes cluster addresses go via the appropriate adapter. 
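For example, if your nodes reach each other over a secondary adapter, you can add a route for the cluster addresses via that adapter. The sketch below assumes the default kubeadm service CIDR `10.96.0.0/12` and a secondary interface named `eth1`; substitute your own values:

```bash
# Send traffic for the service CIDR out of the adapter that connects the nodes
ip route add 10.96.0.0/12 dev eth1

# Confirm which interface a given cluster address will use
ip route get 10.96.0.1
```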
+ +## Check required ports + +### Control-plane node(s) + +| Protocol | Direction | Port Range | Purpose | Used By | +|----------|-----------|------------|-------------------------|---------------------------| +| TCP | Inbound | 6443* | Kubernetes API server | All | +| TCP | Inbound | 2379-2380 | etcd server client API | kube-apiserver, etcd | +| TCP | Inbound | 10250 | Kubelet API | Self, Control plane | +| TCP | Inbound | 10251 | kube-scheduler | Self | +| TCP | Inbound | 10252 | kube-controller-manager | Self | + +### Worker node(s) + +| Protocol | Direction | Port Range | Purpose | Used By | +|----------|-----------|-------------|-----------------------|-------------------------| +| TCP | Inbound | 10250 | Kubelet API | Self, Control plane | +| TCP | Inbound | 30000-32767 | NodePort Services** | All | + +** Default port range for [NodePort Services](/docs/concepts/services-networking/service/). + +Any port numbers marked with * are overridable, so you will need to ensure any +custom ports you provide are also open. + +Although etcd ports are included in control-plane nodes, you can also host your own +etcd cluster externally or on custom ports. + +The pod network plugin you use (see below) may also require certain ports to be +open. Since this differs with each pod network plugin, please see the +documentation for the plugins about what port(s) those need. + +## Installing runtime {#installing-runtime} + +Since v1.6.0, Kubernetes has enabled the use of CRI, Container Runtime Interface, by default. + +Since v1.14.0, kubeadm will try to automatically detect the container runtime on Linux nodes +by scanning through a list of well known domain sockets. The detectable runtimes and the +socket paths, that are used, can be found in the table below. + +| Runtime | Domain Socket | +|------------|----------------------------------| +| Docker | /var/run/docker.sock | +| containerd | /run/containerd/containerd.sock | +| CRI-O | /var/run/crio/crio.sock | + +If both Docker and containerd are detected together, Docker takes precedence. This is +needed, because Docker 18.09 ships with containerd and both are detectable. +If any other two or more runtimes are detected, kubeadm will exit with an appropriate +error message. + +On non-Linux nodes the container runtime used by default is Docker. + +If the container runtime of choice is Docker, it is used through the built-in +`dockershim` CRI implementation inside of the `kubelet`. + +Other CRI-based runtimes include: + +- [containerd](https://github.com/containerd/cri) (CRI plugin built into containerd) +- [cri-o](https://github.com/kubernetes-incubator/cri-o) +- [frakti](https://github.com/kubernetes/frakti) + +Refer to the [CRI installation instructions](/docs/setup/cri) for more information. + +## Installing kubeadm, kubelet and kubectl + +You will install these packages on all of your machines: + +* `kubeadm`: the command to bootstrap the cluster. + +* `kubelet`: the component that runs on all of the machines in your cluster + and does things like starting pods and containers. + +* `kubectl`: the command line util to talk to your cluster. + +kubeadm **will not** install or manage `kubelet` or `kubectl` for you, so you will +need to ensure they match the version of the Kubernetes control plane you want +kubeadm to install for you. If you do not, there is a risk of a version skew occurring that +can lead to unexpected, buggy behaviour. 
However, _one_ minor version skew between the +kubelet and the control plane is supported, but the kubelet version may never exceed the API +server version. For example, kubelets running 1.7.0 should be fully compatible with a 1.8.0 API server, +but not vice versa. + +For information about installing `kubectl`, see [Install and set up kubectl](/docs/tasks/tools/install-kubectl/). + +{{< warning >}} +These instructions exclude all Kubernetes packages from any system upgrades. +This is because kubeadm and Kubernetes require +[special attention to upgrade](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14/). +{{< /warning >}} + +For more information on version skews, see: + +* Kubernetes [version and version-skew policy](/docs/setup/release/version-skew-policy/) +* Kubeadm-specific [version skew policy](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) + +{{< tabs name="k8s_install" >}} +{{% tab name="Ubuntu, Debian or HypriotOS" %}} +```bash +apt-get update && apt-get install -y apt-transport-https curl +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - +cat <<EOF >/etc/apt/sources.list.d/kubernetes.list +deb https://apt.kubernetes.io/ kubernetes-xenial main +EOF +apt-get update +apt-get install -y kubelet kubeadm kubectl +apt-mark hold kubelet kubeadm kubectl +``` +{{% /tab %}} +{{% tab name="CentOS, RHEL or Fedora" %}} +```bash +cat <<EOF > /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +exclude=kube* +EOF + +# Set SELinux in permissive mode (effectively disabling it) +setenforce 0 +sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config + +yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes + +systemctl enable --now kubelet +``` + + **Note:** + + - Setting SELinux in permissive mode by running `setenforce 0` and `sed ...` effectively disables it. + This is required to allow containers to access the host filesystem, which is needed by pod networks for example. + You have to do this until SELinux support is improved in the kubelet. + - Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure + `net.bridge.bridge-nf-call-iptables` is set to 1 in your `sysctl` config, e.g. + + ```bash + cat <<EOF > /etc/sysctl.d/k8s.conf + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + EOF + sysctl --system + ``` + - Make sure that the `br_netfilter` module is loaded before this step. This can be done by running `lsmod | grep br_netfilter`. To load it explicitly call `modprobe br_netfilter`. 
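As mentioned in the note above, you can verify that `br_netfilter` is loaded, and load it explicitly if it is not; a minimal sketch:

```bash
# Check whether the module is loaded; load it if the first command prints nothing
lsmod | grep br_netfilter
modprobe br_netfilter
```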
+{{% /tab %}} +{{% tab name="Container Linux" %}} +Install CNI plugins (required for most pod network): + +```bash +CNI_VERSION="v0.7.5" +mkdir -p /opt/cni/bin +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz +``` + +Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI)) + +```bash +CRICTL_VERSION="v1.12.0" +mkdir -p /opt/bin +curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz +``` + +Install `kubeadm`, `kubelet`, `kubectl` and add a `kubelet` systemd service: + +```bash +RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" + +mkdir -p /opt/bin +cd /opt/bin +curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +chmod +x {kubeadm,kubelet,kubectl} + +curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service +mkdir -p /etc/systemd/system/kubelet.service.d +curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +``` + +Enable and start `kubelet`: + +```bash +systemctl enable --now kubelet +``` +{{% /tab %}} +{{< /tabs >}} + + +The kubelet is now restarting every few seconds, as it waits in a crashloop for +kubeadm to tell it what to do. + +## Configure cgroup driver used by kubelet on control-plane node + +When using Docker, kubeadm will automatically detect the cgroup driver for the kubelet +and set it in the `/var/lib/kubelet/kubeadm-flags.env` file during runtime. + +If you are using a different CRI, you have to modify the file +`/etc/default/kubelet` with your `cgroup-driver` value, like so: + +```bash +KUBELET_EXTRA_ARGS=--cgroup-driver= +``` + +This file will be used by `kubeadm init` and `kubeadm join` to source extra +user defined arguments for the kubelet. + +Please mind, that you **only** have to do that if the cgroup driver of your CRI +is not `cgroupfs`, because that is the default value in the kubelet already. + +Restarting the kubelet is required: + +```bash +systemctl daemon-reload +systemctl restart kubelet +``` + +The automatic detection of cgroup driver for other container runtimes +like CRI-O and containerd is work in progress. + + +## Troubleshooting + +If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). 
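If you do run into problems, collecting basic version and kubelet status information up front usually makes troubleshooting easier. A minimal sketch of commands that are commonly useful:

```bash
# Versions of the tools installed on this node
kubeadm version -o short
kubelet --version
kubectl version --client --short

# Current kubelet state and its most recent log lines
systemctl status kubelet --no-pager
journalctl -u kubelet --no-pager | tail -n 50
```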
+ +{{% capture whatsnext %}} + +* [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) + +{{% /capture %}} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md new file mode 100644 index 0000000000000..090b5efd5b590 --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md @@ -0,0 +1,63 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Configuring your Kubernetes cluster to self-host the control plane +content_template: templates/concept +weight: 100 +--- + +{{% capture overview %}} + +### Self-hosting the Kubernetes control plane {#self-hosting} + +As of 1.8, you can experimentally create a _self-hosted_ Kubernetes control +plane. This means that key components such as the API server, controller +manager, and scheduler run as [DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) +configured via the Kubernetes API instead of [static pods](/docs/tasks/administer-cluster/static-pod/) +configured in the kubelet via static files. + +To create a self-hosted cluster see the `kubeadm alpha selfhosting pivot` command. + +#### Caveats + +{{< caution >}} +This feature pivots your cluster into an unsupported state, rendering kubeadm unable +to manage your cluster any longer. This includes `kubeadm upgrade`. +{{< /caution >}} + +1. Self-hosting in 1.8 and later has some important limitations. In particular, a + self-hosted cluster _cannot recover from a reboot of the control-plane node_ + without manual intervention. + +1. By default, self-hosted control plane Pods rely on credentials loaded from + [`hostPath`](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) + volumes. Except for initial creation, these credentials are not managed by + kubeadm. + +1. The self-hosted portion of the control plane does not include etcd, + which still runs as a static Pod. + +#### Process + +The self-hosting bootstrap process is documented in the [kubeadm design +document](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting). + +In summary, `kubeadm alpha selfhosting` works as follows: + + 1. Waits for the bootstrap static control plane to be running and + healthy. This is identical to the `kubeadm init` process without self-hosting. + + 1. Uses the static control plane Pod manifests to construct a set of + DaemonSet manifests that will run the self-hosted control plane. + It also modifies these manifests where necessary, for example adding new volumes + for secrets. + + 1. Creates DaemonSets in the `kube-system` namespace and waits for the + resulting Pods to be running. + + 1. Once self-hosted Pods are operational, their associated static Pods are deleted + and kubeadm moves on to install the next component. This triggers kubelet to + stop those static Pods. + + 1. When the original static control plane stops, the new self-hosted control + plane is able to bind to listening ports and become active. 
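As a rough sketch of the workflow (this is an alpha feature, so exact command names and output can change between releases), pivoting an existing kubeadm cluster and verifying the result might look like:

```bash
# Pivot the static-Pod control plane to self-hosted DaemonSets; run on a control-plane node
sudo kubeadm alpha selfhosting pivot

# Verify that control plane components now run as DaemonSet-managed Pods in kube-system
kubectl get daemonsets -n kube-system
kubectl get pods -n kube-system -o wide
```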
diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index a6ff15f52fe86..abe560e87f12a 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -82,7 +82,7 @@ this example. HOST=${ETCDHOSTS[$i]} NAME=${NAMES[$i]} cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml - apiVersion: "kubeadm.k8s.io/v1beta1" + apiVersion: "kubeadm.k8s.io/v1beta2" kind: ClusterConfiguration etcd: local: diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md.orig b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md.orig new file mode 100644 index 0000000000000..5cb4a4357074c --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md.orig @@ -0,0 +1,321 @@ +--- +title: Troubleshooting kubeadm +content_template: templates/concept +weight: 20 +--- + +{{% capture overview %}} + +As with any program, you might run into an error installing or running kubeadm. +This page lists some common failure scenarios and have provided steps that can help you understand and fix the problem. + +If your problem is not listed below, please follow the following steps: + +- If you think your problem is a bug with kubeadm: + - Go to [github.com/kubernetes/kubeadm](https://github.com/kubernetes/kubeadm/issues) and search for existing issues. + - If no issue exists, please [open one](https://github.com/kubernetes/kubeadm/issues/new) and follow the issue template. + +- If you are unsure about how kubeadm works, you can ask on [Slack](http://slack.k8s.io/) in #kubeadm, or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include + relevant tags like `#kubernetes` and `#kubeadm` so folks can help you. + +{{% /capture %}} + +{{% capture body %}} + +## `ebtables` or some similar executable not found during installation + +If you see the following warnings while running `kubeadm init` + +```sh +[preflight] WARNING: ebtables not found in system path +[preflight] WARNING: ethtool not found in system path +``` + +Then you may be missing `ebtables`, `ethtool` or a similar executable on your node. You can install them with the following commands: + +- For Ubuntu/Debian users, run `apt install ebtables ethtool`. +- For CentOS/Fedora users, run `yum install ebtables ethtool`. + +## kubeadm blocks waiting for control plane during installation + +If you notice that `kubeadm init` hangs after printing out the following line: + +```sh +[apiclient] Created API client, waiting for the control plane to become ready +``` + +This may be caused by a number of problems. The most common are: + +- network connection problems. Check that your machine has full network connectivity before continuing. +- the default cgroup driver configuration for the kubelet differs from that used by Docker. + Check the system log file (e.g. `/var/log/message`) or examine the output from `journalctl -u kubelet`. If you see something like the following: + + ```shell + error: failed to run Kubelet: failed to create kubelet: + misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs" + ``` + + There are two common ways to fix the cgroup driver problem: + + 1. 
Install Docker again following instructions + [here](/docs/setup/production-environment/container-runtimes/#docker). + + 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to + [Configure cgroup driver used by kubelet on Master Node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-master-node) + +- control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`. + +## kubeadm blocks when removing managed containers + +The following could happen if Docker halts and does not remove any Kubernetes-managed containers: + +```bash +sudo kubeadm reset +[preflight] Running pre-flight checks +[reset] Stopping the kubelet service +[reset] Unmounting mounted directories in "/var/lib/kubelet" +[reset] Removing kubernetes-managed containers +(block) +``` + +A possible solution is to restart the Docker service and then re-run `kubeadm reset`: + +```bash +sudo systemctl restart docker.service +sudo kubeadm reset +``` + +Inspecting the logs for docker may also be useful: + +```sh +journalctl -ul docker +``` + +## Pods in `RunContainerError`, `CrashLoopBackOff` or `Error` state + +Right after `kubeadm init` there should not be any pods in these states. + +- If there are pods in one of these states _right after_ `kubeadm init`, please open an + issue in the kubeadm repo. `coredns` (or `kube-dns`) should be in the `Pending` state + until you have deployed the network solution. +- If you see Pods in the `RunContainerError`, `CrashLoopBackOff` or `Error` state + after deploying the network solution and nothing happens to `coredns` (or `kube-dns`), + it's very likely that the Pod Network solution that you installed is somehow broken. + You might have to grant it more RBAC privileges or use a newer version. Please file + an issue in the Pod Network providers' issue tracker and get the issue triaged there. +- If you install a version of Docker older than 1.12.1, remove the `MountFlags=slave` option + when booting `dockerd` with `systemd` and restart `docker`. You can see the MountFlags in `/usr/lib/systemd/system/docker.service`. + MountFlags can interfere with volumes mounted by Kubernetes, and put the Pods in `CrashLoopBackOff` state. + The error happens when Kubernetes does not find `var/run/secrets/kubernetes.io/serviceaccount` files. + +## `coredns` (or `kube-dns`) is stuck in the `Pending` state + +This is **expected** and part of the design. kubeadm is network provider-agnostic, so the admin +should [install the pod network solution](/docs/concepts/cluster-administration/addons/) +of choice. You have to install a Pod Network +before CoreDNS may be deployed fully. Hence the `Pending` state before the network is set up. + +## `HostPort` services do not work + +The `HostPort` and `HostIP` functionality is available depending on your Pod Network +provider. Please contact the author of the Pod Network solution to find out whether +`HostPort` and `HostIP` functionality are available. + +Calico, Canal, and Flannel CNI providers are verified to support HostPort. + +For more information, see the [CNI portmap documentation](https://github.com/containernetworking/plugins/blob/master/plugins/meta/portmap/README.md). + +If your network provider does not support the portmap CNI plugin, you may need to use the [NodePort feature of +services](/docs/concepts/services-networking/service/#nodeport) or use `HostNetwork=true`. 
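If you are unsure whether your CNI configuration chains the portmap plugin, inspecting the CNI config files on a node is a quick check. This sketch assumes the default CNI configuration directory `/etc/cni/net.d`:

```bash
# Look for the portmap plugin in the CNI plugin chain
grep -R "portmap" /etc/cni/net.d/
```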
+ +## Pods are not accessible via their Service IP + +- Many network add-ons do not yet enable [hairpin mode](/docs/tasks/debug-application-cluster/debug-service/#a-pod-cannot-reach-itself-via-service-ip) + which allows pods to access themselves via their Service IP. This is an issue related to + [CNI](https://github.com/containernetworking/cni/issues/476). Please contact the network + add-on provider to get the latest status of their support for hairpin mode. + +- If you are using VirtualBox (directly or via Vagrant), you will need to + ensure that `hostname -i` returns a routable IP address. By default the first + interface is connected to a non-routable host-only network. A work around + is to modify `/etc/hosts`, see this [Vagrantfile](https://github.com/errordeveloper/k8s-playground/blob/22dd39dfc06111235620e6c4404a96ae146f26fd/Vagrantfile#L11) + for an example. + +## TLS certificate errors + +The following error indicates a possible certificate mismatch. + +```none +# kubectl get pods +Unable to connect to the server: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes") +``` + +- Verify that the `$HOME/.kube/config` file contains a valid certificate, and + regenerate a certificate if necessary. The certificates in a kubeconfig file + are base64 encoded. The `base64 -d` command can be used to decode the certificate + and `openssl x509 -text -noout` can be used for viewing the certificate information. +- Unset the `KUBECONFIG` environment variable using: + + ```sh + unset KUBECONFIG + ``` + + Or set it to the default `KUBECONFIG` location: + + ```sh + export KUBECONFIG=/etc/kubernetes/admin.conf + ``` + +- Another workaround is to overwrite the existing `kubeconfig` for the "admin" user: + + ```sh + mv $HOME/.kube $HOME/.kube.bak + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + ``` + +## Default NIC When using flannel as the pod network in Vagrant + +The following error might indicate that something was wrong in the pod network: + +```sh +Error from server (NotFound): the server could not find the requested resource +``` + +- If you're using flannel as the pod network inside Vagrant, then you will have to specify the default interface name for flannel. + + Vagrant typically assigns two interfaces to all VMs. The first, for which all hosts are assigned the IP address `10.0.2.15`, is for external traffic that gets NATed. + + This may lead to problems with flannel, which defaults to the first interface on a host. This leads to all hosts thinking they have the same public IP address. To prevent this, pass the `--iface eth1` flag to flannel so that the second interface is chosen. + +## Non-public IP used for containers + +In some situations `kubectl logs` and `kubectl run` commands may return with the following errors in an otherwise functional cluster: + +```sh +Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc65b868-glc5m/mysql: dial tcp 10.19.0.41:10250: getsockopt: no route to host +``` + +- This may be due to Kubernetes using an IP that can not communicate with other IPs on the seemingly same subnet, possibly by policy of the machine provider. +- Digital Ocean assigns a public IP to `eth0` as well as a private one to be used internally as anchor for their floating IP feature, yet `kubelet` will pick the latter as the node's `InternalIP` instead of the public one. 
+ + Use `ip addr show` to check for this scenario instead of `ifconfig` because `ifconfig` will not display the offending alias IP address. Alternatively an API endpoint specific to Digital Ocean allows to query for the anchor IP from the droplet: + + ```sh + curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address + ``` + + The workaround is to tell `kubelet` which IP to use using `--node-ip`. When using Digital Ocean, it can be the public one (assigned to `eth0`) or the private one (assigned to `eth1`) should you want to use the optional private network. The [`KubeletExtraArgs` section of the kubeadm `NodeRegistrationOptions` structure](https://github.com/kubernetes/kubernetes/blob/release-1.13/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go) can be used for this. + + Then restart `kubelet`: + + ```sh + systemctl daemon-reload + systemctl restart kubelet + ``` + +## `coredns` pods have `CrashLoopBackOff` or `Error` state + +If you have nodes that are running SELinux with an older version of Docker you might experience a scenario +where the `coredns` pods are not starting. To solve that you can try one of the following options: + +- Upgrade to a [newer version of Docker](/docs/setup/production-environment/container-runtimes/#docker). + +- [Disable SELinux](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security-enhanced_linux/sect-security-enhanced_linux-enabling_and_disabling_selinux-disabling_selinux). +- Modify the `coredns` deployment to set `allowPrivilegeEscalation` to `true`: + +```bash +kubectl -n kube-system get deployment coredns -o yaml | \ + sed 's/allowPrivilegeEscalation: false/allowPrivilegeEscalation: true/g' | \ + kubectl apply -f - +``` + +Another cause for CoreDNS to have `CrashLoopBackOff` is when a CoreDNS Pod deployed in Kubernetes detects a loop. [A number of workarounds](https://github.com/coredns/coredns/tree/master/plugin/loop#troubleshooting-loops-in-kubernetes-clusters) +are available to avoid Kubernetes trying to restart the CoreDNS Pod every time CoreDNS detects the loop and exits. + +{{< warning >}} +Disabling SELinux or setting `allowPrivilegeEscalation` to `true` can compromise +the security of your cluster. +{{< /warning >}} + +## etcd pods restart continually + +If you encounter the following error: + +``` +rpc error: code = 2 desc = oci runtime error: exec failed: container_linux.go:247: starting container process caused "process_linux.go:110: decoding init error from pipe caused \"read parent: connection reset by peer\"" +``` + +this issue appears if you run CentOS 7 with Docker 1.13.1.84. +This version of Docker can prevent the kubelet from executing into the etcd container. + +To work around the issue, choose one of these options: + +- Roll back to an earlier version of Docker, such as 1.13.1-75 +``` +yum downgrade docker-1.13.1-75.git8633870.el7.centos.x86_64 docker-client-1.13.1-75.git8633870.el7.centos.x86_64 docker-common-1.13.1-75.git8633870.el7.centos.x86_64 +``` + +- Install one of the more recent recommended versions, such as 18.06: +```bash +sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo +yum install docker-ce-18.06.1.ce-3.el7.x86_64 +``` + +## Not possible to pass a comma separated list of values to arguments inside a `--component-extra-args` flag + +`kubeadm init` flags such as `--component-extra-args` allow you to pass custom arguments to a control-plane +component like the kube-apiserver. 
However, this mechanism is limited due to the underlying type used for parsing +the values (`mapStringString`). + +If you decide to pass an argument that supports multiple, comma-separated values such as +`--apiserver-extra-args "enable-admission-plugins=LimitRanger,NamespaceExists"` this flag will fail with +`flag: malformed pair, expect string=string`. This happens because the list of arguments for +`--apiserver-extra-args` expects `key=value` pairs and in this case `NamespacesExists` is considered +as a key that is missing a value. + +Alternatively, you can try separating the `key=value` pairs like so: +`--apiserver-extra-args "enable-admission-plugins=LimitRanger,enable-admission-plugins=NamespaceExists"` +but this will result in the key `enable-admission-plugins` only having the value of `NamespaceExists`. + +A known workaround is to use the kubeadm [configuration file](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags). + +## kube-proxy scheduled before node is initialized by cloud-controller-manager + +In cloud provider scenarios, kube-proxy can end up being scheduled on new worker nodes before +the cloud-controller-manager has initialized the node addresses. This causes kube-proxy to fail +to pick up the node's IP address properly and has knock-on effects to the proxy function managing +load balancers. + +The following error can be seen in kube-proxy Pods: +``` +server.go:610] Failed to retrieve node IP: host IP unknown; known addresses: [] +proxier.go:340] invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP +``` + +A known solution is to patch the kube-proxy DaemonSet to allow scheduling it on control-plane +nodes regardless of their conditions, keeping it off of other nodes until their initial guarding +conditions abate: +``` +kubectl -n kube-system patch ds kube-proxy -p='{ "spec": { "template": { "spec": { "tolerations": [ { "key": "CriticalAddonsOnly", "operator": "Exists" }, { "effect": "NoSchedule", "key": "node-role.kubernetes.io/master" } ] } } } }' +``` + +The tracking issue for this problem is [here](https://github.com/kubernetes/kubeadm/issues/1027). + +## The NodeRegistration.Taints field is omitted when marshalling kubeadm configuration + +*Note: This [issue](https://github.com/kubernetes/kubeadm/issues/1358) only applies to tools that marshal kubeadm types (e.g. to a YAML configuration file). It will be fixed in kubeadm API v1beta2.* + +By default, kubeadm applies the `role.kubernetes.io/master:NoSchedule` taint to control-plane nodes. +If you prefer kubeadm to not taint the control-plane node, and set `InitConfiguration.NodeRegistration.Taints` to an empty slice, +the field will be omitted when marshalling. When the field is omitted, kubeadm applies the default taint. + +There are at least two workarounds: + +1. Use the `role.kubernetes.io/master:PreferNoSchedule` taint instead of an empty slice. [Pods will get scheduled on masters](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/), unless other nodes have capacity. + +2. 
Remove the taint after kubeadm init exits: +```bash +kubectl taint nodes NODE_NAME role.kubernetes.io/master:NoSchedule- +``` +{{% /capture %}} diff --git a/content/en/docs/setup/release/notes.md b/content/en/docs/setup/release/notes.md index 66caaffc904b1..55535c65d9bc2 100644 --- a/content/en/docs/setup/release/notes.md +++ b/content/en/docs/setup/release/notes.md @@ -412,7 +412,7 @@ New "APPLY" value for the "verb" metric label which indicates a PATCH with "Cont - Fixed dockershim panic issues when deleting docker images. ([#75367](https://github.com/kubernetes/kubernetes/pull/75367), [@feiskyer](https://github.com/feiskyer)) - Kubelet no longer watches `ConfigMaps` and `Secrets` for terminated pods, in worst scenario causing it to not be able to send other requests to kube-apiserver ([#74809](https://github.com/kubernetes/kubernetes/pull/74809), [@oxddr](https://github.com/oxddr)) - A new `TaintNodesByCondition` admission plugin taints newly created Node objects as "not ready", to fix a race condition that could cause pods to be scheduled on new nodes before their taints were updated to accurately reflect their reported conditions. This admission plugin is enabled by default if the `TaintNodesByCondition` feature is enabled. ([#73097](https://github.com/kubernetes/kubernetes/pull/73097), [@bsalamat](https://github.com/bsalamat)) -- kubelet now accepts `pid=` in the `--system-reserved` and `--kube-reserved` options to ensure that the specified number of process IDs will be reserved for the system as a whole and for Kubernetes system daemons respectively. Please reference `Kube Reserved` and `System Reserved` in `Reserve Compute Resources for System Daemons` in the Kubernetes documentation for general discussion of resource reservation. To utilize this functionality, you must set the feature gate `SupportNodePidsLimit=true` ([#73651](https://github.com/kubernetes/kubernetes/pull/73651) +- Node-level support for pid limiting via `pid=` in the `--system-reserved` and `--kube-reserved` has been graduated to beta and no longer requires setting the feature gate `SupportNodePidsLimit=true` explicitly. Please reference `Kube Reserved` and `System Reserved` in `Reserve Compute Resources for System Daemons` in the Kubernetes documentation for general discussion of resource reservation. ([#73651](https://github.com/kubernetes/kubernetes/pull/73651) ### Scheduling @@ -892,7 +892,7 @@ filename | sha512 hash * Breaking changes in client-go: ([#72214](https://github.com/kubernetes/kubernetes/pull/72214), [@caesarxuchao](https://github.com/caesarxuchao)) * The disk-cached discovery client is moved from k8s.io/client-go/discovery to k8s.io/client-go/discovery/cached/disk. * The memory-cached discovery client is moved from k8s.io/client-go/discovery/cached to k8s.io/client-go/discovery/cached/memory. -* kubelet now accepts `pid=` in the `--system-reserved` and `--kube-reserved` options to ensure that the specified number of process IDs will be reserved for the system as a whole and for Kubernetes system daemons respectively. Please reference `Kube Reserved` and `System Reserved` in `Reserve Compute Resources for System Daemons` in the Kubernetes documentation for general discussion of resource reservation. 
To utilize this functionality, you must set the feature gate `SupportNodePidsLimit=true` ([#73651](https://github.com/kubernetes/kubernetes/pull/73651), [@RobertKrawitz](https://github.com/RobertKrawitz)) +* Node-level support for pid limiting via `pid=` in the `--system-reserved` and `--kube-reserved` has been graduated to beta and no longer requires setting the feature gate `SupportNodePidsLimit=true` explicitly. Please reference `Kube Reserved` and `System Reserved` in `Reserve Compute Resources for System Daemons` in the Kubernetes documentation for general discussion of resource reservation ([#73651](https://github.com/kubernetes/kubernetes/pull/73651), [@RobertKrawitz](https://github.com/RobertKrawitz)) * The apiserver, including both the kube-apiserver and apiservers built with the generic apiserver library, will now return 413 RequestEntityTooLarge error if a json patch contains more than 10,000 operations. ([#74000](https://github.com/kubernetes/kubernetes/pull/74000), [@caesarxuchao](https://github.com/caesarxuchao)) * kubeadm: allow the usage of --kubeconfig-dir and --config flags on kubeadm init ([#73998](https://github.com/kubernetes/kubernetes/pull/73998), [@yagonobre](https://github.com/yagonobre)) * when pleg channel is full, discard events and record its count ([#72709](https://github.com/kubernetes/kubernetes/pull/72709), [@changyaowei](https://github.com/changyaowei)) diff --git a/content/en/docs/setup/release/version-skew-policy.md b/content/en/docs/setup/release/version-skew-policy.md index 959c97c6de12e..99501c8d1a7f7 100644 --- a/content/en/docs/setup/release/version-skew-policy.md +++ b/content/en/docs/setup/release/version-skew-policy.md @@ -113,8 +113,8 @@ Pre-requisites: * The `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` instances that communicate with this server are at version **1.n** (this ensures they are not newer than the existing API server version, and are within 1 minor version of the new API server version) * `kubelet` instances on all nodes are at version **1.n** or **1.(n-1)** (this ensures they are not newer than the existing API server version, and are within 2 minor versions of the new API server version) * Registered admission webhooks are able to handle the data the new `kube-apiserver` instance will send them: - * `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` objects are updated to include any new versions of REST resources added in **1.(n+1)** - * The webhooks are able to handle any new versions of REST resources that will be sent to them, and any new fields added to existing versions in **1.(n+1)** + * `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` objects are updated to include any new versions of REST resources added in **1.(n+1)** (or use the [`matchPolicy: Equivalent` option](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) available in v1.15+) + * The webhooks are able to handle any new versions of REST resources that will be sent to them, and any new fields added to existing versions in **1.(n+1)** Upgrade `kube-apiserver` to **1.(n+1)** diff --git a/content/en/docs/setup/release/version-skew-policy.md.orig b/content/en/docs/setup/release/version-skew-policy.md.orig new file mode 100644 index 0000000000000..99501c8d1a7f7 --- /dev/null +++ b/content/en/docs/setup/release/version-skew-policy.md.orig @@ -0,0 +1,148 @@ +--- +reviewers: +- sig-api-machinery +- sig-architecture +- sig-cli +- sig-cluster-lifecycle +- 
sig-node +- sig-release +title: Kubernetes version and version skew support policy +content_template: templates/concept +weight: 30 +--- + +{{% capture overview %}} +This document describes the maximum version skew supported between various Kubernetes components. +Specific cluster deployment tools may place additional restrictions on version skew. +{{% /capture %}} + +{{% capture body %}} + +## Supported versions + +Kubernetes versions are expressed as **x.y.z**, +where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning](http://semver.org/) terminology. +For more information, see [Kubernetes Release Versioning](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning). + +The Kubernetes project maintains release branches for the most recent three minor releases. + +Applicable fixes, including security fixes, may be backported to those three release branches, depending on severity and feasibility. +Patch releases are cut from those branches at a regular cadence, or as needed. +This decision is owned by the [patch release manager](https://github.com/kubernetes/sig-release/blob/master/release-team/role-handbooks/patch-release-manager/README.md#release-timing). +The patch release manager is a member of the [release team for each release](https://github.com/kubernetes/sig-release/tree/master/releases/). + +Minor releases occur approximately every 3 months, so each minor release branch is maintained for approximately 9 months. + +## Supported version skew + +### kube-apiserver + +In [highly-available (HA) clusters](/docs/setup/production-environment/tools/independent/high-availability/), the newest and oldest `kube-apiserver` instances must be within one minor version. + +Example: + +* newest `kube-apiserver` is at **1.13** +* other `kube-apiserver` instances are supported at **1.13** and **1.12** + +### kubelet + +`kubelet` must not be newer than `kube-apiserver`, and may be up to two minor versions older. + +Example: + +* `kube-apiserver` is at **1.13** +* `kubelet` is supported at **1.13**, **1.12**, and **1.11** + +{{< note >}} +If version skew exists between `kube-apiserver` instances in an HA cluster, this narrows the allowed `kubelet` versions. +{{}} + +Example: + +* `kube-apiserver` instances are at **1.13** and **1.12** +* `kubelet` is supported at **1.12**, and **1.11** (**1.13** is not supported because that would be newer than the `kube-apiserver` instance at version **1.12**) + +### kube-controller-manager, kube-scheduler, and cloud-controller-manager + +`kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` must not be newer than the `kube-apiserver` instances they communicate with. They are expected to match the `kube-apiserver` minor version, but may be up to one minor version older (to allow live upgrades). + +Example: + +* `kube-apiserver` is at **1.13** +* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` are supported at **1.13** and **1.12** + +{{< note >}} +If version skew exists between `kube-apiserver` instances in an HA cluster, and these components can communicate with any `kube-apiserver` instance in the cluster (for example, via a load balancer), this narrows the allowed versions of these components. 
+{{< /note >}} + +Example: + +* `kube-apiserver` instances are at **1.13** and **1.12** +* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` communicate with a load balancer that can route to any `kube-apiserver` instance +* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` are supported at **1.12** (**1.13** is not supported because that would be newer than the `kube-apiserver` instance at version **1.12**) + +### kubectl + +`kubectl` is supported within one minor version (older or newer) of `kube-apiserver`. + +Example: + +* `kube-apiserver` is at **1.13** +* `kubectl` is supported at **1.14**, **1.13**, and **1.12** + +{{< note >}} +If version skew exists between `kube-apiserver` instances in an HA cluster, this narrows the supported `kubectl` versions. +{{< /note >}} + +Example: + +* `kube-apiserver` instances are at **1.13** and **1.12** +* `kubectl` is supported at **1.13** and **1.12** (other versions would be more than one minor version skewed from one of the `kube-apiserver` components) + +## Supported component upgrade order + +The supported version skew between components has implications on the order in which components must be upgraded. +This section describes the order in which components must be upgraded to transition an existing cluster from version **1.n** to version **1.(n+1)**. + +### kube-apiserver + +Pre-requisites: + +* In a single-instance cluster, the existing `kube-apiserver` instance is **1.n** +* In an HA cluster, all `kube-apiserver` instances are at **1.n** or **1.(n+1)** (this ensures maximum skew of 1 minor version between the oldest and newest `kube-apiserver` instance) +* The `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` instances that communicate with this server are at version **1.n** (this ensures they are not newer than the existing API server version, and are within 1 minor version of the new API server version) +* `kubelet` instances on all nodes are at version **1.n** or **1.(n-1)** (this ensures they are not newer than the existing API server version, and are within 2 minor versions of the new API server version) +* Registered admission webhooks are able to handle the data the new `kube-apiserver` instance will send them: + * `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` objects are updated to include any new versions of REST resources added in **1.(n+1)** (or use the [`matchPolicy: Equivalent` option](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) available in v1.15+) + * The webhooks are able to handle any new versions of REST resources that will be sent to them, and any new fields added to existing versions in **1.(n+1)** + +Upgrade `kube-apiserver` to **1.(n+1)** + +{{< note >}} +Project policies for [API deprecation](/docs/reference/using-api/deprecation-policy/) and +[API change guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/api_changes.md) +require `kube-apiserver` to not skip minor versions when upgrading, even in single-instance clusters. 
+{{< /note >}} + +### kube-controller-manager, kube-scheduler, and cloud-controller-manager + +Pre-requisites: + +* The `kube-apiserver` instances these components communicate with are at **1.(n+1)** (in HA clusters in which these control plane components can communicate with any `kube-apiserver` instance in the cluster, all `kube-apiserver` instances must be upgraded before upgrading these components) + +Upgrade `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` to **1.(n+1)** + +### kubelet + +Pre-requisites: + +* The `kube-apiserver` instances the `kubelet` communicates with are at **1.(n+1)** + +Optionally upgrade `kubelet` instances to **1.(n+1)** (or they can be left at **1.n** or **1.(n-1)**) + +{{< warning >}} +Running a cluster with `kubelet` instances that are persistently two minor versions behind `kube-apiserver` is not recommended: + +* they must be upgraded within one minor version of `kube-apiserver` before the control plane can be upgraded +* it increases the likelihood of running `kubelet` versions older than the three maintained minor releases +{{}} diff --git a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md index ce1d0271e9ca7..b923b532b9bde 100644 --- a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -194,6 +194,25 @@ Known issues: {{% capture discussion %}} +## Garbage Collecting Load Balancers + +In usual case, the correlating load balancer resources in cloud provider should +be cleaned up soon after a LoadBalancer type Service is deleted. But it is known +that there are various corner cases where cloud resources are orphaned after the +associated Service is deleted. Finalizer Protection for Service LoadBalancers was +introduced to prevent this from happening. By using finalizers, a Service resource +will never be deleted until the correlating load balancer resources are also deleted. + +Specifically, if a Service has Type=LoadBalancer, the service controller will attach +a finalizer named `service.kubernetes.io/load-balancer-cleanup`. +The finalizer will only be removed after the load balancer resource is cleaned up. +This prevents dangling load balancer resources even in corner cases such as the +service controller crashing. + +This feature was introduced as alpha in Kubernetes v1.15. You can start using it by +enabling the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +`ServiceLoadBalancerFinalizer`. + ## External Load Balancer Providers It is important to note that the datapath for this functionality is provided by a load balancer external to the Kubernetes cluster. diff --git a/content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md b/content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md index df2aecb5abc32..a71fffaebfd26 100644 --- a/content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md +++ b/content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md @@ -224,6 +224,55 @@ If you are not running kube-proxy on a host running the API server, then you mus {{% /capture %}} +### Register APIService objects + +You can dynamically configure what client requests are proxied to extension +apiserver. 
The following is an example registration: + +```yaml + +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: +spec: + group: + version: + groupPriorityMinimum: + versionPriority: + service: + namespace: + name: + caBundle: +``` + +#### Contacting the extension apiserver + +Once the Kubernetes apiserver has determined a request should be sent to a extension apiserver, +it needs to know how to contact it. + +The `service` stanza is a reference to the service for a extension apiserver. +The service namespace and name are required. The port is optional and defaults to 443. +The path is optional and defaults to "/". + +Here is an example of an extension apiserver that is configured to be called on port "1234" +at the subpath "/my-path", and to verify the TLS connection against the ServerName +`my-service-name.my-service-namespace.svc` using a custom CA bundle. + +```yaml +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +... +spec: + ... + service: + namespace: my-service-namespace + name: my-service-name + port: 1234 + caBundle: "Ci0tLS0tQk......tLS0K" +... +``` + {{% capture whatsnext %}} * [Setup an extension api-server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) to work with the aggregation layer. diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md index 87fc115165ba1..8e7b16fe8d93b 100644 --- a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md +++ b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md @@ -29,6 +29,8 @@ level of your CustomResourceDefinitions or advance your API to a new version wit ## Overview +{{< feature-state state="beta" for_kubernetes_version="1.15" >}} + The CustomResourceDefinition API supports a `versions` field that you can use to support multiple versions of custom resources that you have developed. Versions can have different schemas with a conversion webhook to convert custom resources between versions. @@ -147,9 +149,11 @@ the version. ## Webhook conversion +{{< feature-state state="beta" for_kubernetes_version="1.15" >}} + {{< note >}} -Webhook conversion is introduced in Kubernetes 1.13 as an alpha feature. To use it, the -`CustomResourceWebhookConversion` feature should be enabled. Please refer to the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) documentation for more information. +Webhook conversion is available as beta since 1.15, and as alpha since Kubernetes 1.13. The +`CustomResourceWebhookConversion` feature must be enabled, which is the case automatically for many clusters for beta features. Please refer to the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) documentation for more information. {{< /note >}} The above example has a None conversion between versions which only sets the `apiVersion` field @@ -182,6 +186,10 @@ mutual TLS or other ways to authenticate the clients, see how to [authenticate API servers](/docs/reference/access-authn-authz/extensible-admission-controllers/#authenticate-apiservers). {{< /note >}} +#### Permissible mutations + +A conversion webhook must not mutate anything inside of `metadata` of the converted object other than `labels` and `annotations`. 
Attempted changes to `name`, `UID` and `namespace` are rejected and fail the request which caused the conversion. All other changes are just ignored. + ### Deploy the conversion webhook service Documentation for deploying the conversion webhook is the same as for the [admission webhook example service](/docs/reference/access-authn-authz/extensible-admission-controllers/#deploy_the_admission_webhook_service). @@ -242,7 +250,6 @@ spec: service: namespace: default name: example-conversion-webhook-server - # path is the url the API server will call. It should match what the webhook is serving at. The default is '/'. path: /crdconvert caBundle: # either Namespaced or Cluster @@ -259,11 +266,6 @@ spec: - ct ``` -{{< note >}} -When using `clientConfig.service`, the server cert must be valid for -`..svc`. -{{< /note >}} - You can save the CustomResourceDefinition in a YAML file, then use `kubectl apply` to apply it. @@ -273,6 +275,82 @@ kubectl apply -f my-versioned-crontab-with-conversion.yaml Make sure the conversion service is up and running before applying new changes. +### Contacting the webhook + +Once the API server has determined a request should be sent to a conversion webhook, +it needs to know how to contact the webhook. This is specified in the `webhookClientConfig` +stanza of the webhook configuration. + +Conversion webhooks can either be called via a URL or a service reference, +and can optionally include a custom CA bundle to use to verify the TLS connection. + +### URL + +`url` gives the location of the webhook, in standard URL form +(`scheme://host:port/path`). + +The `host` should not refer to a service running in the cluster; use +a service reference by specifying the `service` field instead. +The host might be resolved via external DNS in some apiservers +(i.e., `kube-apiserver` cannot resolve in-cluster DNS as that would +be a layering violation). `host` may also be an IP address. + +Please note that using `localhost` or `127.0.0.1` as a `host` is +risky unless you take great care to run this webhook on all hosts +which run an apiserver which might need to make calls to this +webhook. Such installs are likely to be non-portable, i.e., not easy +to turn up in a new cluster. + +The scheme must be "https"; the URL must begin with "https://". + +Attempting to use a user or basic auth e.g. "user:password@" is not allowed. +Fragments ("#...") and query parameters ("?...") are also not allowed. + +Here is an example of a conversion webhook configured to call a URL +(and expects the TLS certificate to be verified using system trust roots, so does not specify a caBundle): + +```yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +... +spec: + ... + conversion: + strategy: Webhook + webhookClientConfig: + url: "https://my-webhook.example.com:9443/my-webhook-path" +... +``` + +### Service Reference + +The `service` stanza inside `webhookClientConfig` is a reference to the service for a conversion webhook. +If the webhook is running within the cluster, then you should use `service` instead of `url`. +The service namespace and name are required. The port is optional and defaults to 443. +The path is optional and defaults to "/". + +Here is an example of a webhook that is configured to call a service on port "1234" +at the subpath "/my-path", and to verify the TLS connection against the ServerName +`my-service-name.my-service-namespace.svc` using a custom CA bundle. + +```yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +... 
+spec:
+  ...
+  conversion:
+    strategy: Webhook
+    webhookClientConfig:
+      service:
+        namespace: my-service-namespace
+        name: my-service-name
+        path: /my-path
+        port: 1234
+        caBundle: "Ci0tLS0tQk......tLS0K"
+...
+```
+
 ## Writing, reading, and updating versioned CustomResourceDefinition objects
 
 When an object is written, it is persisted at the version designated as the
diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md
index 00b9a7919b1fe..a48691add0216 100644
--- a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md
+++ b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md
@@ -65,6 +65,20 @@ spec:
     # shortNames allow shorter string to match your resource on the CLI
     shortNames:
     - ct
+  preserveUnknownFields: false
+  validation:
+    openAPIV3Schema:
+      type: object
+      properties:
+        spec:
+          type: object
+          properties:
+            cronSpec:
+              type: string
+            image:
+              type: string
+            replicas:
+              type: integer
 ```
 
 And create it:
@@ -178,6 +192,315 @@ Error from server (NotFound): Unable to list {"stable.example.com" "v1" "crontab
 
 If you later recreate the same CustomResourceDefinition, it will start out empty.
 
+## Specifying a structural schema
+
+{{< feature-state state="beta" for_kubernetes_version="1.15" >}}
+
+CustomResources traditionally store arbitrary JSON (next to `apiVersion`, `kind` and `metadata`, which is validated by the API server implicitly). With [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) a schema can be specified, which is validated during creation and updates; compare below for details and limits of such a schema.
+
+With `apiextensions.k8s.io/v1` the definition of a structural schema will be mandatory for CustomResourceDefinitions, while in `v1beta1` this is still optional.
+
+A structural schema is an [OpenAPI v3.0 validation schema](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) which:
+
+1. specifies a non-empty type (via `type` in OpenAPI) for the root, for each specified field of an object node (via `properties` or `additionalProperties` in OpenAPI) and for each item in an array node (via `items` in OpenAPI), with the exception of:
+   * a node with `x-kubernetes-int-or-string: true`
+   * a node with `x-kubernetes-preserve-unknown-fields: true`
+2. for each field in an object and each item in an array which is specified within any of `allOf`, `anyOf`, `oneOf` or `not`, the schema also specifies the field/item outside of those logical junctors (compare examples 1 and 2).
+3. does not set `description`, `type`, `default`, `additionalProperties`, `nullable` within an `allOf`, `anyOf`, `oneOf` or `not`, with the exception of the two patterns for `x-kubernetes-int-or-string: true` (see below).
+4. if `metadata` is specified, then only restrictions on `metadata.name` and `metadata.generateName` are allowed.
+
+
+Non-Structural Example 1:
+```yaml
+allOf:
+- properties:
+    foo:
+      ...
+```
+conflicts with rule 2. The following would be correct:
+```yaml
+properties:
+  foo:
+    ...
+allOf:
+- properties:
+    foo:
+      ...
+```
+
+Non-Structural Example 2:
+```yaml
+allOf:
+- items:
+    properties:
+      foo:
+        ...
+```
+conflicts with rule 2. The following would be correct:
+```yaml
+items:
+  properties:
+    foo:
+      ...
+allOf:
+- items:
+    properties:
+      foo:
+        ...
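+# `foo` is specified both at the top level (under `items.properties`) and again
+# inside `allOf`, which is what rule 2 of a structural schema requires.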
+``` + +Non-Structural Example 3: +```yaml +properties: + foo: + pattern: "abc" + metadata: + type: object + properties: + name: + type: string + pattern: "^a" + finalizers: + type: array + items: + type: string + pattern: "my-finalizer" +anyOf: +- properties: + bar: + type: integer + minimum: 42 + required: ["bar"] + description: "foo bar object" +``` +is not a structural schema because of the following violations: + +* the type at the root is missing (rule 1). +* the type of `foo` is missing (rule 1). +* `bar` inside of `anyOf` is not specified outside (rule 2). +* `bar`'s `type` is within `anyOf` (rule 3). +* the description is set within `anyOf` (rule 3). +* `metadata.finalizer` might not be restricted (rule 4). + +In contrast, the following, corresponding schema is structural: +```yaml +type: object +description: "foo bar object" +properties: + foo: + type: string + pattern: "abc" + bar: + type: integer + metadata: + type: object + properties: + name: + type: string + pattern: "^a" +anyOf: +- properties: + bar: + minimum: 42 + required: ["bar"] +``` + +Violations of the structural schema rules are reported in the `NonStructural` condition in the CustomResourceDefinition. + +Not being structural disables the following features: + +* [Validation Schema Publishing](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#publish-validation-schema-in-openapi-v2) +* [Webhook Conversion](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/#webhook-conversion) +* [Validation Schema Defaulting](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting) +* [Pruning](#preserving-unknown-fields) + +and possibly more features in the future. + +### Pruning versus preserving unknown fields + +{{< feature-state state="beta" for_kubernetes_version="1.15" >}} + +CustomResourceDefinitions traditionally store any (possibly validated) JSON as is in etcd. This means that unspecified fields (if there is a [OpenAPI v3.0 validation schema](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) at all) are persisted. This is in contrast to native Kubernetes resources like e.g. a pod where unknown fields are dropped before being persisted to etcd. We call this "pruning" of unknown fields. + +If a [structural OpenAPI v3 validation schema](#specifying-a-structural-schema) is defined (either in the global `spec.validation.openAPIV3Schema` or for each version) in a CustomResourceDefinition, pruning can be enabled by setting `spec.preserveUnknownFields` to `false`. Then unspecified fields on creation and on update are dropped. + +Compare the CustomResourceDefinition `crontabs.stable.example.com` above. It has pruning enabled. 
Hence, if you save the following YAML to `my-crontab.yaml`: + +```yaml +apiVersion: "stable.example.com/v1" +kind: CronTab +metadata: + name: my-new-cron-object +spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image + someRandomField: 42 +``` + +and create it: + +```shell +kubectl create --validate=false -f my-crontab.yaml -o yaml +``` + +you should get: + +```console +apiVersion: stable.example.com/v1 +kind: CronTab +metadata: + creationTimestamp: 2017-05-31T12:56:35Z + generation: 1 + name: my-new-cron-object + namespace: default + resourceVersion: "285" + selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object + uid: 9423255b-4600-11e7-af6a-28d2447dc82b +spec: + cronSpec: '* * * * */5' + image: my-awesome-cron-image +``` + +The field `someRandomField` has been pruned. + +Note that the `kubectl create` call uses `--validate=false` to skip client-side validation. Because the [OpenAPI validation schemas are also published](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#publish-validation-schema-in-openapi-v2) to kubectl, it will also check for unknown fields and reject those objects long before they are sent to the API server. + +In `apiextensions.k8s.io/v1beta1`, pruning is disabled by default, i.e. `spec.preserveUnknownFields` defaults to `true`. In `apiextensions.k8s.io/v1` no new CustomResourceDefinitions with `spec.preserveUnknownFields: true` will be allowed to be created. + +### Controlling pruning + +With `spec.preserveUnknownField: false` in the CustomResourceDefinition, pruning is enabled for all custom resources of that type and in all versions. It is possible though to opt-out of that for JSON sub-trees via `x-kubernetes-preserve-unknown-fields: true` in the [structural OpenAPI v3 validation schema](#specifying-a-structural-schema): + +```yaml +type: object +properties: + json: + x-kubernetes-preserve-unknown-fields: true +``` + +The field `json` can store any JSON value, without anything being pruned. + +It is possible to partially specify the permitted JSON, e.g.: + +```yaml +type: object +properties: + json: + x-kubernetes-preserve-unknown-fields: true + type: object + description: this is arbitrary JSON +``` + +With this only object type values are allowed. + +Pruning is enabled again for each specified property (or `additionalProperties`): + +```yaml +type: object +properties: + json: + x-kubernetes-preserve-unknown-fields: true + type: object + properties: + spec: + type: object + properties: + foo: + type: string + bar: + type: string +``` + +With this, the value: + +```yaml +json: + spec: + foo: abc + bar: def + something: x + status: + something: x +``` + +is pruned to: + +```yaml +json: + spec: + foo: abc + bar: def + status: + something: x +``` + +This means that the `something` field in the specified `spec` object is pruned, but everything outside is not. + +### IntOrString + +Nodes in a schema with `x-kubernetes-int-or-string: true` are excluded from rule 1, such that the following is structural: + +```yaml +type: object +properties: + foo: + x-kubernetes-int-or-string: true +``` + +Also those nodes are partially excluded from rule 3 in the sense that the following two patterns are allowed (exactly those, without variations in order to additional fields): + +```yaml +x-kubernetes-int-or-string: true +anyOf: +- type: integer +- type: string +... +``` + +and + +```yaml +x-kubernetes-int-or-string: true +allOf: +- anyOf: + - type: integer + - type: string +- ... # zero or more +... 
+``` + +With one of those specification, both an integer and a string validate. + +In [Validation Schema Publishing](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#publish-validation-schema-in-openapi-v2), `x-kubernetes-int-or-string: true` is unfolded to one of the two patterns shown above. + +### RawExtension + +RawExtensions (as in `runtime.RawExtension` defined in [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery/blob/03ac7a9ade429d715a1a46ceaa3724c18ebae54f/pkg/runtime/types.go#L94)) holds complete Kubernetes objects, i.e. with `apiVersion` and `kind` fields. + +It is possible to specify those embedded objects (both completely without constraints or partially specified) by setting `x-kubernetes-embedded-resource: true`. For example: + +```yaml +type: object +properties: + foo: + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true +``` + +Here, the field `foo` holds a complete object, e.g.: + +```yaml +foo: + apiVersion: v1 + kind: Pod + spec: + ... +``` + +Because `x-kubernetes-preserve-unknown-fields: true` is specified alongside, nothing is pruned. The use of `x-kubernetes-preserve-unknown-fields: true` is optional though. + +With `x-kubernetes-embedded-resource: true`, the `apiVersion`, `kind` and `metadata` are implicitly specified and validated. + ## Serving multiple versions of a CRD See [Custom resource definition versioning](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/) @@ -231,17 +554,31 @@ Validation of custom objects is possible via [OpenAPI v3 schema](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#schemaObject) or [validatingadmissionwebhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook). Additionally, the following restrictions are applied to the schema: -- The fields `default`, `nullable`, `discriminator`, `readOnly`, `writeOnly`, `xml`, -`deprecated` and `$ref` cannot be set. +- These fields cannot be set: + - `definitions`, + - `dependencies`, + - `deprecated`, + - `discriminator`, + - `id`, + - `patternProperties`, + - `readOnly`, + - `writeOnly`, + - `xml`, + - `$ref`. - The field `uniqueItems` cannot be set to true. - The field `additionalProperties` cannot be set to false. +- The field `additionalProperties` is mutually exclusive with `properties`. -You can disable this feature using the `CustomResourceValidation` feature gate on -the [kube-apiserver](/docs/admin/kube-apiserver): +These fields can only be set with specific features enabled: -``` ---feature-gates=CustomResourceValidation=false -``` +- `default`: the `CustomResourceDefaulting` feature gate must be enabled, compare [Validation Schema Defaulting](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting). + +Note: compare with [structural schemas](#specifying-a-structural-schema) for further restriction required for certain CustomResourceDefinition features. + +{{< note >}} +OpenAPI v3 validation is available as beta. The +`CustomResourceValidation` feature must be enabled, which is the case automatically for many clusters for beta features. Please refer to the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) documentation for more information. +{{< /note >}} The schema is defined in the CustomResourceDefinition. 
In the following example, the CustomResourceDefinition applies the following validations on the custom object:
@@ -273,8 +610,10 @@ spec:
   validation:
    # openAPIV3Schema is the schema for validating custom objects.
     openAPIV3Schema:
+      type: object
       properties:
         spec:
+          type: object
           properties:
             cronSpec:
               type: string
@@ -347,25 +686,106 @@ kubectl apply -f my-crontab.yaml
 crontab "my-new-cron-object" created
 ```
 
-### Publish Validation Schema in OpenAPI v2
+### Defaulting
+
+{{< feature-state state="alpha" for_kubernetes_version="1.15" >}}
+
+{{< note >}}
+Defaulting is available as alpha since 1.15. It is disabled by default and can be enabled via the `CustomResourceDefaulting` feature gate. Please refer to the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) documentation for more information.
+
+Defaulting also requires a structural schema and pruning.
+{{< /note >}}
 
-{{< feature-state state="alpha" for_kubernetes_version="1.14" >}}
+Defaulting allows you to specify default values in the [OpenAPI v3 validation schema](#validation):
 
-Starting with Kubernetes 1.14, [custom resource validation schema](#validation) can be published as part
-of [OpenAPI v2 spec](/docs/concepts/overview/kubernetes-api/#openapi-and-swagger-definitions) from
-Kubernetes API server.
+```yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: crontabs.stable.example.com
+spec:
+  group: stable.example.com
+  versions:
+    - name: v1
+      served: true
+      storage: true
+  version: v1
+  scope: Namespaced
+  names:
+    plural: crontabs
+    singular: crontab
+    kind: CronTab
+    shortNames:
+    - ct
+  preserveUnknownFields: false
+  validation:
+    # openAPIV3Schema is the schema for validating custom objects.
+    openAPIV3Schema:
+      type: object
+      properties:
+        spec:
+          type: object
+          properties:
+            cronSpec:
+              type: string
+              pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
+              default: "5 0 * * *"
+            image:
+              type: string
+            replicas:
+              type: integer
+              minimum: 1
+              maximum: 10
+              default: 1
+```
 
-[kubectl](/docs/reference/kubectl/overview) consumes the published schema to perform client-side validation
-(`kubectl create` and `kubectl apply`), schema explanation (`kubectl explain`) on custom resources.
-The published schema can be consumed for other purposes. The feature is Alpha in 1.14 and disabled by default.
-You can enable the feature using the `CustomResourcePublishOpenAPI` feature gate on the
-[kube-apiserver](/docs/admin/kube-apiserver):
+With this, both `cronSpec` and `replicas` are defaulted:
 
+```yaml
+apiVersion: "stable.example.com/v1"
+kind: CronTab
+metadata:
+  name: my-new-cron-object
+spec:
+  image: my-awesome-cron-image
 ```
---feature-gates=CustomResourcePublishOpenAPI=true
+
+leads to
+
+```yaml
+apiVersion: "stable.example.com/v1"
+kind: CronTab
+metadata:
+  name: my-new-cron-object
+spec:
+  cronSpec: "5 0 * * *"
+  image: my-awesome-cron-image
+  replicas: 1
 ```
 
-Custom resource validation schema will be converted to OpenAPI v2 schema, and
+Note that defaulting happens on the object:
+
+* in the request to the API server using the request version defaults
+* when reading from etcd using the storage version defaults
+* after mutating admission plugins with non-empty patches using the admission webhook object version defaults.
+
+Note that defaults applied when reading data from etcd are not automatically written back to etcd. An update request via the API is required to persist those defaults back into etcd.
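+
+To try defaulting out, the alpha feature gate has to be switched on for the kube-apiserver. As a rough sketch (how the flag is passed depends on how your control plane is deployed, for example via a static Pod manifest):
+
+```shell
+# Added to the existing kube-apiserver invocation
+--feature-gates=CustomResourceDefaulting=true
+```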
+ +### Publish Validation Schema in OpenAPI v2 + +{{< feature-state state="beta" for_kubernetes_version="1.15" >}} + +{{< note >}} +OpenAPI v2 Publishing is available as beta since 1.15, and as alpha since 1.14. The +`CustomResourcePublishOpenAPI` feature must be enabled, which is the case automatically for many clusters for beta features. Please refer to the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) documentation for more information. +{{< /note >}} + +With the OpenAPI v2 Publishing feature enabled, CustomResourceDefinition [OpenAPI v3 validation schemas](#validation) which are [structural](#specifying-a-structural-schema) are published as part +of the [OpenAPI v2 spec](/docs/concepts/overview/kubernetes-api/#openapi-and-swagger-definitions) from Kubernetes API server. + +[kubectl](/docs/reference/kubectl/overview) consumes the published schema to perform client-side validation (`kubectl create` and `kubectl apply`), schema explanation (`kubectl explain`) on custom resources. The published schema can be consumed for other purposes as well, like client generation or documentation. + +The OpenAPI v3 validation schema is converted to OpenAPI v2 schema, and show up in `definitions` and `paths` fields in the [OpenAPI v2 spec](/docs/concepts/overview/kubernetes-api/#openapi-and-swagger-definitions). The following modifications are applied during the conversion to keep backwards compatiblity with kubectl in previous 1.13 version. These modifications prevent kubectl from being over-strict and rejecting @@ -373,31 +793,8 @@ valid OpenAPI schemas that it doesn't understand. The conversion won't modify th and therefore won't affect [validation](#validation) in the API server. 1. The following fields are removed as they aren't supported by OpenAPI v2 (in future versions OpenAPI v3 will be used without these restrictions) - - The fields `oneOf`, `anyOf` and `not` are removed -2. The following fields are removed as they aren't allowed by kubectl in - previous 1.13 version - - For a schema with a `$ref` - - the fields `properties` and `type` are removed - - if the `$ref` is outside of the `definitions`, the field `$ref` is removed - - For a schema of a primitive data type (which means the field `type` has two elements: one type and one format) - - if any one of the two elements is `null`, the field `type` is removed - - otherwise, the fields `type` and `properties` are removed - - For a schema of more than two types - - the fields `type` and `properties` are removed - - For a schema of `null` type - - the field `type` is removed - - For a schema of `array` type - - if the schema doesn't have exactly one item, the fields `type` and `items` are - removed - - For a schema with no type specified - - the field `properties` is removed -3. The following fields are removed as they aren't supported by the OpenAPI protobuf implementation - - The fields `id`, `schema`, `definitions`, `additionalItems`, `dependencies`, - and `patternProperties` are removed - - For a schema with a `externalDocs` - - if the `externalDocs` has `url` defined, the field `externalDocs` is removed - - For a schema with `items` defined - - if the field `items` has multiple schemas, the field `items` is removed + - The fields `allOf`, `anyOf`, `oneOf` and `not` are removed +2. If `nullable: true` is set, we drop `type`, `nullable`, `items` and `properties` because OpenAPI v2 is not able to express nullable. To avoid kubectl to reject good objects, this is necessary. 
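+
+For example, with the CronTab CustomResourceDefinition from this page installed and publishing enabled, the published schema can be inspected from the command line; a usage sketch (the exact output depends on your cluster):
+
+```shell
+# Ask the API server to explain the published schema of the custom resource
+kubectl explain crontabs.spec
+
+# The schema also appears in the aggregated OpenAPI v2 document
+kubectl get --raw /openapi/v2 > openapi.json
+```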
### Additional printer columns @@ -569,9 +966,10 @@ the status replica value in the `/scale` subresource will default to 0. - It is an optional value. - It must be set to work with HPA. - - Only JSONPaths under `.status` and with the dot notation are allowed. + - Only JSONPaths under `.status` or `.spec` and with the dot notation are allowed. - If there is no value under the `LabelSelectorPath` in the custom resource, the status selector value in the `/scale` subresource will default to the empty string. + - The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. In the following example, both status and scale subresources are enabled. @@ -660,6 +1058,8 @@ kubectl get crontabs my-new-cron-object -o jsonpath='{.spec.replicas}' 5 ``` +You can use a [PodDisruptionBudget](docs/tasks/run-application/configure-pdb/) to protect custom resources that have the scale subresource enabled. + ### Categories Categories is a list of grouped resources the custom resource belongs to (eg. `all`). diff --git a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md index ce3be649df05d..185482f5ccf66 100644 --- a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -78,11 +78,10 @@ data: health kubernetes cluster.local in-addr.arpa ip6.arpa { pods insecure - upstream fallthrough in-addr.arpa ip6.arpa } prometheus :9153 - proxy . /etc/resolv.conf + forward . /etc/resolv.conf cache 30 loop reload @@ -97,10 +96,8 @@ The Corefile configuration includes the following [plugins](https://coredns.io/p > The `pods insecure` option is provided for backward compatibility with kube-dns. You can use the `pods verified` option, which returns an A record only if there exists a pod in same namespace with matching IP. The `pods disabled` option can be used if you don't use pod records. -> `Upstream` is used for resolving services that point to external hosts (External Services). - * [prometheus](https://coredns.io/plugins/prometheus/): Metrics of CoreDNS are available at http://localhost:9153/metrics in [Prometheus](https://prometheus.io/) format. -* [proxy](https://coredns.io/plugins/proxy/): Any queries that are not within the cluster domain of Kubernetes will be forwarded to predefined resolvers (/etc/resolv.conf). +* [forward](https://coredns.io/plugins/forward/): Any queries that are not within the cluster domain of Kubernetes will be forwarded to predefined resolvers (/etc/resolv.conf). * [cache](https://coredns.io/plugins/cache/): This enables a frontend cache. * [loop](https://coredns.io/plugins/loop/): Detects simple forwarding loops and halts the CoreDNS process if a loop is found. * [reload](https://coredns.io/plugins/reload): Allows automatic reload of a changed Corefile. After you edit the ConfigMap configuration, allow two minutes for your changes to take effect. @@ -110,7 +107,7 @@ You can modify the default CoreDNS behavior by modifying the ConfigMap. ### Configuration of Stub-domain and upstream nameserver using CoreDNS -CoreDNS has the ability to configure stubdomains and upstream nameservers using the [proxy plugin](https://coredns.io/plugins/proxy/). +CoreDNS has the ability to configure stubdomains and upstream nameservers using the [forward plugin](https://coredns.io/plugins/forward/). 
#### Example If a cluster operator has a [Consul](https://www.consul.io/) domain server located at 10.150.0.1, and all Consul names have the suffix .consul.local. To configure it in CoreDNS, the cluster administrator creates the following stanza in the CoreDNS ConfigMap. @@ -119,18 +116,15 @@ If a cluster operator has a [Consul](https://www.consul.io/) domain server locat consul.local:53 { errors cache 30 - proxy . 10.150.0.1 + forward . 10.150.0.1 } ``` -To explicitly force all non-cluster DNS lookups to go through a specific nameserver at 172.16.0.1, point the `proxy` and `upstream` to the nameserver instead of `/etc/resolv.conf` +To explicitly force all non-cluster DNS lookups to go through a specific nameserver at 172.16.0.1, point the `forward` to the nameserver instead of `/etc/resolv.conf` ``` -proxy . 172.16.0.1 +forward . 172.16.0.1 ``` -``` -upstream 172.16.0.1 -``` The final ConfigMap along with the default `Corefile` configuration looks like: @@ -147,11 +141,10 @@ data: health kubernetes cluster.local in-addr.arpa ip6.arpa { pods insecure - upstream 172.16.0.1 fallthrough in-addr.arpa ip6.arpa } prometheus :9153 - proxy . 172.16.0.1 + forward . 172.16.0.1 cache 30 loop reload @@ -160,10 +153,12 @@ data: consul.local:53 { errors cache 30 - proxy . 10.150.0.1 + forward . 10.150.0.1 } ``` In Kubernetes version 1.10 and later, kubeadm supports automatic translation of the CoreDNS ConfigMap from the kube-dns ConfigMap. +***Note: While kube-dns accepts an FQDN for stubdomain and nameserver (eg: ns.foo.com), CoreDNS does not support this feature. +During translation, all FQDN nameservers will be omitted from the CoreDNS config.*** ## Kube-dns @@ -308,7 +303,7 @@ data: ## CoreDNS configuration equivalent to kube-dns CoreDNS supports the features of kube-dns and more. -A ConfigMap created for kube-dns to support `StubDomains`and `upstreamNameservers` translates to the `proxy` plugin in CoreDNS. +A ConfigMap created for kube-dns to support `StubDomains`and `upstreamNameservers` translates to the `forward` plugin in CoreDNS. Similarly, the `Federations` plugin in kube-dns translates to the `federation` plugin in CoreDNS. ### Example @@ -341,12 +336,12 @@ federation cluster.local { abc.com:53 { errors cache 30 - proxy . 1.2.3.4 + forward . 1.2.3.4 } my.cluster.local:53 { errors cache 30 - proxy . 2.3.4.5 + forward . 2.3.4.5 } ``` @@ -357,7 +352,6 @@ The complete Corefile with the default plugins: errors health kubernetes cluster.local in-addr.arpa ip6.arpa { - upstream 8.8.8.8 8.8.4.4 pods insecure fallthrough in-addr.arpa ip6.arpa } @@ -365,18 +359,18 @@ The complete Corefile with the default plugins: foo foo.feddomain.com } prometheus :9153 - proxy . 8.8.8.8 8.8.4.4 + forward . 8.8.8.8 8.8.4.4 cache 30 } abc.com:53 { errors cache 30 - proxy . 1.2.3.4 + forward . 1.2.3.4 } my.cluster.local:53 { errors cache 30 - proxy . 2.3.4.5 + forward . 2.3.4.5 } ``` diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index 1e9e04e190314..c9a95fe2e41c7 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -7,13 +7,15 @@ content_template: templates/task {{% capture overview %}} -This page explains how to manage certificates manually with kubeadm. 
+{{< feature-state for_k8s_version="v1.15" state="stable" >}} + +Client certificates generated by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) expire after 1 year. This page explains how to manage certificate renewals with kubeadm. {{% /capture %}} {{% capture prerequisites %}} -These are advanced topics for users who need to integrate their organization's certificate infrastructure into a kubeadm-built cluster. If kubeadm with the default configuration satisfies your needs, you should let kubeadm manage certificates instead. +Be familiar with [PKI certificates and requirements in Kubernetes](/docs/setup/certificates/). You should be familiar with [PKI certificates and requirements in Kubernetes](/docs/setup/best-practices/certificates/). @@ -21,35 +23,101 @@ You should be familiar with [PKI certificates and requirements in Kubernetes](/d {{% capture steps %}} -## Renew certificates with the certificates API +## Check certificate expiration + +`check-expiration` can be used to check certificate expiration. + +``` +kubeadm alpha certs check-expiration +``` + +The output is similar to this: + +``` +CERTIFICATE EXPIRES RESIDUAL TIME EXTERNALLY MANAGED +admin.conf May 15, 2020 13:03 UTC 364d false +apiserver May 15, 2020 13:00 UTC 364d false +apiserver-etcd-client May 15, 2020 13:00 UTC 364d false +apiserver-kubelet-client May 15, 2020 13:00 UTC 364d false +controller-manager.conf May 15, 2020 13:03 UTC 364d false +etcd-healthcheck-client May 15, 2020 13:00 UTC 364d false +etcd-peer May 15, 2020 13:00 UTC 364d false +etcd-server May 15, 2020 13:00 UTC 364d false +front-proxy-client May 15, 2020 13:00 UTC 364d false +scheduler.conf May 15, 2020 13:03 UTC 364d false +``` + +The command shows expiration/residual time for the client certificates in the `/etc/kubernetes/pki` folder and for the client certificate embedded in the KUBECONFIG files used by kubeadm (`admin.conf`, `controller-manager.conf` and `scheduler.conf`). + +Additionally, kubeadm informs the user if the certificate is externally managed; in this case, the user should take care of managing certificate renewal manually/using other tools. + +{{< warning >}} +`kubeadm` cannot manage certificates signed by an external CA. +{{< /warning >}} + +{{< note >}} +`kubelet.conf` is not included in the list above because kubeadm configures kubelet for automatic certificate renewal. +{{< /note >}} + +## Automatic certificate renewal + +`kubeadm` renews all the certificates during control plane [upgrade](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15/). + +This feature is designed for addressing the simplest use cases; +if you don't have specific requirements on certificate renewal and perform Kubernetes version upgrades regularly (less than 1 year in between each upgrade), kubeadm will take care of keeping your cluster up to date and reasonably secure. + +{{< note >}} +It is a best practice to upgrade your cluster frequently in order to stay secure. +{{< /note >}} + +If you have more complex requirements for certificate renewal, you can opt out from the default behavior by passing `--certificate-renewal=false` to `kubeadm upgrade apply` or to `kubeadm upgrade node`. + + +## Manual certificate renewal + +You can renew your certificates manually at any time with the `kubeadm alpha certs renew` command. + +This command performs the renewal using CA (or front-proxy-CA) certificate and key stored in `/etc/kubernetes/pki`. 
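+
+For example (a sketch; the sub-commands available depend on your kubeadm version, see `kubeadm alpha certs renew --help`):
+
+```shell
+# Renew all kubeadm-managed certificates on this control-plane node
+sudo kubeadm alpha certs renew all
+
+# Renew only the API server certificate
+sudo kubeadm alpha certs renew apiserver
+```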
+
+{{< warning >}}
+If you are running an HA cluster, this command needs to be executed on all the control-plane nodes.
+{{< /warning >}}
+
+{{< note >}}
+`alpha certs renew` uses the existing certificates as the authoritative source for attributes (Common Name, Organization, SAN, etc.) instead of the kubeadm-config ConfigMap. It is strongly recommended to keep them both in sync.
+{{< /note >}}
+
+`kubeadm alpha certs renew` provides the following options:
 
 The Kubernetes certificates normally reach their expiration date after one year.
 
-Kubeadm can renew certificates with the `kubeadm alpha certs renew` commands; you should run these commands on control-plane nodes only.
+- `--csr-only` can be used to renew certificates with an external CA by generating certificate signing requests (without actually renewing certificates in place); see the next paragraph for more information.
 
-Typically this is done by loading on-disk CA certificates and keys and using them to issue new certificates.
-This approach works well if your certificate tree is self-contained. However, if your certificates are externally
-managed, you might need a different approach.
+- It's also possible to renew a single certificate instead of all of them.
 
-As an alternative, Kubernetes provides its own [API for managing certificates][manage-tls].
-With kubeadm, you can use this API by running `kubeadm alpha certs renew --use-api`.
+## Renew certificates with the Kubernetes certificates API
 
-## Set up a signer
+This section provides more details about how to execute manual certificate renewal using the Kubernetes certificates API.
+
+{{< caution >}}
+These are advanced topics for users who need to integrate their organization's certificate infrastructure into a kubeadm-built cluster. If the default kubeadm configuration satisfies your needs, you should let kubeadm manage certificates instead.
+{{< /caution >}}
+
+### Set up a signer
 
 The Kubernetes Certificate Authority does not work out of the box.
 You can configure an external signer such as [cert-manager][cert-manager-issuer], or you can use the build-in signer.
 The built-in signer is part of [`kube-controller-manager`][kcm].
 To activate the build-in signer, you pass the `--cluster-signing-cert-file` and `--cluster-signing-key-file` arguments.
-You pass these arguments in any of the following ways:
+The built-in signer is part of [`kube-controller-manager`][kcm].
 
-* Edit `/etc/kubernetes/manifests/kube-controller-manager.yaml` to add the arguments to the command.
-  Remember that your changes could be overwritten when you upgrade.
+To activate the built-in signer, you must pass the `--cluster-signing-cert-file` and `--cluster-signing-key-file` flags.
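+
+For illustration, on a running control plane these end up as flags on the `kube-controller-manager` command line; a rough sketch, assuming the default kubeadm CA file locations used in the configuration below:
+
+```shell
+kube-controller-manager \
+  --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt \
+  --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
+```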
-* If you're creating a new cluster, you can use a kubeadm [configuration file][config]: +If you're creating a new cluster, you can use a kubeadm [configuration file][config]: ```yaml - apiVersion: kubeadm.k8s.io/v1beta1 + apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration controllerManager: extraArgs: @@ -57,14 +125,13 @@ You pass these arguments in any of the following ways: cluster-signing-key-file: /etc/kubernetes/pki/ca.key ``` -* You can also upload a config file using [`kubeadm config upload from-files`][config-upload] - [cert-manager-issuer]: https://cert-manager.readthedocs.io/en/latest/tutorials/ca/creating-ca-issuer.html [kcm]: /docs/reference/command-line-tools-reference/kube-controller-manager/ -[config]: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 -[config-upload]: /docs/reference/setup-tools/kubeadm/kubeadm-config/#cmd-config-from-file +[config]: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 -### Approve requests +### Create certificate signing requests (CSR) + +You can create the certificate signing requests for the Kubernetes certificates API with `kubeadm alpha certs renew --use-api`. If you set up an external signer such as [cert-manager][cert-manager], certificate signing requests (CSRs) are automatically approved. Otherwise, you must manually approve certificates with the [`kubectl certificate`][certs] command. @@ -73,42 +140,53 @@ The following kubeadm command outputs the name of the certificate to approve, th ```shell sudo kubeadm alpha certs renew apiserver --use-api & ``` +The output is similar to this: ``` [1] 2890 [certs] certificate request "kubeadm-cert-kube-apiserver-ld526" created ``` + +### Approve certificate signing requests (CSR) + +If you set up an external signer, certificate signing requests (CSRs) are automatically approved. + +Otherwise, you must manually approve certificates with the [`kubectl certificate`][certs] command. e.g. + ```shell kubectl certificate approve kubeadm-cert-kube-apiserver-ld526 +``` +The output is similar to this: +```shell certificatesigningrequest.certificates.k8s.io/kubeadm-cert-kube-apiserver-ld526 approved -[1]+ Done sudo kubeadm alpha certs renew apiserver --use-api ``` You can view a list of pending certificates with `kubectl get csr`. -[manage-tls]: /docs/tasks/tls/managing-tls-in-a-cluster/ -[cert-manager]: https://github.com/jetstack/cert-manager -[certs]: /docs/reference/generated/kubectl/kubectl-commands#certificate +## Renew certificates with external CA -## Certificate requests with kubeadm +This section provide more details about how to execute manual certificate renewal using an external CA. To better integrate with external CAs, kubeadm can also produce certificate signing requests (CSRs). A CSR represents a request to a CA for a signed certificate for a client. In kubeadm terms, any certificate that would normally be signed by an on-disk CA can be produced as a CSR instead. A CA, however, cannot be produced as a CSR. -You can create an individual CSR with `kubeadm init phase certs apiserver --csr-only`. -The `--csr-only` flag can be applied only to individual phases. After [all certificates are in place][certs], you can run `kubeadm init --external-ca`. +### Create certificate signing requests (CSR) You can pass in a directory with `--csr-dir` to output the CSRs to the specified location. If `--csr-dir` is not specified, the default certificate directory (`/etc/kubernetes/pki`) is used. 
Both the CSR and the accompanying private key are given in the output. After a certificate is signed, the certificate and the private key must be copied to the PKI directory (by default `/etc/kubernetes/pki`). -### Renew certificates +A CSR represents a request to a CA for a signed certificate for a client. + +You can create certificate signing requests with `kubeadm alpha certs renew --csr-only`. + +Both the CSR and the accompanying private key are given in the output; you can pass in a directory with `--csr-dir` to output the CSRs to the specified location. Certificates can be renewed with `kubeadm alpha certs renew --csr-only`. As with `kubeadm init`, an output directory can be specified with the `--csr-dir` flag. To use the new certificates, copy the signed certificate and private key into the PKI directory (by default `/etc/kubernetes/pki`) -## Cert usage +A CSR contains a certificate's name, domain(s), and IPs, but it does not specify usages. A CSR contains a certificate's name, domains, and IPs, but it does not specify usages. It is the responsibility of the CA to specify [the correct cert usages][cert-table] when issuing a certificate. @@ -116,9 +194,7 @@ It is the responsibility of the CA to specify [the correct cert usages][cert-tab * In `openssl` this is done with the [`openssl ca` command][openssl-ca]. * In `cfssl` you specify [usages in the config file][cfssl-usages] -## CA selection - -Kubeadm sets up [three CAs][cert-cas] by default. Make sure to sign the CSRs with a corresponding CA. +After a certificate is signed using your preferred method, the certificate and the private key must be copied to the PKI directory (by default `/etc/kubernetes/pki`). [openssl-ca]: https://superuser.com/questions/738612/openssl-ca-keyusage-extension [cfssl-usages]: https://github.com/cloudflare/cfssl/blob/master/doc/cmd/cfssl.txt#L170 diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md.orig b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md.orig new file mode 100644 index 0000000000000..c9a95fe2e41c7 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md.orig @@ -0,0 +1,205 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Certificate Management with kubeadm +content_template: templates/task +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.15" state="stable" >}} + +Client certificates generated by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) expire after 1 year. This page explains how to manage certificate renewals with kubeadm. + +{{% /capture %}} + +{{% capture prerequisites %}} + +Be familiar with [PKI certificates and requirements in Kubernetes](/docs/setup/certificates/). + +You should be familiar with [PKI certificates and requirements in Kubernetes](/docs/setup/best-practices/certificates/). + +{{% /capture %}} + +{{% capture steps %}} + +## Check certificate expiration + +`check-expiration` can be used to check certificate expiration. 
+ +``` +kubeadm alpha certs check-expiration +``` + +The output is similar to this: + +``` +CERTIFICATE EXPIRES RESIDUAL TIME EXTERNALLY MANAGED +admin.conf May 15, 2020 13:03 UTC 364d false +apiserver May 15, 2020 13:00 UTC 364d false +apiserver-etcd-client May 15, 2020 13:00 UTC 364d false +apiserver-kubelet-client May 15, 2020 13:00 UTC 364d false +controller-manager.conf May 15, 2020 13:03 UTC 364d false +etcd-healthcheck-client May 15, 2020 13:00 UTC 364d false +etcd-peer May 15, 2020 13:00 UTC 364d false +etcd-server May 15, 2020 13:00 UTC 364d false +front-proxy-client May 15, 2020 13:00 UTC 364d false +scheduler.conf May 15, 2020 13:03 UTC 364d false +``` + +The command shows expiration/residual time for the client certificates in the `/etc/kubernetes/pki` folder and for the client certificate embedded in the KUBECONFIG files used by kubeadm (`admin.conf`, `controller-manager.conf` and `scheduler.conf`). + +Additionally, kubeadm informs the user if the certificate is externally managed; in this case, the user should take care of managing certificate renewal manually/using other tools. + +{{< warning >}} +`kubeadm` cannot manage certificates signed by an external CA. +{{< /warning >}} + +{{< note >}} +`kubelet.conf` is not included in the list above because kubeadm configures kubelet for automatic certificate renewal. +{{< /note >}} + +## Automatic certificate renewal + +`kubeadm` renews all the certificates during control plane [upgrade](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15/). + +This feature is designed for addressing the simplest use cases; +if you don't have specific requirements on certificate renewal and perform Kubernetes version upgrades regularly (less than 1 year in between each upgrade), kubeadm will take care of keeping your cluster up to date and reasonably secure. + +{{< note >}} +It is a best practice to upgrade your cluster frequently in order to stay secure. +{{< /note >}} + +If you have more complex requirements for certificate renewal, you can opt out from the default behavior by passing `--certificate-renewal=false` to `kubeadm upgrade apply` or to `kubeadm upgrade node`. + + +## Manual certificate renewal + +You can renew your certificates manually at any time with the `kubeadm alpha certs renew` command. + +This command performs the renewal using CA (or front-proxy-CA) certificate and key stored in `/etc/kubernetes/pki`. + +{{< warning >}} +If you are running an HA cluster, this command needs to be executed on all the control-plane nodes. +{{< /warning >}} + +{{< note >}} +`alpha certs renew` uses the existing certificates as the authoritative source for attributes (Common Name, Organization, SAN, etc.) instead of the kubeadm-config ConfigMap. It is strongly recommended to keep them both in sync. +{{< /note >}} + +`kubeadm alpha certs renew` provides the following options: + +The Kubernetes certificates normally reach their expiration date after one year. + +- `--csr-only` can be used to renew certificats with an external CA by generating certificate signing requests (without actually renewing certificates in place); see next paragraph for more information. + +- It's also possible to renew a single certificate instead of all. + +## Renew certificates with the Kubernetes certificates API + +This section provide more details about how to execute manual certificate renewal using the Kubernetes certificates API. 
+ +{{< caution >}} +These are advanced topics for users who need to integrate their organization's certificate infrastructure into a kubeadm-built cluster. If the default kubeadm configuration satisfies your needs, you should let kubeadm manage certificates instead. +{{< /caution >}} + +### Set up a signer + +The Kubernetes Certificate Authority does not work out of the box. +You can configure an external signer such as [cert-manager][cert-manager-issuer], or you can use the build-in signer. +The built-in signer is part of [`kube-controller-manager`][kcm]. +To activate the build-in signer, you pass the `--cluster-signing-cert-file` and `--cluster-signing-key-file` arguments. + +The built-in signer is part of [`kube-controller-manager`][kcm]. + +To activate the build-in signer, you must pass the `--cluster-signing-cert-file` and `--cluster-signing-key-file` flags. + +If you're creating a new cluster, you can use a kubeadm [configuration file][config]: + + ```yaml + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + controllerManager: + extraArgs: + cluster-signing-cert-file: /etc/kubernetes/pki/ca.crt + cluster-signing-key-file: /etc/kubernetes/pki/ca.key + ``` + +[cert-manager-issuer]: https://cert-manager.readthedocs.io/en/latest/tutorials/ca/creating-ca-issuer.html +[kcm]: /docs/reference/command-line-tools-reference/kube-controller-manager/ +[config]: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 + +### Create certificate signing requests (CSR) + +You can create the certificate signing requests for the Kubernetes certificates API with `kubeadm alpha certs renew --use-api`. + +If you set up an external signer such as [cert-manager][cert-manager], certificate signing requests (CSRs) are automatically approved. +Otherwise, you must manually approve certificates with the [`kubectl certificate`][certs] command. +The following kubeadm command outputs the name of the certificate to approve, then blocks and waits for approval to occur: + +```shell +sudo kubeadm alpha certs renew apiserver --use-api & +``` +The output is similar to this: +``` +[1] 2890 +[certs] certificate request "kubeadm-cert-kube-apiserver-ld526" created +``` + +### Approve certificate signing requests (CSR) + +If you set up an external signer, certificate signing requests (CSRs) are automatically approved. + +Otherwise, you must manually approve certificates with the [`kubectl certificate`][certs] command. e.g. + +```shell +kubectl certificate approve kubeadm-cert-kube-apiserver-ld526 +``` +The output is similar to this: +```shell +certificatesigningrequest.certificates.k8s.io/kubeadm-cert-kube-apiserver-ld526 approved +``` + +You can view a list of pending certificates with `kubectl get csr`. + +## Renew certificates with external CA + +This section provide more details about how to execute manual certificate renewal using an external CA. + +To better integrate with external CAs, kubeadm can also produce certificate signing requests (CSRs). +A CSR represents a request to a CA for a signed certificate for a client. +In kubeadm terms, any certificate that would normally be signed by an on-disk CA can be produced as a CSR instead. A CA, however, cannot be produced as a CSR. + +### Create certificate signing requests (CSR) + +You can pass in a directory with `--csr-dir` to output the CSRs to the specified location. +If `--csr-dir` is not specified, the default certificate directory (`/etc/kubernetes/pki`) is used. +Both the CSR and the accompanying private key are given in the output. 
After a certificate is signed, the certificate and the private key must be copied to the PKI directory (by default `/etc/kubernetes/pki`). + +A CSR represents a request to a CA for a signed certificate for a client. + +You can create certificate signing requests with `kubeadm alpha certs renew --csr-only`. + +Both the CSR and the accompanying private key are given in the output; you can pass in a directory with `--csr-dir` to output the CSRs to the specified location. + +Certificates can be renewed with `kubeadm alpha certs renew --csr-only`. +As with `kubeadm init`, an output directory can be specified with the `--csr-dir` flag. +To use the new certificates, copy the signed certificate and private key into the PKI directory (by default `/etc/kubernetes/pki`) + +A CSR contains a certificate's name, domain(s), and IPs, but it does not specify usages. + +A CSR contains a certificate's name, domains, and IPs, but it does not specify usages. +It is the responsibility of the CA to specify [the correct cert usages][cert-table] when issuing a certificate. + +* In `openssl` this is done with the [`openssl ca` command][openssl-ca]. +* In `cfssl` you specify [usages in the config file][cfssl-usages] + +After a certificate is signed using your preferred method, the certificate and the private key must be copied to the PKI directory (by default `/etc/kubernetes/pki`). + +[openssl-ca]: https://superuser.com/questions/738612/openssl-ca-keyusage-extension +[cfssl-usages]: https://github.com/cloudflare/cfssl/blob/master/doc/cmd/cfssl.txt#L170 +[certs]: /docs/setup/best-practices/certificates/ +[cert-cas]: /docs/setup/best-practices/certificates/#single-root-ca +[cert-table]: /docs/setup/best-practices/certificates/#all-certificates + +{{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-12.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-12.md deleted file mode 100644 index 94d005b4ccd8a..0000000000000 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-12.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -reviewers: -- sig-cluster-lifecycle -title: Upgrading kubeadm clusters from v1.11 to v1.12 -content_template: templates/task ---- - -{{% capture overview %}} - -This page explains how to upgrade a Kubernetes cluster created with `kubeadm` from version 1.11.x to version 1.12.x, and from version 1.12.x to 1.12.y, where `y > x`. - -{{% /capture %}} - -{{% capture prerequisites %}} - -- You need to have a `kubeadm` Kubernetes cluster running version 1.11.0 or later. - [Swap must be disabled][swap]. - The cluster should use a static control plane and etcd pods. -- Make sure you read the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md) carefully. -- Make sure to back up any important components, such as app-level state stored in a database. - `kubeadm upgrade` does not touch your workloads, only components internal to Kubernetes, but backups are always a best practice. - - -[swap]: https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux -### Additional information - -- All containers are restarted after upgrade, because the container spec hash value is changed. -- You can upgrade only from one minor version to the next minor version. - That is, you cannot skip versions when you upgrade. - For example, you can upgrade only from 1.10 to 1.11, not from 1.9 to 1.11. - -{{% /capture %}} - -{{% capture steps %}} - -## Upgrade the control plane - -1. 
On your master node, upgrade kubeadm: - - {{< tabs name="k8s_install" >}} - {{% tab name="Ubuntu, Debian or HypriotOS" %}} - # replace "x" with the latest patch version - apt-mark unhold kubeadm && \ - apt-get update && apt-get upgrade -y kubeadm=1.12.x-00 && \ - apt-mark hold kubeadm - {{% /tab %}} - {{% tab name="CentOS, RHEL or Fedora" %}} - # replace "x" with the latest patch version - yum upgrade -y kubeadm-1.12.x --disableexcludes=kubernetes - {{% /tab %}} - {{< /tabs >}} - -1. Verify that the download works and has the expected version: - - ```shell - kubeadm version - ``` - -1. On the master node, run: - - ```shell - kubeadm upgrade plan - ``` - - You should see output similar to this: - - ```shell - [preflight] Running pre-flight checks. - [upgrade] Making sure the cluster is healthy: - [upgrade/config] Making sure the configuration is correct: - [upgrade/config] Reading configuration from the cluster... - [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' - [upgrade] Fetching available versions to upgrade to - [upgrade/versions] Cluster version: v1.11.3 - [upgrade/versions] kubeadm version: v1.12.0 - [upgrade/versions] Latest stable version: v1.11.3 - [upgrade/versions] Latest version in the v1.11 series: v1.11.3 - [upgrade/versions] Latest experimental version: v1.13.0-alpha.0 - - Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': - COMPONENT CURRENT AVAILABLE - Kubelet 2 x v1.11.1 v1.12.0 - 1 x v1.11.3 v1.12.0 - - Upgrade to the latest experimental version: - - COMPONENT CURRENT AVAILABLE - API Server v1.11.3 v1.12.0 - Controller Manager v1.11.3 v1.12.0 - Scheduler v1.11.3 v1.12.0 - Kube Proxy v1.11.3 v1.12.0 - CoreDNS 1.1.3 1.2.2 - Etcd 3.2.18 3.2.24 - - You can now apply the upgrade by executing the following command: - - kubeadm upgrade apply v1.12.0 - - _____________________________________________________________________ - - ``` - - This command checks that your cluster can be upgraded, and fetches the versions you can upgrade to. - -1. Choose a version to upgrade to, and run the appropriate command. For example: - - ```shell - kubeadm upgrade apply v1.12.0 - ``` - - You should see output similar to this: - - - - ```shell - [preflight] Running pre-flight checks. - [upgrade] Making sure the cluster is healthy: - [upgrade/config] Making sure the configuration is correct: - [upgrade/config] Reading configuration from the cluster... - [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' - [upgrade/apply] Respecting the --cri-socket flag that is set with higher priority than the config file. - [upgrade/version] You have chosen to change the cluster version to "v1.12.0" - [upgrade/versions] Cluster version: v1.11.3 - [upgrade/versions] kubeadm version: v1.12.0 - [upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y - [upgrade/prepull] Will prepull images for components [kube-apiserver kube-controller-manager kube-scheduler etcd] - [upgrade/prepull] Prepulling image for component etcd. - [upgrade/prepull] Prepulling image for component kube-apiserver. - [upgrade/prepull] Prepulling image for component kube-controller-manager. - [upgrade/prepull] Prepulling image for component kube-scheduler. 
- [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-etcd - [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-apiserver - [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler - [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-controller-manager - [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-etcd - [upgrade/prepull] Prepulled image for component kube-apiserver. - [upgrade/prepull] Prepulled image for component kube-controller-manager. - [upgrade/prepull] Prepulled image for component kube-scheduler. - [upgrade/prepull] Prepulled image for component etcd. - [upgrade/prepull] Successfully prepulled the images for all the control plane components - [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.12.0"... - Static pod: kube-apiserver-ip-172-31-80-76 hash: d9b7af93990d702b3ee9a2beca93384b - Static pod: kube-controller-manager-ip-172-31-80-76 hash: 44a081fb5d26e90773ceb98b4e16fe10 - Static pod: kube-scheduler-ip-172-31-80-76 hash: 009228e74aef4d7babd7968782118d5e - Static pod: etcd-ip-172-31-80-76 hash: 997fcf3d8d974c98abc14556cc02617e - [etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests661777755/etcd.yaml" - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-09-19-18-58-14/etcd.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s - Static pod: etcd-ip-172-31-80-76 hash: 997fcf3d8d974c98abc14556cc02617e - - [apiclient] Found 1 Pods for label selector component=etcd - [upgrade/staticpods] Component "etcd" upgraded successfully! - [upgrade/etcd] Waiting for etcd to become available - [util/etcd] Waiting 0s for initial delay - [util/etcd] Attempting to see if all cluster endpoints are available 1/10 - [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests661777755" - [controlplane] wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests661777755/kube-apiserver.yaml" - [controlplane] wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests661777755/kube-controller-manager.yaml" - [controlplane] wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests661777755/kube-scheduler.yaml" - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-09-19-18-58-14/kube-apiserver.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s - - Static pod: kube-apiserver-ip-172-31-80-76 hash: 854a5a8468f899093c6a967bb81dcfbc - [apiclient] Found 1 Pods for label selector component=kube-apiserver - [upgrade/staticpods] Component "kube-apiserver" upgraded successfully! 
- [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-09-19-18-58-14/kube-controller-manager.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s - Static pod: kube-controller-manager-ip-172-31-80-76 hash: 44a081fb5d26e90773ceb98b4e16fe10 - Static pod: kube-controller-manager-ip-172-31-80-76 hash: b651f83474ae70031d5fb2cab73bd366 - [apiclient] Found 1 Pods for label selector component=kube-controller-manager - [upgrade/staticpods] Component "kube-controller-manager" upgraded successfully! - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-09-19-18-58-14/kube-scheduler.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s - Static pod: kube-scheduler-ip-172-31-80-76 hash: 009228e74aef4d7babd7968782118d5e - Static pod: kube-scheduler-ip-172-31-80-76 hash: da406e5a49adfbbeb90fe2a0cf8fd8d1 - [apiclient] Found 1 Pods for label selector component=kube-scheduler - [upgrade/staticpods] Component "kube-scheduler" upgraded successfully! - [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace - [kubelet] Creating a ConfigMap "kubelet-config-1.12" in namespace kube-system with the configuration for the kubelets in the cluster - [kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.12" ConfigMap in the kube-system namespace - [kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" - [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "ip-172-31-80-76" as an annotation - [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials - [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token - [bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster - [addons] Applied essential addon: CoreDNS - [addons] Applied essential addon: kube-proxy - - [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.12.0". Enjoy! - - [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so. - ``` - -1. Manually upgrade your Software Defined Network (SDN). - - Your Container Network Interface (CNI) provider may have its own upgrade instructions to follow. - Check the [addons](/docs/concepts/cluster-administration/addons/) page to - find your CNI provider and see whether additional upgrade steps are required. - -## Upgrade master and node packages - -1. Prepare each node for maintenance, marking it unschedulable and evicting the workloads: - - ```shell - kubectl drain $NODE --ignore-daemonsets - ``` - - On the master node, you must add `--ignore-daemonsets`: - - ```shell - kubectl drain ip-172-31-85-18 - node "ip-172-31-85-18" cordoned - error: unable to drain node "ip-172-31-85-18", aborting command... 
- - There are pending nodes to be drained: - ip-172-31-85-18 - error: DaemonSet-managed pods (use --ignore-daemonsets to ignore): calico-node-5798d, kube-proxy-thjp9 - ``` - - ``` - kubectl drain ip-172-31-85-18 --ignore-daemonsets - node "ip-172-31-85-18" already cordoned - WARNING: Ignoring DaemonSet-managed pods: calico-node-5798d, kube-proxy-thjp9 - node "ip-172-31-85-18" drained - ``` - -1. Upgrade the Kubernetes package version on each `$NODE` node by running the Linux package manager for your distribution: - - {{< tabs name="k8s_upgrade" >}} - {{% tab name="Ubuntu, Debian or HypriotOS" %}} - # replace "x" with the latest patch version - apt-mark unhold kubelet kubeadm - apt-get update - apt-get upgrade -y kubelet=1.12.x-00 kubeadm=1.12.x-00 - apt-mark hold kubelet kubeadm - {{% /tab %}} - {{% tab name="CentOS, RHEL or Fedora" %}} - # replace "x" with the latest patch version - yum upgrade -y kubelet-1.12.x kubeadm-1.12.x --disableexcludes=kubernetes - {{% /tab %}} - {{< /tabs >}} - -## Upgrade kubelet on each node - -1. On each node except the master node, upgrade the kubelet config: - - ```shell - sudo kubeadm upgrade node config --kubelet-version $(kubelet --version | cut -d ' ' -f 2) - ``` - -1. Restart the kubelet process: - - ```shell - sudo systemctl restart kubelet - ``` - -1. Verify that the new version of the `kubelet` is running on the node: - - ```shell - systemctl status kubelet - ``` - -1. Bring the node back online by marking it schedulable: - - ```shell - kubectl uncordon $NODE - ``` - - -1. After the kubelet is upgraded on all nodes, verify that all nodes are available again by running the following command from anywhere kubectl can access the cluster: - - ```shell - kubectl get nodes - ``` - - The `STATUS` column should show `Ready` for all your nodes, and the version number should be updated. - -{{% /capture %}} - -## Recovering from a failure state - -If `kubeadm upgrade` fails and does not roll back, for example because of an unexpected shutdown during execution, you can run `kubeadm upgrade` again. -This command is idempotent and eventually makes sure that the actual state is the desired state you declare. - -To recover from a bad state, you can also run `kubeadm upgrade --force` without changing the version that your cluster is running. - -## How it works - -`kubeadm upgrade apply` does the following: - -- Checks that your cluster is in an upgradeable state: - - The API server is reachable - - All nodes are in the `Ready` state - - The control plane is healthy -- Enforces the version skew policies. -- Makes sure the control plane images are available or available to pull to the machine. -- Upgrades the control plane components or rollbacks if any of them fails to come up. -- Applies the new `kube-dns` and `kube-proxy` manifests and enforces that all necessary RBAC rules are created. -- Creates new certificate and key files of the API server and backs up old files if they're about to expire in 180 days. 
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15.md new file mode 100644 index 0000000000000..22a6b8f633120 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15.md @@ -0,0 +1,383 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Upgrading kubeadm clusters from v1.14 to v1.15 +content_template: templates/task +--- + +{{% capture overview %}} + +This page explains how to upgrade a Kubernetes cluster created with kubeadm from version +1.14.x to version 1.15.x, and from version 1.15.x to 1.15.y (where `y > x`). + +The upgrade workflow at high level is the following: + +1. Upgrade the primary control plane node. +1. Upgrade additional control plane nodes. +1. Upgrade worker nodes. + +{{% /capture %}} + +{{% capture prerequisites %}} + +- You need to have a kubeadm Kubernetes cluster running version 1.14.0 or later. +- [Swap must be disabled](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux). +- The cluster should use a static control plane and etcd pods or external etcd. +- Make sure you read the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md) carefully. +- Make sure to back up any important components, such as app-level state stored in a database. + `kubeadm upgrade` does not touch your workloads, only components internal to Kubernetes, but backups are always a best practice. + +### Additional information + +- All containers are restarted after upgrade, because the container spec hash value is changed. +- You only can upgrade from one MINOR version to the next MINOR version, + or between PATCH versions of the same MINOR. That is, you cannot skip MINOR versions when you upgrade. + For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2. + +{{% /capture %}} + +{{% capture steps %}} + +## Determine which version to upgrade to + +1. Find the latest stable 1.15 version: + + {{< tabs name="k8s_install_versions" >}} + {{% tab name="Ubuntu, Debian or HypriotOS" %}} + apt update + apt-cache policy kubeadm + # find the latest 1.15 version in the list + # it should look like 1.15.x-00, where x is the latest patch + {{% /tab %}} + {{% tab name="CentOS, RHEL or Fedora" %}} + yum list --showduplicates kubeadm --disableexcludes=kubernetes + # find the latest 1.15 version in the list + # it should look like 1.15.x-0, where x is the latest patch + {{% /tab %}} + {{< /tabs >}} + +## Upgrade the first control plane node + +1. On your first control plane node, upgrade kubeadm: + + {{< tabs name="k8s_install_kubeadm_first_cp" >}} + {{% tab name="Ubuntu, Debian or HypriotOS" %}} + # replace x in 1.15.x-00 with the latest patch version + apt-mark unhold kubeadm && \ + apt-get update && apt-get install -y kubeadm=1.15.x-00 && \ + apt-mark hold kubeadm + {{% /tab %}} + {{% tab name="CentOS, RHEL or Fedora" %}} + # replace x in 1.15.x-0 with the latest patch version + yum install -y kubeadm-1.15.x-0 --disableexcludes=kubernetes + {{% /tab %}} + {{< /tabs >}} + +1. Verify that the download works and has the expected version: + + ```shell + kubeadm version + ``` + +1. On the control plane node, run: + + ```shell + sudo kubeadm upgrade plan + ``` + + You should see output similar to this: + + ```shell + [upgrade/config] Making sure the configuration is correct: + [upgrade/config] Reading configuration from the cluster... 
+ [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
+ [preflight] Running pre-flight checks.
+ [upgrade] Making sure the cluster is healthy:
+ [upgrade] Fetching available versions to upgrade to
+ [upgrade/versions] Cluster version: v1.14.2
+ [upgrade/versions] kubeadm version: v1.15.0
+
+ Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
+ COMPONENT   CURRENT       AVAILABLE
+ Kubelet     1 x v1.14.2   v1.15.0
+
+ Upgrade to the latest version in the v1.15 series:
+
+ COMPONENT            CURRENT   AVAILABLE
+ API Server           v1.14.2   v1.15.0
+ Controller Manager   v1.14.2   v1.15.0
+ Scheduler            v1.14.2   v1.15.0
+ Kube Proxy           v1.14.2   v1.15.0
+ CoreDNS              1.3.1     1.3.1
+ Etcd                 3.3.10    3.3.10
+
+ You can now apply the upgrade by executing the following command:
+
+ kubeadm upgrade apply v1.15.0
+
+ _____________________________________________________________________
+ ```
+
+ This command checks that your cluster can be upgraded, and fetches the versions you can upgrade to.
+
+ {{< note >}}
+ With the release of Kubernetes v1.15, `kubeadm upgrade` also automatically renews
+ the certificates that it manages on this node. To opt out of certificate renewal, the flag `--certificate-renewal=false` can be used.
+ For more information, see the [certificate management guide](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs).
+ {{< /note >}}
+
+1. Choose a version to upgrade to, and run the appropriate command. For example:
+
+ ```shell
+ sudo kubeadm upgrade apply v1.15.x
+ ```
+
+ - Replace `x` with the patch version you picked for this upgrade.
+
+ You should see output similar to this:
+
+ ```shell
+ [preflight] Running pre-flight checks.
+ [upgrade] Making sure the cluster is healthy:
+ [upgrade/config] Making sure the configuration is correct:
+ [upgrade/config] Reading configuration from the cluster...
+ [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
+ [upgrade/version] You have chosen to change the cluster version to "v1.15.0"
+ [upgrade/versions] Cluster version: v1.14.2
+ [upgrade/versions] kubeadm version: v1.15.0
+ [upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y
+ [upgrade/prepull] Will prepull images for components [kube-apiserver kube-controller-manager kube-scheduler etcd]
+ [upgrade/prepull] Prepulling image for component etcd.
+ [upgrade/prepull] Prepulling image for component kube-apiserver.
+ [upgrade/prepull] Prepulling image for component kube-controller-manager.
+ [upgrade/prepull] Prepulling image for component kube-scheduler.
+ [upgrade/prepull] Successfully prepulled the images for all the control plane components + [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.15.0"... + Static pod: kube-apiserver-luboitvbox hash: 8d931c2296a38951e95684cbcbe3b923 + Static pod: kube-controller-manager-luboitvbox hash: 2480bf6982ad2103c05f6764e20f2787 + Static pod: kube-scheduler-luboitvbox hash: 9b290132363a92652555896288ca3f88 + [upgrade/etcd] Upgrading to TLS for etcd + [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests446257614" + [upgrade/staticpods] Preparing for "kube-apiserver" upgrade + [upgrade/staticpods] Renewing "apiserver-etcd-client" certificate + [upgrade/staticpods] Renewing "apiserver" certificate + [upgrade/staticpods] Renewing "apiserver-kubelet-client" certificate + [upgrade/staticpods] Renewing "front-proxy-client" certificate + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-06-05-23-38-03/kube-apiserver.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: kube-apiserver-luboitvbox hash: 8d931c2296a38951e95684cbcbe3b923 + Static pod: kube-apiserver-luboitvbox hash: 1b4e2b09a408c844f9d7b535e593ead9 + [apiclient] Found 1 Pods for label selector component=kube-apiserver + [upgrade/staticpods] Component "kube-apiserver" upgraded successfully! + [upgrade/staticpods] Preparing for "kube-controller-manager" upgrade + [upgrade/staticpods] Renewing certificate embedded in "controller-manager.conf" + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-06-05-23-38-03/kube-controller-manager.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: kube-controller-manager-luboitvbox hash: 2480bf6982ad2103c05f6764e20f2787 + Static pod: kube-controller-manager-luboitvbox hash: 6617d53423348aa619f1d6e568bb894a + [apiclient] Found 1 Pods for label selector component=kube-controller-manager + [upgrade/staticpods] Component "kube-controller-manager" upgraded successfully! + [upgrade/staticpods] Preparing for "kube-scheduler" upgrade + [upgrade/staticpods] Renewing certificate embedded in "scheduler.conf" + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-06-05-23-38-03/kube-scheduler.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: kube-scheduler-luboitvbox hash: 9b290132363a92652555896288ca3f88 + Static pod: kube-scheduler-luboitvbox hash: edf58ab819741a5d1eb9c33de756e3ca + [apiclient] Found 1 Pods for label selector component=kube-scheduler + [upgrade/staticpods] Component "kube-scheduler" upgraded successfully! 
+ [upgrade/staticpods] Renewing certificate embedded in "admin.conf" + [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace + [kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster + [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace + [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials + [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token + [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster + [addons] Applied essential addon: CoreDNS + [addons] Applied essential addon: kube-proxy + + [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.15.0". Enjoy! + + [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so. + ``` + +1. Manually upgrade your CNI provider plugin. + + Your Container Network Interface (CNI) provider may have its own upgrade instructions to follow. + Check the [addons](/docs/concepts/cluster-administration/addons/) page to + find your CNI provider and see whether additional upgrade steps are required. + +1. Upgrade the kubelet and kubectl on the control plane node: + + {{< tabs name="k8s_install_kubelet" >}} + {{% tab name="Ubuntu, Debian or HypriotOS" %}} + # replace x in 1.15.x-00 with the latest patch version + apt-mark unhold kubelet && \ + apt-get update && apt-get install -y kubelet=1.15.x-00 kubectl=1.15.x-00 && \ + apt-mark hold kubelet + {{% /tab %}} + {{% tab name="CentOS, RHEL or Fedora" %}} + # replace x in 1.15.x-0 with the latest patch version + yum install -y kubelet-1.15.x-0 kubectl-1.15.x-0 --disableexcludes=kubernetes + {{% /tab %}} + {{< /tabs >}} + +1. Restart the kubelet + + ```shell + sudo systemctl restart kubelet + ``` + +## Upgrade additional control plane nodes + +1. Same as the first control plane node but use: + +``` +sudo kubeadm upgrade node +``` + +instead of: + +``` +sudo kubeadm upgrade apply +``` + +Also `sudo kubeadm upgrade plan` is not needed. + +## Upgrade worker nodes + +The upgrade procedure on worker nodes should be executed one node at a time or few nodes at a time, +without compromising the minimum required capacity for running your workloads. + +### Upgrade kubeadm + +1. Upgrade kubeadm on all worker nodes: + + {{< tabs name="k8s_install_kubeadm_worker_nodes" >}} + {{% tab name="Ubuntu, Debian or HypriotOS" %}} + # replace x in 1.15.x-00 with the latest patch version + apt-mark unhold kubeadm && \ + apt-get update && apt-get install -y kubeadm=1.15.x-00 && \ + apt-mark hold kubeadm + {{% /tab %}} + {{% tab name="CentOS, RHEL or Fedora" %}} + # replace x in 1.15.x-0 with the latest patch version + yum install -y kubeadm-1.15.x-0 --disableexcludes=kubernetes + {{% /tab %}} + {{< /tabs >}} + +### Cordon the node + +1. Prepare the node for maintenance by marking it unschedulable and evicting the workloads. 
Run:
+
+ ```shell
+ kubectl drain $NODE --ignore-daemonsets
+ ```
+
+ You should see output similar to this:
+
+ ```shell
+ kubectl drain ip-172-31-85-18
+ node "ip-172-31-85-18" cordoned
+ error: unable to drain node "ip-172-31-85-18", aborting command...
+
+ There are pending nodes to be drained:
+ ip-172-31-85-18
+ error: DaemonSet-managed pods (use --ignore-daemonsets to ignore): calico-node-5798d, kube-proxy-thjp9
+ ```
+
+### Upgrade the kubelet configuration
+
+1. Call the following command:
+
+ ```shell
+ sudo kubeadm upgrade node
+ ```
+
+### Upgrade kubelet and kubectl
+
+1. Upgrade the Kubernetes package version by running the Linux package manager for your distribution:
+
+ {{< tabs name="k8s_kubelet_and_kubectl" >}}
+ {{% tab name="Ubuntu, Debian or HypriotOS" %}}
+ # replace x in 1.15.x-00 with the latest patch version
+ apt-get update
+ apt-get install -y kubelet=1.15.x-00 kubectl=1.15.x-00
+ {{% /tab %}}
+ {{% tab name="CentOS, RHEL or Fedora" %}}
+ # replace x in 1.15.x-0 with the latest patch version
+ yum install -y kubelet-1.15.x-0 kubectl-1.15.x-0 --disableexcludes=kubernetes
+ {{% /tab %}}
+ {{< /tabs >}}
+
+1. Restart the kubelet
+
+ ```shell
+ sudo systemctl restart kubelet
+ ```
+
+### Uncordon the node
+
+1. Bring the node back online by marking it schedulable:
+
+ ```shell
+ kubectl uncordon $NODE
+ ```
+
+## Verify the status of the cluster
+
+After the kubelet is upgraded on all nodes, verify that all nodes are available again by running the following command from anywhere kubectl can access the cluster:
+
+```shell
+kubectl get nodes
+```
+
+The `STATUS` column should show `Ready` for all your nodes, and the version number should be updated.
+
+{{% /capture %}}
+
+## Recovering from a failure state
+
+If `kubeadm upgrade` fails and does not roll back, for example because of an unexpected shutdown during execution, you can run `kubeadm upgrade` again.
+This command is idempotent and eventually makes sure that the actual state is the desired state you declare.
+
+To recover from a bad state, you can also run `kubeadm upgrade --force` without changing the version that your cluster is running.
+
+## How it works
+
+`kubeadm upgrade apply` does the following:
+
+- Checks that your cluster is in an upgradeable state:
+  - The API server is reachable
+  - All nodes are in the `Ready` state
+  - The control plane is healthy
+- Enforces the version skew policies.
+- Makes sure the control plane images are available or available to pull to the machine.
+- Upgrades the control plane components or rolls back if any of them fails to come up.
+- Applies the new `kube-dns` and `kube-proxy` manifests and makes sure that all necessary RBAC rules are created.
+- Creates new certificate and key files of the API server and backs up old files if they're about to expire in 180 days.
+
+`kubeadm upgrade node` does the following on additional control plane nodes:
+
+- Fetches the kubeadm `ClusterConfiguration` from the cluster.
+- Optionally backs up the kube-apiserver certificate.
+- Upgrades the static Pod manifests for the control plane components.
+- Upgrades the kubelet configuration for this node.
+
+`kubeadm upgrade node` does the following on worker nodes:
+
+- Fetches the kubeadm `ClusterConfiguration` from the cluster.
+- Upgrades the kubelet configuration for this node.
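+
+If you manage many worker nodes, the per-node steps above (drain, upgrade kubeadm, run `kubeadm upgrade node`, upgrade kubelet and kubectl, uncordon) can be scripted. The sketch below is illustrative only: it assumes Debian-based nodes that are reachable over SSH with sudo rights, uses placeholder node names, and, as in the steps above, `x` in `1.15.x-00` stands for the latest patch version.
+
+```shell
+# Illustrative sketch: upgrade worker nodes one at a time.
+# Assumptions: kubectl access to the cluster, SSH and sudo on each node,
+# Debian-based packages; "worker-1 worker-2" are placeholder node names.
+for NODE in worker-1 worker-2; do
+    # Move workloads off the node; DaemonSet-managed pods stay put.
+    kubectl drain "$NODE" --ignore-daemonsets
+
+    # Upgrade kubeadm, apply the new kubelet configuration,
+    # then upgrade the kubelet and kubectl packages.
+    ssh "$NODE" "sudo apt-mark unhold kubeadm kubelet kubectl && \
+        sudo apt-get update && \
+        sudo apt-get install -y kubeadm=1.15.x-00 && \
+        sudo kubeadm upgrade node && \
+        sudo apt-get install -y kubelet=1.15.x-00 kubectl=1.15.x-00 && \
+        sudo apt-mark hold kubeadm kubelet kubectl && \
+        sudo systemctl restart kubelet"
+
+    # Allow Pods to be scheduled on the node again and wait for it to report Ready.
+    kubectl uncordon "$NODE"
+    kubectl wait --for=condition=Ready node/"$NODE" --timeout=5m
+done
+```
+
+Draining before the package upgrade keeps workloads off the node while the kubelet restarts, matching the cordon and uncordon flow described above.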
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-ha-1-12.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-ha-1-12.md deleted file mode 100644 index ced1351b4dbfb..0000000000000 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-ha-1-12.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -reviewers: -- jamiehannaford -- luxas -- timothysc -- jbeda -title: Upgrading kubeadm HA clusters from v1.11 to v1.12 -content_template: templates/task ---- - -{{% capture overview %}} - -This page explains how to upgrade a highly available (HA) Kubernetes cluster created with `kubeadm` from version 1.11.x to version 1.12.x. In addition to upgrading, you must also follow the instructions in [Creating HA clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/). - -{{% /capture %}} - -{{% capture prerequisites %}} - -Before proceeding: - -- You need to have a `kubeadm` HA cluster running version 1.11 or higher. -- Make sure you read the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md) carefully. -- Make sure to back up any important components, such as app-level state stored in a database. `kubeadm upgrade` does not touch your workloads, only components internal to Kubernetes, but backups are always a best practice. -- Check the prerequisites for [Upgrading/downgrading kubeadm clusters between v1.11 to v1.12](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-12/). - -{{< note >}} -All commands on any control plane or etcd node should be run as root. -{{< /note >}} - -{{% /capture %}} - -{{% capture steps %}} - -## Prepare for both methods - -Upgrade `kubeadm` to the version that matches the version of Kubernetes that you are upgrading to: - -```shell -apt-mark unhold kubeadm && \ -apt-get update && apt-get upgrade -y kubeadm && \ -apt-mark hold kubeadm -``` - -Check prerequisites and determine the upgrade versions: - -```shell -kubeadm upgrade plan -``` - -You should see something like the following: - - Upgrade to the latest stable version: - - COMPONENT CURRENT AVAILABLE - API Server v1.11.3 v1.12.0 - Controller Manager v1.11.3 v1.12.0 - Scheduler v1.11.3 v1.12.0 - Kube Proxy v1.11.3 v1.12.0 - CoreDNS 1.1.3 1.2.2 - Etcd 3.2.18 3.2.24 - -## Stacked control plane nodes - -### Upgrade the first control plane node - -Modify `configmap/kubeadm-config` for this control plane node: - -```shell -kubectl get configmap -n kube-system kubeadm-config -o yaml > kubeadm-config-cm.yaml -``` - -Open the file in an editor and replace the following values: - -- `api.advertiseAddress` - - This should be set to the local node's IP address. - -- `etcd.local.extraArgs.advertise-client-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.initial-advertise-peer-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.listen-client-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.listen-peer-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.initial-cluster` - - This should be updated to include the hostname and IP address pairs for each control plane node in the cluster. For example: - - "ip-172-31-92-42=https://172.31.92.42:2380,ip-172-31-89-186=https://172.31.89.186:2380,ip-172-31-90-42=https://172.31.90.42:2380" - -You must also pass an additional argument (`initial-cluster-state: existing`) to etcd.local.extraArgs. 
- -```shell -kubectl apply -f kubeadm-config-cm.yaml --force -``` - -Start the upgrade: - -```shell -kubeadm upgrade apply v -``` - -You should see something like the following: - - [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.12.0". Enjoy! - -The `kubeadm-config` ConfigMap is now updated from `v1alpha2` version to `v1alpha3`. - -### Upgrading additional control plane nodes - -Each additional control plane node requires modifications that are different from the first control plane node. Run: - -```shell -kubectl get configmap -n kube-system kubeadm-config -o yaml > kubeadm-config-cm.yaml -``` - -Open the file in an editor and replace the following values for `ClusterConfiguration`: - -- `etcd.local.extraArgs.advertise-client-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.initial-advertise-peer-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.listen-client-urls` - - This should be updated to the local node's IP address. - -- `etcd.local.extraArgs.listen-peer-urls` - - This should be updated to the local node's IP address. - -You must also modify the `ClusterStatus` to add a mapping for the current host under apiEndpoints. - -Add an annotation for the cri-socket to the current node, for example to use Docker: - -```shell -kubectl annotate node kubeadm.alpha.kubernetes.io/cri-socket=/var/run/dockershim.sock -``` - -Apply the modified kubeadm-config on the node: - -```shell -kubectl apply -f kubeadm-config-cm.yaml --force -``` - -Start the upgrade: - -```shell -kubeadm upgrade apply v -``` - -You should see something like the following: - - [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.12.0". Enjoy! - -## External etcd - -### Upgrade each control plane - -Get a copy of the kubeadm config used to create this cluster. The config should be the same for every node. The config must exist on every control plane node before the upgrade begins. - -``` -# on each control plane node -kubectl get configmap -n kube-system kubeadm-config -o jsonpath={.data.MasterConfiguration} > kubeadm-config.yaml -``` - -Open the file in an editor and set `api.advertiseAddress` to the local node's IP address. - -Now run the upgrade on each control plane node one at a time. - -``` -kubeadm upgrade apply v1.12.0 --config kubeadm-config.yaml -``` - -### Upgrade etcd - -Kubernetes v1.11 to v1.12 only changed the patch version of etcd from v3.2.18 to v3.2.24. This is a rolling upgrade with no downtime, because you can run both versions in the same cluster. - -On the first host, modify the etcd manifest: - -```shell -sed -i 's/3.2.18/3.2.24/' /etc/kubernetes/manifests/etcd.yaml -``` - -Wait for the etcd process to reconnect. There will be error warnings in the other etcd node logs. This is expected. - -Repeat this step on the other etcd hosts. - -## Next steps - -### Manually upgrade your CNI provider - -Your Container Network Interface (CNI) provider might have its own upgrade instructions to follow. Check the [addons](/docs/concepts/cluster-administration/addons/) page to find your CNI provider and see whether you need to take additional upgrade steps. - -### Update kubelet and kubectl packages - -Upgrade the kubelet and kubectl by running the following on each node: - -```shell -# use your distro's package manager, e.g. 
'apt-get' on Debian-based systems -# for the versions stick to kubeadm's output (see above) -apt-mark unhold kubelet kubectl && \ -apt-get update && \ -apt-get install kubelet= kubectl= && \ -apt-mark hold kubelet kubectl && \ -systemctl restart kubelet -``` - -In this example a _deb_-based system is assumed and `apt-get` is used for installing the upgraded software. On rpm-based systems the command is `yum install =` for all packages. - -Verify that the new version of the kubelet is running: - -```shell -systemctl status kubelet -``` - -Verify that the upgraded node is available again by running the following command from wherever you run `kubectl`: - -```shell -kubectl get nodes -``` - -If the `STATUS` column shows `Ready` for the upgraded host, you can continue. You might need to repeat the command until the node shows `Ready`. - -## If something goes wrong - -If the upgrade fails, see whether one of the following scenarios applies: - -- If `kubeadm upgrade apply` failed to upgrade the cluster, it will try to perform a rollback. If this is the case on the first master, the cluster is probably still intact. - - You can run `kubeadm upgrade apply` again, because it is idempotent and should eventually make sure the actual state is the desired state you are declaring. You can run `kubeadm upgrade apply` to change a running cluster with `x.x.x --> x.x.x` with `--force` to recover from a bad state. - -- If `kubeadm upgrade apply` on one of the secondary masters failed, the cluster is upgraded and working, but the secondary masters are in an undefined state. You need to investigate further and join the secondaries manually. - -{{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/nodelocaldns.jpg b/content/en/docs/tasks/administer-cluster/nodelocaldns.jpg new file mode 100644 index 0000000000000..e42b86ea0247a Binary files /dev/null and b/content/en/docs/tasks/administer-cluster/nodelocaldns.jpg differ diff --git a/content/en/docs/tasks/administer-cluster/nodelocaldns.md b/content/en/docs/tasks/administer-cluster/nodelocaldns.md new file mode 100644 index 0000000000000..318a8d522055f --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/nodelocaldns.md @@ -0,0 +1,60 @@ +--- +reviewers: +- bowei +- zihongz +title: Using NodeLocal DNSCache in Kubernetes clusters +content_template: templates/task +--- + +{{% capture overview %}} +This page provides an overview of NodeLocal DNSCache feature in Kubernetes. +{{% /capture %}} + +{{% capture body %}} + +## Introduction + +NodeLocal DNSCache improves Cluster DNS performance by running a dns caching agent on cluster nodes as a Daemonset. In today's architecture, pods in ClusterFirst DNS mode reach out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS endpoint via iptables rules added by kube-proxy. With this new architecture, pods will reach out to the dns caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query kube-dns service for cache misses of cluster hostnames(cluster.local suffix by default). + + +## Motivation + +* With the current DNS architecture, it is possible that pods with the highest DNS QPS have to reach out to a different node, if there is no local kube-dns/CoreDNS instance. +Having a local cache will help improve the latency in such scenarios. 
+
+* Skipping iptables DNAT and connection tracking will help reduce [conntrack races](https://github.com/kubernetes/kubernetes/issues/56903) and avoid UDP DNS entries filling up the conntrack table.
+
+* Connections from the local caching agent to the kube-dns service can be upgraded to TCP. TCP conntrack entries will be removed on connection close, in contrast with UDP entries that have to time out ([default](https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt) `nf_conntrack_udp_timeout` is 30 seconds).
+
+* Upgrading DNS queries from UDP to TCP would reduce tail latency attributed to dropped UDP packets and DNS timeouts, usually up to 30s (3 retries + 10s timeout). Since the nodelocal cache listens for UDP DNS queries, applications don't need to be changed.
+
+* Metrics & visibility into DNS requests at a node level.
+
+* Negative caching can be re-enabled, thereby reducing the number of queries to the kube-dns service.
+
+## Architecture Diagram
+
+This is the path followed by DNS queries after NodeLocal DNSCache is enabled:
+
+![ ](nodelocaldns.jpg "NodeLocal DNSCache")
+
+## Configuration
+
+This feature can be enabled using the command:
+
+`KUBE_ENABLE_NODELOCAL_DNS=true go run hack/e2e.go -v --up`
+
+This works for e2e clusters created on GCE. On all other environments, the following steps will set up NodeLocal DNSCache:
+* A yaml similar to [this](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml) can be applied using the `kubectl create -f` command.
+* The `--cluster-dns` flag to kubelet needs to be modified to use the LOCAL_DNS IP that NodeLocal DNSCache is listening on (169.254.20.10 by default).
+
+Once enabled, node-local-dns pods will run in the kube-system namespace on each of the cluster nodes. This pod runs [CoreDNS](https://github.com/coredns/coredns) in cache mode, so all CoreDNS metrics exposed by the different plugins will be available on a per-node basis.
+
+### Feature availability
+
+The addon can be applied using the yaml specified above in any k8s version. The feature support is as described:
+
+| k8s version | Feature support |
+| :---------: |:-----------:|
+| 1.15 | Beta (not enabled by default) |
+| 1.13 | Alpha (not enabled by default) |
diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md
index c8e53f2439ffe..03ceecc4771f7 100644
--- a/content/en/docs/tasks/debug-application-cluster/audit.md
+++ b/content/en/docs/tasks/debug-application-cluster/audit.md
@@ -245,6 +245,76 @@ The AuditSink policy differs from the legacy audit runtime policy. This is becau
 The `level` field applies the given audit level to all requests.
 The `stages` field is now a whitelist of stages to record.
+#### Contacting the webhook
+
+Once the API server has determined a request should be sent to an audit sink webhook,
+it needs to know how to contact the webhook. This is specified in the `clientConfig`
+stanza of the webhook configuration.
+
+Audit sink webhooks can either be called via a URL or a service reference,
+and can optionally include a custom CA bundle to use to verify the TLS connection.
+
+##### URL
+
+`url` gives the location of the webhook, in standard URL form
+(`scheme://host:port/path`).
+
+The `host` should not refer to a service running in the cluster; use
+a service reference by specifying the `service` field instead.
+The host might be resolved via external DNS in some apiservers
+(i.e., `kube-apiserver` cannot resolve in-cluster DNS as that would
+be a layering violation). `host` may also be an IP address.
+
+Please note that using `localhost` or `127.0.0.1` as a `host` is
+risky unless you take great care to run this webhook on all hosts
+which run an apiserver which might need to make calls to this
+webhook. Such installs are likely to be non-portable, i.e., not easy
+to turn up in a new cluster.
+
+The scheme must be "https"; the URL must begin with "https://".
+
+Attempting to use a user or basic auth, e.g. "user:password@", is not allowed.
+Fragments ("#...") and query parameters ("?...") are also not allowed.
+
+Here is an example of a webhook configured to call a URL
+(and expecting the TLS certificate to be verified using system trust roots, so it does not specify a caBundle):
+
+```yaml
+apiVersion: auditregistration.k8s.io/v1alpha1
+kind: AuditSink
+...
+spec:
+  webhook:
+    clientConfig:
+      url: "https://my-webhook.example.com:9443/my-webhook-path"
+```
+
+##### Service Reference
+
+The `service` stanza inside `clientConfig` is a reference to the service for an audit sink webhook.
+If the webhook is running within the cluster, then you should use `service` instead of `url`.
+The service namespace and name are required. The port is optional and defaults to 443.
+The path is optional and defaults to "/".
+
+Here is an example of a webhook that is configured to call a service on port "1234"
+at the subpath "/my-path", and to verify the TLS connection against the ServerName
+`my-service-name.my-service-namespace.svc` using a custom CA bundle.
+
+```yaml
+apiVersion: auditregistration.k8s.io/v1alpha1
+kind: AuditSink
+...
+spec:
+  webhook:
+    clientConfig:
+      service:
+        namespace: my-service-namespace
+        name: my-service-name
+        path: /my-path
+        port: 1234
+      caBundle: "Ci0tLS0tQk......tLS0K"
+```
+
 #### Security
 
 Administrators should be aware that allowing write access to this feature grants read access to all cluster data. Access should be treated as a `cluster-admin` level privilege.
diff --git a/content/en/docs/tasks/run-application/configure-pdb.md b/content/en/docs/tasks/run-application/configure-pdb.md
index f5d735492d6e3..89ba99a4e8c64 100644
--- a/content/en/docs/tasks/run-application/configure-pdb.md
+++ b/content/en/docs/tasks/run-application/configure-pdb.md
@@ -49,6 +49,8 @@ specified by one of the built-in Kubernetes controllers:
 
 In this case, make a note of the controller's `.spec.selector`; the same selector goes into the PDBs `.spec.selector`.
 
+From version 1.15, PDBs support custom controllers where the [scale subresource](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#scale-subresource) is enabled.
+
 You can also use PDBs with pods which are not controlled by one of the above controllers, or arbitrary groups of pods, but there are some restrictions, described in [Arbitrary Controllers and Selectors](#arbitrary-controllers-and-selectors).
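+
+For illustration, a minimal PodDisruptionBudget could look like the sketch below. The name and the `app: my-app` label are hypothetical placeholders; `.spec.selector` must match the labels of the pods owned by your controller, whether that is one of the built-in controllers or, as noted above, a custom controller with the scale subresource enabled.
+
+```yaml
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: my-app-pdb            # hypothetical name
+spec:
+  maxUnavailable: 1           # allow at most one pod to be disrupted voluntarily
+  selector:
+    matchLabels:
+      app: my-app             # must match the labels selected by the owning controller
+```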
diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index d9dd3d2501a22..444a180b17d85 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -81,7 +81,7 @@ controlled by the php-apache deployment we created in the first step of these in Roughly speaking, HPA will increase and decrease the number of replicas (via the deployment) to maintain an average CPU utilization across all Pods of 50% (since each pod requests 200 milli-cores by [kubectl run](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/docs/user-guide/kubectl/kubectl_run.md), this means average CPU usage of 100 milli-cores). -See [here](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#autoscaling-algorithm) for more details on the algorithm. +See [here](/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details) for more details on the algorithm. ```shell kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10 diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md index 2ec086b20d56e..3320960d5b5f3 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -155,9 +155,12 @@ used. If multiple metrics are specified in a HorizontalPodAutoscaler, this calculation is done for each metric, and then the largest of the desired -replica counts is chosen. If any of those metrics cannot be converted +replica counts is chosen. If any of these metrics cannot be converted into a desired replica count (e.g. due to an error fetching the metrics -from the metrics APIs), scaling is skipped. +from the metrics APIs) and a scale down is suggested by the metrics which +can be fetched, scaling is skipped. This means that the HPA is still capable +of scaling up if one or more metrics give a `desiredReplicas` greater than +the current value. Finally, just before HPA scales the target, the scale recommendation is recorded. The controller considers all recommendations within a configurable window choosing the diff --git a/static/images/docs/scheduling-framework-extensions.png b/static/images/docs/scheduling-framework-extensions.png new file mode 100644 index 0000000000000..b38b3ad8d76da Binary files /dev/null and b/static/images/docs/scheduling-framework-extensions.png differ
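+
+To make the HorizontalPodAutoscaler behavior described above concrete, here is a minimal sketch of an `autoscaling/v2beta2` object that tracks two resource metrics; the workload name and the target values are placeholders. If one metric (say memory) cannot be fetched while the other calls for more replicas, the controller can still scale up, but it skips any scale down suggested only by the metrics it can read.
+
+```yaml
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache                 # placeholder workload
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 50     # desired replicas derived from CPU utilization
+  - type: Resource
+    resource:
+      name: memory
+      target:
+        type: AverageValue
+        averageValue: 500Mi        # desired replicas derived from average memory use
+```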