From 3f58083d430580d42b8d6a6768464f6dae74d589 Mon Sep 17 00:00:00 2001
From: Yinan Li
Date: Mon, 11 Sep 2017 12:39:22 -0700
Subject: [PATCH 1/3] Revamped concepts doc for ReplicaSet

---
 .../workloads/controllers/frontend.yaml      |  12 +-
 .../workloads/controllers/replicaset.md      | 140 ++++++++++++++++--
 2 files changed, 131 insertions(+), 21 deletions(-)

diff --git a/docs/concepts/workloads/controllers/frontend.yaml b/docs/concepts/workloads/controllers/frontend.yaml
index 2a2b4d13b9e2b..6d35a818dbb06 100644
--- a/docs/concepts/workloads/controllers/frontend.yaml
+++ b/docs/concepts/workloads/controllers/frontend.yaml
@@ -2,19 +2,13 @@ apiVersion: extensions/v1beta1
 kind: ReplicaSet
 metadata:
   name: frontend
-  # these labels can be applied automatically
-  # from the labels in the pod template if not set
-  # labels:
-    # app: guestbook
-    # tier: frontend
+  labels:
+    app: guestbook
+    tier: frontend
 spec:
   # this replicas value is default
   # modify it according to your case
   replicas: 3
-  # selector can be applied automatically
-  # from the labels in the pod template if not set,
-  # but we are specifying the selector here to
-  # demonstrate its usage.
   selector:
     matchLabels:
       tier: frontend
diff --git a/docs/concepts/workloads/controllers/replicaset.md b/docs/concepts/workloads/controllers/replicaset.md
index a9247f15aaba3..561f2159cf250 100644
--- a/docs/concepts/workloads/controllers/replicaset.md
+++ b/docs/concepts/workloads/controllers/replicaset.md
@@ -17,7 +17,6 @@ whereas a Replication Controller only supports equality-based selector requireme
 
 {% endcapture %}
 
-
 {% capture body %}
 
 ## How to use a ReplicaSet
@@ -52,20 +51,21 @@ use a Deployment instead, and define your application in the spec section.
 
 {% include code.html language="yaml" file="frontend.yaml" ghlink="/docs/concepts/workloads/controllers/frontend.yaml" %}
 
-Saving this config into `frontend.yaml` and submitting it to a Kubernetes cluster should
+Saving this manifest into `frontend.yaml` and submitting it to a Kubernetes cluster should
 create the defined ReplicaSet and the pods that it manages.
 
 ```shell
 $ kubectl create -f frontend.yaml
 replicaset "frontend" created
 $ kubectl describe rs/frontend
-Name:         frontend
-Namespace:    default
-Selector:     tier=frontend,tier in (frontend)
-Labels:       app=guestbook,tier=frontend
-Annotations:  <none>
-Replicas:     3 current / 3 desired
-Pods Status:  3 Running / 0 Waiting / 0 Succeeded / 0 Failed
+Name:         frontend
+Namespace:    default
+Selector:     tier=frontend,tier in (frontend)
+Labels:       app=guestbook
+              tier=frontend
+Annotations:  <none>
+Replicas:     3 current / 3 desired
+Pods Status:  3 Running / 0 Waiting / 0 Succeeded / 0 Failed
 Pod Template:
   Labels:       app=guestbook
                 tier=frontend
@@ -93,7 +93,99 @@ frontend-dnjpy   1/1       Running   0          1m
 frontend-qhloh   1/1       Running   0          1m
 ```
 
-## ReplicaSet as an Horizontal Pod Autoscaler target
+## Writing a ReplicaSet Spec
+
+As with all other Kubernetes API objects, a ReplicaSet needs the `apiVersion`, `kind`, and `metadata` fields. For
+general information about working with manifests, see [simple YAML files](/docs/user-guide/simple-yaml/),
+[configuring containers](/docs/user-guide/configuring-containers/), and [object management with kubectl](/docs/concepts/tools/kubectl/object-management-overview/).
+
+A ReplicaSet also needs a [`.spec` section](https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status).
+
+### Pod Template
+
+The `.spec.template` is the only required field of the `.spec`. The `.spec.template` is a
+[pod template](/docs/concepts/workloads/pods/pod-overview/#pod-templates). It has exactly the same schema as a
+[pod](/docs/concepts/workloads/pods/pod/), except that it is nested and does not have an `apiVersion` or `kind`.
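+
+For example, you can verify this against the `frontend` ReplicaSet created above; the nested template
+appears under `.spec.template` in the output (abbreviated here):
+
+```shell
+# Dump the ReplicaSet; note that .spec.template carries labels and containers,
+# but no apiVersion or kind of its own.
+kubectl get rs frontend -o yaml
+```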
+
+In addition to required fields of a pod, a pod template in a ReplicaSet must specify appropriate
+labels and an appropriate restart policy.
+
+For labels, make sure to not overlap with other controllers. For more information, see [pod selector](#pod-selector).
+For restart policy, only a [`.spec.template.spec.restartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/)
+equal to `Always` is allowed, which is the default if not specified.
+
+For local container restarts, ReplicaSet delegate to an agent on the node,
+for example the [Kubelet](/docs/admin/kubelet/) or Docker.
+
+### Pod Selector
+
+The `.spec.selector` field is a [label selector](/docs/user-guide/labels/#label-selectors). A ReplicaSet
+manages all the pods with labels that match the selector. It does not distinguish
+between pods that it created or deleted and pods that another person or process created or
+deleted. This allows the ReplicaSet to be replaced without affecting the running pods.
+
+The `.spec.template.metadata.labels` must match the `.spec.selector`, or it will
+be rejected by the API.
+
+In Kubernetes 1.8 or later, `.spec.selector` and `.metadata.labels` no longer default to `.spec.template.metadata.labels` if not set. So they must be set explicitly. Also note that `.spec.selector` is immutable after creation in Kubernetes 1.8 or later.
+
+Also you should not normally create any pods whose labels match this selector, either directly, with
+another ReplicaSet, or with another controller such as Deployment. If you do so, the ReplicaSet thinks that it
+created the other pods. Kubernetes does not stop you from doing this.
+
+If you do end up with multiple controllers that have overlapping selectors, you
+will have to manage the deletion yourself.
+
+### Labels on a ReplicaSet
+
+The ReplicaSet can itself have labels (`.metadata.labels`). Typically, you
+would set these the same as the `.spec.template.metadata.labels`. However, they are allowed to be
+different, and the `.metadata.labels` do not affect the behavior of the ReplicaSet.
+
+### Replicas
+
+You can specify how many pods should run concurrently by setting `.spec.replicas` to the number
+of pods you would like to have running concurrently. The number running at any time may be higher
+or lower, such as if the replicas were just increased or decreased, or if a pod is gracefully
+shutdown, and a replacement starts early.
+
+If you do not specify `.spec.replicas`, then it defaults to 1.
+
+## Working with ReplicaSets
+
+### Deleting a ReplicaSet and its Pods
+
+To delete a ReplicaSet and all its pods, use [`kubectl
+delete`](/docs/user-guide/kubectl/{{page.version}}/#delete). Kubectl will scale the ReplicaSet to zero and wait
+for it to delete each pod before deleting the ReplicaSet itself. If this kubectl command is interrupted, it can
+be restarted.
+
+When using the REST API or Go client library, you need to do the steps explicitly (scale replicas to
+0, wait for pod deletions, then delete the ReplicaSet).
+
+### Deleting just a ReplicaSet
+
+You can delete a ReplicaSet without affecting any of its pods, using [`kubectl delete`](/docs/user-guide/kubectl/{{page.version}}/#delete) with the `--cascade=false` option.
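+
+For example, assuming the `frontend` ReplicaSet created earlier:
+
+```shell
+# Deletes only the ReplicaSet object; its pods keep running, now unmanaged.
+kubectl delete rs frontend --cascade=false
+```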
+
+When using the REST API or Go client library, simply delete the ReplicaSet object.
+
+Once the original is deleted, you can create a new ReplicaSet to replace it. As long
+as the old and new `.spec.selector` are the same, the new one will adopt the old pods.
+However, it will not make any effort to make existing pods match a new, different pod template.
+To update pods to a new spec in a controlled way, use a [rolling update](#rolling-updates).
+
+### Isolating pods from a ReplicaSet
+
+Pods may be removed from a ReplicaSet's target set by changing their labels. This technique may be used to remove pods
+from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically
+(assuming that the number of replicas is not also changed).
+
+### Scaling a ReplicaSet
+
+A ReplicaSet can be easily scaled up or down by updating the `.spec.replicas` field. The ReplicaSet controller
+ensures that a desired number of pods with a matching label selector are available and operational.
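+
+For example, to scale the `frontend` ReplicaSet from the command line:
+
+```shell
+# Grow to 5 replicas; the controller creates pods until 5 match the selector.
+kubectl scale rs frontend --replicas=5
+# And back down to 3.
+kubectl scale rs frontend --replicas=3
+```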
+
+### ReplicaSet as a Horizontal Pod Autoscaler Target
 
 A ReplicaSet can also be a target for
 [Horizontal Pod Autoscalers (HPA)](/docs/tasks/run-application/horizontal-pod-autoscale/). That is,
@@ -102,8 +194,7 @@ the ReplicaSet we created in the previous example.
 
 {% include code.html language="yaml" file="hpa-rs.yaml" ghlink="/docs/concepts/workloads/controllers/hpa-rs.yaml" %}
 
-
-Saving this config into `hpa-rs.yaml` and submitting it to a Kubernetes cluster should
+Saving this manifest into `hpa-rs.yaml` and submitting it to a Kubernetes cluster should
 create the defined HPA that autoscales the target ReplicaSet depending on the CPU usage
 of the replicated pods.
 
 ```shell
 kubectl create -f hpa-rs.yaml
 ```
 
@@ -118,6 +209,31 @@ Alternatively, you can use the `kubectl autoscale` command to accomplish the same
 
 ```shell
 kubectl autoscale rs frontend --max=10
 ```
 
+## Alternatives to ReplicaSet
+
+### Deployment (Recommended)
+
+[`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying Replica Sets and their Pods
+in a similar fashion as `kubectl rolling-update`. Deployments are recommended if you want this rolling update functionality,
+because unlike `kubectl rolling-update`, they are declarative, server-side, and have additional features. For more information on running a stateless
+application using a Deployment, please read [Run a Stateless Application Using a Deployment](/docs/tasks/run-application/run-stateless-application-deployment/).
+
+### Bare Pods
+
+Unlike in the case where a user directly created pods, a ReplicaSet replaces pods that are deleted or terminated for any reason, such as in the case of node failure or disruptive node maintenance, such as a kernel upgrade. For this reason, we recommend that you use a ReplicaSet even if your application requires only a single pod. Think of it similarly to a process supervisor, only it supervises multiple pods across multiple nodes instead of individual processes on a single node. A ReplicaSet delegates local container restarts to some agent on the node (for example, Kubelet or Docker).
+
+### Job
+
+Use a [`Job`](/docs/concepts/jobs/run-to-completion-finite-workloads/) instead of a ReplicaSet for pods that are expected to terminate on their own
+(that is, batch jobs).
+
+### DaemonSet
+
+Use a [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) instead of a ReplicaSet for pods that provide a
+machine-level function, such as machine monitoring or machine logging. These pods have a lifetime that is tied
+to a machine lifetime: the pod needs to be running on the machine before other pods start, and is
+safe to terminate when the machine is otherwise ready to be rebooted/shut down.
+
 {% endcapture %}
 
 {% include templates/concept.md %}

From 185b81f5110852db2fd9752de9572ac52be97a2f Mon Sep 17 00:00:00 2001
From: Yinan Li
Date: Tue, 19 Sep 2017 10:15:09 -0700
Subject: [PATCH 2/3] Minor changes to call out specific versions for selector
 defaulting and immutability

---
 docs/concepts/workloads/controllers/frontend.yaml | 2 +-
 docs/concepts/workloads/controllers/replicaset.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/concepts/workloads/controllers/frontend.yaml b/docs/concepts/workloads/controllers/frontend.yaml
index 6d35a818dbb06..39b7e8dbf307b 100644
--- a/docs/concepts/workloads/controllers/frontend.yaml
+++ b/docs/concepts/workloads/controllers/frontend.yaml
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1beta2 # for versions before 1.8.0 use extensions/v1beta1
 kind: ReplicaSet
 metadata:
   name: frontend
diff --git a/docs/concepts/workloads/controllers/replicaset.md b/docs/concepts/workloads/controllers/replicaset.md
index 561f2159cf250..76ef560e45cda 100644
--- a/docs/concepts/workloads/controllers/replicaset.md
+++ b/docs/concepts/workloads/controllers/replicaset.md
@@ -127,7 +127,7 @@ deleted. This allows the ReplicaSet to be replaced without affecting the running
 The `.spec.template.metadata.labels` must match the `.spec.selector`, or it will
 be rejected by the API.
 
-In Kubernetes 1.8 or later, `.spec.selector` and `.metadata.labels` no longer default to `.spec.template.metadata.labels` if not set. So they must be set explicitly. Also note that `.spec.selector` is immutable after creation in Kubernetes 1.8 or later.
+In Kubernetes 1.8 the API version `apps/v1beta2` on the ReplicaSet kind is the current version and is enabled by default. The API version `extensions/v1beta1` is deprecated. In API version `apps/v1beta2`, `.spec.selector` and `.metadata.labels` no longer default to `.spec.template.metadata.labels` if not set. So they must be set explicitly. Also note that `.spec.selector` is immutable after creation starting in API version `apps/v1beta2`.
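+
+To check which API versions your cluster serves (output varies by cluster and version):
+
+```shell
+# Look for apps/v1beta2 in the list of served group/versions.
+kubectl api-versions | grep apps
+```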
 
 Also you should not normally create any pods whose labels match this selector, either directly, with
 another ReplicaSet, or with another controller such as Deployment. If you do so, the ReplicaSet thinks that it

From cde27d7feb0e4680223deafba4b1043fe534aec9 Mon Sep 17 00:00:00 2001
From: Yinan Li
Date: Tue, 19 Sep 2017 11:39:28 -0700
Subject: [PATCH 3/3] Addressed doc review comments

---
 .../workloads/controllers/replicaset.md       | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/docs/concepts/workloads/controllers/replicaset.md b/docs/concepts/workloads/controllers/replicaset.md
index 76ef560e45cda..224b922473b53 100644
--- a/docs/concepts/workloads/controllers/replicaset.md
+++ b/docs/concepts/workloads/controllers/replicaset.md
@@ -110,11 +110,11 @@ The `.spec.template` is the only required field of the `.spec`. The `.spec.templ
 In addition to required fields of a pod, a pod template in a ReplicaSet must specify appropriate
 labels and an appropriate restart policy.
 
-For labels, make sure to not overlap with other controllers. For more information, see [pod selector](#pod-selector).
-For restart policy, only a [`.spec.template.spec.restartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/)
-equal to `Always` is allowed, which is the default if not specified.
+For labels, make sure to not overlap with other controllers. For more information, see [pod selector](#pod-selector).
+
+For [restart policy](/docs/concepts/workloads/pods/pod-lifecycle/), the only allowed value for `.spec.template.spec.restartPolicy` is `Always`, which is the default.
 
-For local container restarts, ReplicaSet delegate to an agent on the node,
+For local container restarts, ReplicaSet delegates to an agent on the node,
 for example the [Kubelet](/docs/admin/kubelet/) or Docker.
 
 ### Pod Selector
@@ -130,7 +130,7 @@ be rejected by the API.
 
 In Kubernetes 1.8 the API version `apps/v1beta2` on the ReplicaSet kind is the current version and is enabled by default. The API version `extensions/v1beta1` is deprecated. In API version `apps/v1beta2`, `.spec.selector` and `.metadata.labels` no longer default to `.spec.template.metadata.labels` if not set. So they must be set explicitly. Also note that `.spec.selector` is immutable after creation starting in API version `apps/v1beta2`.
 
 Also you should not normally create any pods whose labels match this selector, either directly, with
-another ReplicaSet, or with another controller such as Deployment. If you do so, the ReplicaSet thinks that it
+another ReplicaSet, or with another controller such as a Deployment. If you do so, the ReplicaSet thinks that it
 created the other pods. Kubernetes does not stop you from doing this.
 
 If you do end up with multiple controllers that have overlapping selectors, you
@@ -144,10 +144,9 @@ different, and the `.metadata.labels` do not affect the behavior of the ReplicaS
 
 ### Replicas
 
-You can specify how many pods should run concurrently by setting `.spec.replicas` to the number
-of pods you would like to have running concurrently. The number running at any time may be higher
+You can specify how many pods should run concurrently by setting `.spec.replicas`. The number running at any time may be higher
 or lower, such as if the replicas were just increased or decreased, or if a pod is gracefully
-shutdown, and a replacement starts early.
+shut down, and a replacement starts early.
 
 If you do not specify `.spec.replicas`, then it defaults to 1.
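+
+You can observe the desired and currently running counts at any time (your output will differ):
+
+```shell
+# The DESIRED and CURRENT columns report .spec.replicas and the live pod count.
+kubectl get rs frontend
+```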
 
@@ -213,14 +212,14 @@ kubectl autoscale rs frontend --max=10
 
 ### Deployment (Recommended)
 
-[`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying Replica Sets and their Pods
+[`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying ReplicaSets and their Pods
 in a similar fashion as `kubectl rolling-update`. Deployments are recommended if you want this rolling update functionality,
 because unlike `kubectl rolling-update`, they are declarative, server-side, and have additional features. For more information on running a stateless
 application using a Deployment, please read [Run a Stateless Application Using a Deployment](/docs/tasks/run-application/run-stateless-application-deployment/).
 
 ### Bare Pods
 
-Unlike in the case where a user directly created pods, a ReplicaSet replaces pods that are deleted or terminated for any reason, such as in the case of node failure or disruptive node maintenance, such as a kernel upgrade. For this reason, we recommend that you use a ReplicaSet even if your application requires only a single pod. Think of it similarly to a process supervisor, only it supervises multiple pods across multiple nodes instead of individual processes on a single node. A ReplicaSet delegates local container restarts to some agent on the node (for example, Kubelet or Docker).
+Unlike the case where a user directly created pods, a ReplicaSet replaces pods that are deleted or terminated for any reason, such as node failure or disruptive node maintenance (for example, a kernel upgrade). For this reason, we recommend that you use a ReplicaSet even if your application requires only a single pod. Think of it similarly to a process supervisor, only it supervises multiple pods across multiple nodes instead of individual processes on a single node. A ReplicaSet delegates local container restarts to some agent on the node (for example, Kubelet or Docker).
 
 ### Job