diff --git a/Makefile b/Makefile index c9dbeeda397dd..56359bd32b77d 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ build: ## Build site with production settings and put deliverables in ./public build-preview: ## Build site with drafts and future posts enabled hugo --buildDrafts --buildFuture -deploy-preview: check-hugo-versions ## Deploy preview site via netlify +deploy-preview: ## Deploy preview site via netlify hugo --enableGitInfo --buildFuture -b $(DEPLOY_PRIME_URL) functions-build: @@ -27,9 +27,9 @@ functions-build: check-headers-file: scripts/check-headers-file.sh -production-build: check-hugo-versions build check-headers-file ## Build the production site and ensure that noindex headers aren't added +production-build: build check-headers-file ## Build the production site and ensure that noindex headers aren't added -non-production-build: check-hugo-versions ## Build the non-production site, which adds noindex headers to prevent indexing +non-production-build: ## Build the non-production site, which adds noindex headers to prevent indexing hugo --enableGitInfo serve: ## Boot the development server. @@ -47,6 +47,3 @@ docker-serve: test-examples: scripts/test_examples.sh install scripts/test_examples.sh run - -check-hugo-versions: - scripts/hugo-version-check.sh $(HUGO_VERSION) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 6a04a87b77d7d..e3e643e339362 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -50,7 +50,6 @@ aliases: - kbhawkey - makoscafee - Rajakavitha1 - - ryanmcginnis - sftim - steveperry-53 - tengqm diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS index 6c33b7c038527..c87673847c5b6 100644 --- a/SECURITY_CONTACTS +++ b/SECURITY_CONTACTS @@ -10,6 +10,6 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -bradamant3 jimangel +kbarnard10 zacharysarah diff --git a/content/de/docs/reference/kubectl/cheatsheet.md b/content/de/docs/reference/kubectl/cheatsheet.md index 7ab55e20ea7aa..507fbbd50dd40 100644 --- a/content/de/docs/reference/kubectl/cheatsheet.md +++ b/content/de/docs/reference/kubectl/cheatsheet.md @@ -27,7 +27,7 @@ source <(kubectl completion bash) # Wenn Sie autocomplete in bash in der aktuell echo "source <(kubectl completion bash)" >> ~/.bashrc # Fügen Sie der Bash-Shell dauerhaft Autocomplete hinzu. ``` -Sie können auch ein Abkürzungsalias für `kubectl` verwenden, weleches auch mit Vervollständigung funktioniert: +Sie können auch ein Abkürzungsalias für `kubectl` verwenden, welches auch mit Vervollständigung funktioniert: ```bash alias k=kubectl diff --git a/content/en/docs/concepts/_index.md b/content/en/docs/concepts/_index.md index 2106ae21cb9df..0cb970fd66e09 100644 --- a/content/en/docs/concepts/_index.md +++ b/content/en/docs/concepts/_index.md @@ -24,9 +24,9 @@ Once you've set your desired state, the *Kubernetes Control Plane* makes the clu * **[kubelet](/docs/admin/kubelet/)**, which communicates with the Kubernetes Master. * **[kube-proxy](/docs/admin/kube-proxy/)**, a network proxy which reflects Kubernetes networking services on each node. -## Kubernetes Objects +## Kubernetes objects -Kubernetes contains a number of abstractions that represent the state of your system: deployed containerized applications and workloads, their associated network and disk resources, and other information about what your cluster is doing. These abstractions are represented by objects in the Kubernetes API. 
See [Understanding Kubernetes Objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) for more details. +Kubernetes contains a number of abstractions that represent the state of your system: deployed containerized applications and workloads, their associated network and disk resources, and other information about what your cluster is doing. These abstractions are represented by objects in the Kubernetes API. See [Understanding Kubernetes objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/#kubernetes-objects) for more details. The basic Kubernetes objects include: @@ -35,7 +35,7 @@ The basic Kubernetes objects include: * [Volume](/docs/concepts/storage/volumes/) * [Namespace](/docs/concepts/overview/working-with-objects/namespaces/) -Kubernetes also contains higher-level abstractions that rely on [Controllers](/docs/concepts/architecture/controller/) to build upon the basic objects, and provide additional functionality and convenience features. These include: +Kubernetes also contains higher-level abstractions that rely on [controllers](/docs/concepts/architecture/controller/) to build upon the basic objects, and provide additional functionality and convenience features. These include: * [Deployment](/docs/concepts/workloads/controllers/deployment/) * [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) diff --git a/content/en/docs/concepts/architecture/controller.md b/content/en/docs/concepts/architecture/controller.md index fe8965f3e2873..e5bee1d0a52a7 100644 --- a/content/en/docs/concepts/architecture/controller.md +++ b/content/en/docs/concepts/architecture/controller.md @@ -26,7 +26,7 @@ closer to the desired state, by turning equipment on or off. ## Controller pattern A controller tracks at least one Kubernetes resource type. -These [objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) +These [objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/#kubernetes-objects) have a spec field that represents the desired state. The controller(s) for that resource are responsible for making the current state come closer to that desired state. diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 4d3da6ad118f2..660d589169710 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -37,7 +37,7 @@ On their own, custom resources simply let you store and retrieve structured data When you combine a custom resource with a *custom controller*, custom resources provide a true _declarative API_. -A [declarative API](/docs/concepts/overview/working-with-objects/kubernetes-objects/#understanding-kubernetes-objects) +A [declarative API](/docs/concepts/overview/kubernetes-api/) allows you to _declare_ or specify the desired state of your resource and tries to keep the current state of Kubernetes objects in sync with the desired state. 
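As a sketch of the declarative pattern described above (illustrative only, not part of this changeset; the object name and image tag are placeholders), a manifest declares the desired state in its `spec`, and the matching controller works to make the cluster's current state match it:

```yaml
apiVersion: apps/v1
kind: Deployment            # the object type; its controller reconciles state
metadata:
  name: example-nginx       # placeholder name
spec:                       # desired state: two replicas of an nginx container
  replicas: 2
  selector:
    matchLabels:
      app: example-nginx
  template:
    metadata:
      labels:
        app: example-nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2 # placeholder image tag
```

Applying a manifest like this (for example with `kubectl apply -f`) records the desired state; the Deployment controller then creates and maintains the Pods.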
The controller interprets the structured data as a record of the user's diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md index f0bac7e4cbd00..b6c09be817bf2 100644 --- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -12,9 +12,9 @@ This page explains how Kubernetes objects are represented in the Kubernetes API, {{% /capture %}} {{% capture body %}} -## Understanding Kubernetes Objects +## Understanding Kubernetes objects {#kubernetes-objects} -*Kubernetes Objects* are persistent entities in the Kubernetes system. Kubernetes uses these entities to represent the state of your cluster. Specifically, they can describe: +*Kubernetes objects* are persistent entities in the Kubernetes system. Kubernetes uses these entities to represent the state of your cluster. Specifically, they can describe: * What containerized applications are running (and on which nodes) * The resources available to those applications @@ -33,7 +33,7 @@ For example, a Kubernetes Deployment is an object that can represent an applicat For more information on the object spec, status, and metadata, see the [Kubernetes API Conventions](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). -### Describing a Kubernetes Object +### Describing a Kubernetes object When you create an object in Kubernetes, you must provide the object spec that describes its desired state, as well as some basic information about the object (such as a name). When you use the Kubernetes API to create the object (either directly or via `kubectl`), that API request must include that information as JSON in the request body. **Most often, you provide the information to `kubectl` in a .yaml file.** `kubectl` converts the information to JSON when making the API request. @@ -51,7 +51,7 @@ kubectl apply -f https://k8s.io/examples/application/deployment.yaml --record The output is similar to this: -```shell +``` deployment.apps/nginx-deployment created ``` @@ -65,14 +65,15 @@ In the `.yaml` file for the Kubernetes object you want to create, you'll need to * `spec` - What state you desire for the object The precise format of the object `spec` is different for every Kubernetes object, and contains nested fields specific to that object. The [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) can help you find the spec format for all of the objects you can create using Kubernetes. -For example, the `spec` format for a `Pod` can be found -[here](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core), -and the `spec` format for a `Deployment` can be found -[here](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps). +For example, the `spec` format for a Pod can be found in +[PodSpec v1 core](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core), +and the `spec` format for a Deployment can be found in +[DeploymentSpec v1 apps](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps). {{% /capture %}} {{% capture whatsnext %}} +* [Kubernetes API overview](/docs/reference/using-api/api-overview/) explains some more API concepts * Learn about the most important basic Kubernetes objects, such as [Pod](/docs/concepts/workloads/pods/pod-overview/).
* Learn about [controllers](/docs/concepts/architecture/controller/) in Kubernetes {{% /capture %}} diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md index 606b0f3f67669..a74e21910303a 100644 --- a/content/en/docs/concepts/overview/working-with-objects/labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/labels.md @@ -72,7 +72,7 @@ spec: image: nginx:1.7.9 ports: - containerPort: 80 - + ``` ## Label selectors @@ -92,7 +92,7 @@ them. For some API types, such as ReplicaSets, the label selectors of two instances must not overlap within a namespace, or the controller can see that as conflicting instructions and fail to determine how many replicas should be present. {{< /note >}} -{{< caution >}} +{{< caution >}} For both equality-based and set-based conditions there is no logical _OR_ (`||`) operator. Ensure your filter statements are structured accordingly. {{< /caution >}} @@ -210,7 +210,7 @@ this selector (respectively in `json` or `yaml` format) is equivalent to `compon #### Resources that support set-based requirements -Newer resources, such as [`Job`](/docs/concepts/jobs/run-to-completion-finite-workloads/), [`Deployment`](/docs/concepts/workloads/controllers/deployment/), [`Replica Set`](/docs/concepts/workloads/controllers/replicaset/), and [`Daemon Set`](/docs/concepts/workloads/controllers/daemonset/), support _set-based_ requirements as well. +Newer resources, such as [`Job`](/docs/concepts/workloads/controllers/jobs-run-to-completion/), [`Deployment`](/docs/concepts/workloads/controllers/deployment/), [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/), and [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/), support _set-based_ requirements as well. ```yaml selector: diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md index 63c6b59e7bd78..8e790151f28da 100644 --- a/content/en/docs/concepts/services-networking/dns-pod-service.md +++ b/content/en/docs/concepts/services-networking/dns-pod-service.md @@ -38,14 +38,16 @@ For more up-to-date specification, see ## Services -### A records +### A/AAAA records -"Normal" (not headless) Services are assigned a DNS A record for a name of the -form `my-svc.my-namespace.svc.cluster-domain.example`. This resolves to the cluster IP +"Normal" (not headless) Services are assigned a DNS A or AAAA record, +depending on the IP family of the service, for a name of the form +`my-svc.my-namespace.svc.cluster-domain.example`. This resolves to the cluster IP of the Service. -"Headless" (without a cluster IP) Services are also assigned a DNS A record for -a name of the form `my-svc.my-namespace.svc.cluster-domain.example`. Unlike normal +"Headless" (without a cluster IP) Services are also assigned a DNS A or AAAA record, +depending on the IP family of the service, for a name of the form +`my-svc.my-namespace.svc.cluster-domain.example`. Unlike normal Services, this resolves to the set of IPs of the pods selected by the Service. Clients are expected to consume the set or else use standard round-robin selection from the set. 
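To make the headless-Service behavior just described concrete, a minimal sketch follows (illustrative, not part of this changeset; names are placeholders). Setting `clusterIP: None` marks the Service as headless, so cluster DNS answers the name with the selected Pods' A or AAAA records instead of a single cluster IP:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-svc              # resolves as my-svc.my-namespace.svc.cluster-domain.example
  namespace: my-namespace
spec:
  clusterIP: None           # headless: DNS returns the Pod IPs, no cluster IP is allocated
  selector:
    app: my-app             # placeholder label identifying the backing Pods
  ports:
  - port: 80
```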
@@ -128,22 +130,22 @@ spec: ``` If there exists a headless service in the same namespace as the pod and with -the same name as the subdomain, the cluster's KubeDNS Server also returns an A +the same name as the subdomain, the cluster's DNS Server also returns an A or AAAA record for the Pod's fully qualified hostname. For example, given a Pod with the hostname set to "`busybox-1`" and the subdomain set to "`default-subdomain`", and a headless Service named "`default-subdomain`" in the same namespace, the pod will see its own FQDN as "`busybox-1.default-subdomain.my-namespace.svc.cluster-domain.example`". DNS serves an -A record at that name, pointing to the Pod's IP. Both pods "`busybox1`" and -"`busybox2`" can have their distinct A records. +A or AAAA record at that name, pointing to the Pod's IP. Both pods "`busybox1`" and +"`busybox2`" can have their distinct A or AAAA records. The Endpoints object can specify the `hostname` for any endpoint addresses, along with its IP. {{< note >}} -Because A records are not created for Pod names, `hostname` is required for the Pod's A +Because A or AAAA records are not created for Pod names, `hostname` is required for the Pod's A or AAAA record to be created. A Pod with no `hostname` but with `subdomain` will only create the -A record for the headless service (`default-subdomain.my-namespace.svc.cluster-domain.example`), +A or AAAA record for the headless service (`default-subdomain.my-namespace.svc.cluster-domain.example`), pointing to the Pod's IP address. Also, Pod needs to become ready in order to have a record unless `publishNotReadyAddresses=True` is set on the Service. {{< /note >}} diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 04046c97e1e03..e916e774c475c 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -17,24 +17,15 @@ weight: 40 For clarity, this guide defines the following terms: -Node -: A worker machine in Kubernetes, part of a cluster. - -Cluster -: A set of Nodes that run containerized applications managed by Kubernetes. For this example, and in most common Kubernetes deployments, nodes in the cluster are not part of the public internet. - -Edge router -: A router that enforces the firewall policy for your cluster. This could be a gateway managed by a cloud provider or a physical piece of hardware. - -Cluster network -: A set of links, logical or physical, that facilitate communication within a cluster according to the Kubernetes [networking model](/docs/concepts/cluster-administration/networking/). - -Service -: A Kubernetes {{< glossary_tooltip term_id="service" >}} that identifies a set of Pods using {{< glossary_tooltip text="label" term_id="label" >}} selectors. Unless mentioned otherwise, Services are assumed to have virtual IPs only routable within the cluster network. +* Node: A worker machine in Kubernetes, part of a cluster. +* Cluster: A set of Nodes that run containerized applications managed by Kubernetes. For this example, and in most common Kubernetes deployments, nodes in the cluster are not part of the public internet. +* Edge router: A router that enforces the firewall policy for your cluster. This could be a gateway managed by a cloud provider or a physical piece of hardware. 
+* Cluster network: A set of links, logical or physical, that facilitate communication within a cluster according to the Kubernetes [networking model](/docs/concepts/cluster-administration/networking/). +* Service: A Kubernetes {{< glossary_tooltip term_id="service" >}} that identifies a set of Pods using {{< glossary_tooltip text="label" term_id="label" >}} selectors. Unless mentioned otherwise, Services are assumed to have virtual IPs only routable within the cluster network. ## What is Ingress? -Ingress exposes HTTP and HTTPS routes from outside the cluster to +[Ingress](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io) exposes HTTP and HTTPS routes from outside the cluster to {{< link text="services" url="/docs/concepts/services-networking/service/" >}} within the cluster. Traffic routing is controlled by rules defined on the Ingress resource. @@ -474,6 +465,7 @@ You can expose a Service in multiple ways that don't directly involve the Ingres {{% /capture %}} {{% capture whatsnext %}} -* Learn about [ingress controllers](/docs/concepts/services-networking/ingress-controllers/) +* Learn about the [Ingress API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io) +* Learn about [Ingress Controllers](/docs/concepts/services-networking/ingress-controllers/) * [Set up Ingress on Minikube with the NGINX Controller](/docs/tasks/access-application-cluster/ingress-minikube) {{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/network-policies.md b/content/en/docs/concepts/services-networking/network-policies.md index 989e31992061c..3de1c87076629 100644 --- a/content/en/docs/concepts/services-networking/network-policies.md +++ b/content/en/docs/concepts/services-networking/network-policies.md @@ -199,7 +199,7 @@ If you want to allow all traffic from all pods in a namespace (even if policies You can create a "default" policy for a namespace which prevents all ingress AND egress traffic by creating the following NetworkPolicy in that namespace. -{{< codenew file="service/networking/network-policy-default-deny-egress.yaml" >}} +{{< codenew file="service/networking/network-policy-default-deny-all.yaml" >}} This ensures that even pods that aren't selected by any other NetworkPolicy will not be allowed ingress or egress traffic. @@ -207,7 +207,7 @@ This ensures that even pods that aren't selected by any other NetworkPolicy will {{< feature-state for_k8s_version="v1.12" state="alpha" >}} -To use this feature, you (or your cluster administrator) will need to enable the `SCTPSupport` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=SCTPSupport=true,…`. +To use this feature, you (or your cluster administrator) will need to enable the `SCTPSupport` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=SCTPSupport=true,…`. When the feature gate is enabled, you can set the `protocol` field of a NetworkPolicy to `SCTP`. {{< note >}} diff --git a/content/en/docs/contribute/start.md b/content/en/docs/contribute/start.md index 4af5f6d5cefd2..acd5a5bfdf2e9 100644 --- a/content/en/docs/contribute/start.md +++ b/content/en/docs/contribute/start.md @@ -61,7 +61,7 @@ formatting, and typographic conventions. Look over the style guide before you make your first contribution, and use it when you have questions. Changes to the style guide are made by SIG Docs as a group. 
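For readers following the `network-policy-default-deny-all.yaml` reference in the NetworkPolicy hunk above, a policy of that kind typically has the shape sketched below (an assumption about the referenced example file, not a copy of it). An empty `podSelector` matches every Pod in the namespace, and listing both policy types with no allow rules blocks all ingress and egress:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all    # placeholder name
spec:
  podSelector: {}           # empty selector: applies to all Pods in the namespace
  policyTypes:              # both types listed, no rules given: deny everything
  - Ingress
  - Egress
```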
To propose a change -or addition, [add it to the agenda](https://docs.google.com/document/d/1zg6By77SGg90EVUrhDIhopjZlSDg2jCebU-Ks9cYx0w/edit#) for an upcoming SIG Docs meeting, and attend the meeting to participate in the +or addition, [add it to the agenda](https://docs.google.com/document/d/1ddHwLK3kUMX1wVFIwlksjTk0MsqitBnWPe1LRa1Rx5A/edit) for an upcoming SIG Docs meeting, and attend the meeting to participate in the discussion. See the [advanced contribution](/docs/contribute/advanced/) topic for more information. diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index 60fafdfa69a4b..852e73fd799bc 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -5,6 +5,7 @@ reviewers: - liggitt title: Using RBAC Authorization content_template: templates/concept +aliases: [../../../rbac/] weight: 70 --- diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 6c0ca8f273be0..b02b3e48bde90 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -128,7 +128,7 @@ different Kubernetes components. | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | `ServerSideApply` | `true` | Beta | 1.16 | | | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | | -| `StartupProbe` | `true` | Beta | 1.17 | | +| `StartupProbe` | `false` | Alpha | 1.16 | | | `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | | `StorageVersionHash` | `true` | Beta | 1.15 | | | `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 | diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index 8b82d7da260f6..e30de7533035e 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -18,7 +18,7 @@ updating, and deleting primary resources via the standard HTTP verbs (POST, PUT, ## Standard API terminology -Most Kubernetes API resource types are "objects" - they represent a concrete instance of a concept on the cluster, like a pod or namespace. A smaller number of API resource types are "virtual" - they often represent operations rather than objects, such as a permission check (use a POST with a JSON-encoded body of `SubjectAccessReview` to the `subjectaccessreviews` resource). All objects will have a unique name to allow idempotent creation and retrieval, but virtual resource types may not have unique names if they are not retrievable or do not rely on idempotency. +Most Kubernetes API resource types are [objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/#kubernetes-objects): they represent a concrete instance of a concept on the cluster, like a pod or namespace. A smaller number of API resource types are "virtual" - they often represent operations rather than objects, such as a permission check (use a POST with a JSON-encoded body of `SubjectAccessReview` to the `subjectaccessreviews` resource). All objects will have a unique name to allow idempotent creation and retrieval, but virtual resource types may not have unique names if they are not retrievable or do not rely on idempotency. 
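A sketch of the virtual-resource pattern mentioned above (the user and attributes are placeholders). POSTing a body like this to the `subjectaccessreviews` resource performs a one-off permission check; nothing is stored, which is why such types do not need unique, retrievable names:

```yaml
apiVersion: authorization.k8s.io/v1
kind: SubjectAccessReview
spec:
  user: jane@example.com    # placeholder subject being checked
  resourceAttributes:
    group: apps
    resource: deployments
    verb: list              # can this user list Deployments
    namespace: dev          # in the "dev" namespace?
```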
Kubernetes generally leverages standard RESTful terminology to describe the API concepts: diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index c00f7736bbd96..093490b3453e1 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -71,6 +71,7 @@ their authors, not the Kubernetes team. | dotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | | DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | | Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | +| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | | Haskell | [github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | {{% /capture %}} diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index 704a2728c5e86..493e2ae5efe6e 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -162,6 +162,11 @@ This section contains the necessary steps to install `CRI-O` as CRI runtime. Use the following commands to install CRI-O on your system: +{{< note >}} +The CRI-O major and minor versions must match the Kubernetes major and minor versions. +For more information, see the [CRI-O compatibility matrix](https://github.com/cri-o/cri-o). +{{< /note >}} + ### Prerequisites ```shell diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index b768e13323dc0..09aa01de0ce1b 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -100,9 +100,9 @@ update-alternatives --set iptables /usr/sbin/iptables-legacy | Protocol | Direction | Port Range | Purpose | Used By | |----------|-----------|-------------|-----------------------|-------------------------| | TCP | Inbound | 10250 | Kubelet API | Self, Control plane | -| TCP | Inbound | 30000-32767 | NodePort Services** | All | +| TCP | Inbound | 30000-32767 | NodePort Services† | All | -** Default port range for [NodePort Services](/docs/concepts/services-networking/service/). +† Default port range for [NodePort Services](/docs/concepts/services-networking/service/). Any port numbers marked with * are overridable, so you will need to ensure any custom ports you provide are also open. @@ -116,35 +116,48 @@ documentation for the plugins about what port(s) those need. ## Installing runtime {#installing-runtime} -Since v1.6.0, Kubernetes has enabled the use of CRI, Container Runtime Interface, by default. +To run containers in Pods, Kubernetes uses a +{{< glossary_tooltip term_id="container-runtime" text="container runtime" >}}. -Since v1.14.0, kubeadm will try to automatically detect the container runtime on Linux nodes -by scanning through a list of well known domain sockets. The detectable runtimes and the -socket paths, that are used, can be found in the table below.
+{{< tabs name="container_runtime" >}} +{{% tab name="Linux nodes" %}} -| Runtime | Domain Socket | -|------------|----------------------------------| -| Docker | /var/run/docker.sock | -| containerd | /run/containerd/containerd.sock | -| CRI-O | /var/run/crio/crio.sock | +By default, Kubernetes uses the +{{< glossary_tooltip term_id="cri" text="Container Runtime Interface">}} (CRI) +to interface with your chosen container runtime. -If both Docker and containerd are detected together, Docker takes precedence. This is -needed, because Docker 18.09 ships with containerd and both are detectable. -If any other two or more runtimes are detected, kubeadm will exit with an appropriate -error message. +If you don't specify a runtime, kubeadm automatically tries to detect an installed +container runtime by scanning through a list of well known Unix domain sockets. +The following table lists container runtimes and their associated socket paths: -On non-Linux nodes the container runtime used by default is Docker. +{{< table caption = "Container runtimes and their socket paths" >}} +| Runtime | Path to Unix domain socket | +|------------|-----------------------------------| +| Docker | `/var/run/docker.sock` | +| containerd | `/run/containerd/containerd.sock` | +| CRI-O | `/var/run/crio/crio.sock` | +{{< /table >}} -If the container runtime of choice is Docker, it is used through the built-in -`dockershim` CRI implementation inside of the `kubelet`. +
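Alongside the socket-detection behavior covered in this hunk, kubeadm also accepts an explicitly configured socket, which skips auto-detection. A minimal sketch using a kubeadm configuration file, assuming the `v1beta2` config schema and the containerd socket path from the table above:

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock  # use containerd instead of auto-detecting
```

You would pass a file like this to `kubeadm init --config`.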
+ +If both Docker and containerd are detected, Docker takes precedence. This is +needed because Docker 18.09 ships with containerd and both are detectable even if you only +installed Docker. +If any other combination of two or more runtimes is detected, kubeadm exits with an error. -Other CRI-based runtimes include: +The kubelet integrates with Docker through the built-in `dockershim` CRI implementation. -- [containerd/cri](https://github.com/containerd/cri) (CRI plugin built into containerd) -- [cri-o](https://cri-o.io/) -- [frakti](https://github.com/kubernetes/frakti) +See [container runtimes](/docs/setup/production-environment/container-runtimes/) +for more information. +{{% /tab %}} +{{% tab name="other operating systems" %}} +By default, kubeadm uses {{< glossary_tooltip term_id="docker" >}} as the container runtime. +The kubelet integrates with Docker through the built-in `dockershim` CRI implementation. + +See [container runtimes](/docs/setup/production-environment/container-runtimes/) +for more information. +{{% /tab %}} +{{< /tabs >}} -Refer to the [CRI installation instructions](/docs/setup/cri) for more information. ## Installing kubeadm, kubelet and kubectl @@ -170,7 +183,7 @@ For information about installing `kubectl`, see [Install and set up kubectl](/do {{< warning >}} These instructions exclude all Kubernetes packages from any system upgrades. This is because kubeadm and Kubernetes require -[special attention to upgrade](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14/). +[special attention to upgrade](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/). {{< /warning >}} For more information on version skews, see: diff --git a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md index 3cf8c0a40be04..720203d60d93e 100644 --- a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -127,10 +127,12 @@ IP and may cause a second hop to another node, but should have good overall load-spreading. Local preserves the client source IP and avoids a second hop for LoadBalancer and NodePort type services, but risks potentially imbalanced traffic spreading. -* `service.spec.healthCheckNodePort` - specifies the health check nodePort -(numeric port number) for the service. If not specified, `healthCheckNodePort` is -created by the service API backend with the allocated `nodePort`. It will use the -user-specified `nodePort` value if specified by the client. It only has an +* `service.spec.healthCheckNodePort` - specifies the health check node port +(numeric port number) for the service. If `healthCheckNodePort` isn't specified, +the service controller allocates a port from your cluster's NodePort range. You +can configure that range by setting an API server command line option, +`--service-node-port-range`. If the client specifies +`healthCheckNodePort`, the service uses that value instead. It only has an effect when `type` is set to LoadBalancer and `externalTrafficPolicy` is set to Local.
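To tie the rewritten `healthCheckNodePort` bullet above to a manifest, a sketch with placeholder names and port numbers (not part of this changeset). The field only takes effect in exactly the combination shown, `type: LoadBalancer` together with `externalTrafficPolicy: Local`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example-lb          # placeholder name
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local  # preserve client source IP; nodes are health checked
  healthCheckNodePort: 30010    # placeholder; omit to have a port allocated for you
  selector:
    app: example
  ports:
  - port: 80
    targetPort: 8080
```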
diff --git a/content/en/docs/tasks/administer-cluster/declare-network-policy.md b/content/en/docs/tasks/administer-cluster/declare-network-policy.md index b282fd6514d33..edb389c46f3ac 100644 --- a/content/en/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/declare-network-policy.md @@ -3,6 +3,7 @@ reviewers: - caseydavenport - danwinship title: Declare Network Policy +min-kubernetes-server-version: v1.8 content_template: templates/task --- {{% capture overview %}} @@ -30,7 +31,7 @@ The above list is sorted alphabetically by product name, not by recommendation o ## Create an `nginx` deployment and expose it via a service -To see how Kubernetes network policy works, start off by creating an `nginx` deployment. +To see how Kubernetes network policy works, start off by creating an `nginx` Deployment. ```console kubectl create deployment nginx --image=nginx @@ -39,7 +40,7 @@ kubectl create deployment nginx --image=nginx deployment.apps/nginx created ``` -And expose it via a service. +Expose the Deployment through a Service called `nginx`. ```console kubectl expose deployment nginx --port=80 @@ -49,7 +50,7 @@ kubectl expose deployment nginx --port=80 service/nginx exposed ``` -This runs a `nginx` pods in the default namespace, and exposes it through a service called `nginx`. +The above commands create a Deployment with an nginx Pod and expose the Deployment through a Service named `nginx`. The `nginx` Pod and Deployment are found in the `default` namespace. ```console kubectl get svc,pod @@ -64,59 +65,43 @@ NAME READY STATUS RESTARTS AGE pod/nginx-701339712-e0qfq 1/1 Running 0 35s ``` -## Test the service by accessing it from another pod +## Test the service by accessing it from another Pod -You should be able to access the new `nginx` service from other pods. To test, access the service from another pod in the default namespace. Make sure you haven't enabled isolation on the namespace. - -Start a busybox container, and use `wget` on the `nginx` service: +You should be able to access the new `nginx` service from other Pods. To access the `nginx` Service from another Pod in the `default` namespace, start a busybox container: ```console kubectl run --generator=run-pod/v1 busybox --rm -ti --image=busybox -- /bin/sh ``` -```console -Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending, pod ready: false +In your shell, run the following command: -Hit enter for command prompt +```shell +wget --spider --timeout=1 nginx +``` -/ # wget --spider --timeout=1 nginx +```none Connecting to nginx (10.100.0.16:80) -/ # +remote file exists ``` ## Limit access to the `nginx` service -Let's say you want to limit access to the `nginx` service so that only pods with the label `access: true` can query it. To do that, create a `NetworkPolicy` that allows connections only from those pods: +To limit the access to the `nginx` service so that only Pods with the label `access: true` can query it, create a NetworkPolicy object as follows: -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: access-nginx -spec: - podSelector: - matchLabels: - app: nginx - ingress: - - from: - - podSelector: - matchLabels: - access: "true" -``` +{{< codenew file="service/networking/nginx-policy.yaml" >}} {{< note >}} -In the case, the label `app=nginx` is automatically added. +NetworkPolicy includes a `podSelector` which selects the grouping of Pods to which the policy applies. 
You can see this policy selects Pods with the label `app=nginx`. The label was automatically added to the Pod in the `nginx` Deployment. An empty `podSelector` selects all pods in the namespace. {{< /note >}} - ## Assign the policy to the service -Use kubectl to create a NetworkPolicy from the above nginx-policy.yaml file: +Use kubectl to create a NetworkPolicy from the above `nginx-policy.yaml` file: ```console -kubectl apply -f nginx-policy.yaml +kubectl apply -f https://k8s.io/examples/service/networking/nginx-policy.yaml ``` ```none @@ -124,40 +109,40 @@ networkpolicy.networking.k8s.io/access-nginx created ``` ## Test access to the service when access label is not defined -If we attempt to access the nginx Service from a pod without the correct labels, the request will now time out: +When you attempt to access the `nginx` Service from a Pod without the correct labels, the request times out: ```console kubectl run --generator=run-pod/v1 busybox --rm -ti --image=busybox -- /bin/sh ``` -```console -Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending, pod ready: false +In your shell, run the command: -Hit enter for command prompt +```shell +wget --spider --timeout=1 nginx +``` -/ # wget --spider --timeout=1 nginx +```none Connecting to nginx (10.100.0.16:80) wget: download timed out -/ # ``` ## Define access label and test again -Create a pod with the correct labels, and you'll see that the request is allowed: +You can create a Pod with the correct labels to see that the request is allowed: ```console kubectl run --generator=run-pod/v1 busybox --rm -ti --labels="access=true" --image=busybox -- /bin/sh ``` -```console -Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending, pod ready: false +In your shell, run the command: -Hit enter for command prompt +```shell +wget --spider --timeout=1 nginx +``` -/ # wget --spider --timeout=1 nginx +```none Connecting to nginx (10.100.0.16:80) -/ # +remote file exists ``` -{{% /capture %}} - +{{% /capture %}} diff --git a/content/en/docs/tasks/federation/policy.rego b/content/en/docs/tasks/federation/policy.rego deleted file mode 100644 index 49827b6ae96e2..0000000000000 --- a/content/en/docs/tasks/federation/policy.rego +++ /dev/null @@ -1,74 +0,0 @@ -# OPA supports a high-level declarative language named Rego for authoring and -# enforcing policies. For more information on Rego, visit -# http://openpolicyagent.org. - -# Rego policies are namespaced by the "package" directive. -package kubernetes.placement - -# Imports provide aliases for data inside the policy engine. In this case, the -# policy simply refers to "clusters" below. -import data.kubernetes.clusters - -# The "annotations" rule generates a JSON object containing the key -# "federation.kubernetes.io/replica-set-preferences" mapped to <preferences>. -# The preferences value is generated dynamically by OPA when it evaluates the -# rule. -# -# The SchedulingPolicy Admission Controller running inside the Federation API -# server will merge these annotations into incoming Federated resources. By -# setting replica-set-preferences, we can control the placement of Federated -# ReplicaSets. -# -# Rules are defined to generate JSON values (booleans, strings, objects, etc.) -# When OPA evaluates a rule, it generates a value IF all of the expressions in -# the body evaluate successfully. All rules can be understood intuitively as -# <name> = <value> if <body> where <body> is true if <expr-1> AND <expr-2> AND ... -# <expr-n> is true (for some set of data.)
-annotations["federation.kubernetes.io/replica-set-preferences"] = preferences { - input.kind = "ReplicaSet" - value = {"clusters": cluster_map, "rebalance": true} - json.marshal(value, preferences) -} - -# This "annotations" rule generates a value for the "federation.alpha.kubernetes.io/cluster-selector" -# annotation. -# -# In English, the policy asserts that resources in the "production" namespace -# that are not annotated with "criticality=low" MUST be placed on clusters -# labelled with "on-premises=true". -annotations["federation.alpha.kubernetes.io/cluster-selector"] = selector { - input.metadata.namespace = "production" - not input.metadata.annotations.criticality = "low" - json.marshal([{ - "operator": "=", - "key": "on-premises", - "values": "[true]", - }], selector) -} - -# Generates a set of cluster names that satisfy the incoming Federated -# ReplicaSet's requirements. In this case, just PCI compliance. -replica_set_clusters[cluster_name] { - clusters[cluster_name] - not insufficient_pci[cluster_name] -} - -# Generates a set of clusters that must not be used for Federated ReplicaSets -# that request PCI compliance. -insufficient_pci[cluster_name] { - clusters[cluster_name] - input.metadata.annotations["requires-pci"] = "true" - not pci_clusters[cluster_name] -} - -# Generates a set of clusters that are PCI certified. In this case, we assume -# clusters are annotated to indicate if they have passed PCI compliance audits. -pci_clusters[cluster_name] { - clusters[cluster_name].metadata.annotations["pci-certified"] = "true" -} - -# Helper rule to generate a mapping of desired clusters to weights. In this -# case, weights are static. -cluster_map[cluster_name] = {"weight": 1} { - replica_set_clusters[cluster_name] -} diff --git a/content/en/docs/tasks/federation/set-up-placement-policies-federation.md b/content/en/docs/tasks/federation/set-up-placement-policies-federation.md index 4329245d95c54..d7ac469ea9648 100644 --- a/content/en/docs/tasks/federation/set-up-placement-policies-federation.md +++ b/content/en/docs/tasks/federation/set-up-placement-policies-federation.md @@ -108,7 +108,82 @@ Create the namespace if it does not already exist: Configure a sample policy to test the external policy engine: -{{< code file="policy.rego" >}} +``` +# OPA supports a high-level declarative language named Rego for authoring and +# enforcing policies. For more information on Rego, visit +# http://openpolicyagent.org. + +# Rego policies are namespaced by the "package" directive. +package kubernetes.placement + +# Imports provide aliases for data inside the policy engine. In this case, the +# policy simply refers to "clusters" below. +import data.kubernetes.clusters + +# The "annotations" rule generates a JSON object containing the key +# "federation.kubernetes.io/replica-set-preferences" mapped to <preferences>. +# The preferences value is generated dynamically by OPA when it evaluates the +# rule. +# +# The SchedulingPolicy Admission Controller running inside the Federation API +# server will merge these annotations into incoming Federated resources. By +# setting replica-set-preferences, we can control the placement of Federated +# ReplicaSets. +# +# Rules are defined to generate JSON values (booleans, strings, objects, etc.) +# When OPA evaluates a rule, it generates a value IF all of the expressions in +# the body evaluate successfully. All rules can be understood intuitively as +# <name> = <value> if <body> where <body> is true if <expr-1> AND <expr-2> AND ... +# <expr-n> is true (for some set of data.)
+annotations["federation.kubernetes.io/replica-set-preferences"] = preferences { + input.kind = "ReplicaSet" + value = {"clusters": cluster_map, "rebalance": true} + json.marshal(value, preferences) +} + +# This "annotations" rule generates a value for the "federation.alpha.kubernetes.io/cluster-selector" +# annotation. +# +# In English, the policy asserts that resources in the "production" namespace +# that are not annotated with "criticality=low" MUST be placed on clusters +# labelled with "on-premises=true". +annotations["federation.alpha.kubernetes.io/cluster-selector"] = selector { + input.metadata.namespace = "production" + not input.metadata.annotations.criticality = "low" + json.marshal([{ + "operator": "=", + "key": "on-premises", + "values": "[true]", + }], selector) +} + +# Generates a set of cluster names that satisfy the incoming Federated +# ReplicaSet's requirements. In this case, just PCI compliance. +replica_set_clusters[cluster_name] { + clusters[cluster_name] + not insufficient_pci[cluster_name] +} + +# Generates a set of clusters that must not be used for Federated ReplicaSets +# that request PCI compliance. +insufficient_pci[cluster_name] { + clusters[cluster_name] + input.metadata.annotations["requires-pci"] = "true" + not pci_clusters[cluster_name] +} + +# Generates a set of clusters that are PCI certified. In this case, we assume +# clusters are annotated to indicate if they have passed PCI compliance audits. +pci_clusters[cluster_name] { + clusters[cluster_name].metadata.annotations["pci-certified"] = "true" +} + +# Helper rule to generate a mapping of desired clusters to weights. In this +# case, weights are static. +cluster_map[cluster_name] = {"weight": 1} { + replica_set_clusters[cluster_name] +} +``` Shown below is the command to create the sample policy: diff --git a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md index d3d341cd0cbce..13bebe9f53688 100644 --- a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md @@ -18,9 +18,10 @@ This page shows how to run a replicated stateful application using a The example is a MySQL single-master topology with multiple slaves running asynchronous replication. -Note that **this is not a production configuration**. -In particular, MySQL settings remain on insecure defaults to keep the focus +{{< note >}} +**This is not a production configuration**. MySQL settings remain on insecure defaults to keep the focus on general patterns for running stateful applications in Kubernetes. +{{< /note >}} {{% /capture %}} diff --git a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md index 87f0b01ad0b32..777265c68bedf 100644 --- a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -187,7 +187,7 @@ underlying resource upon deleting the PersistentVolume. * Learn more about [Deployment objects](/docs/concepts/workloads/controllers/deployment/).
-* Learn more about [Deploying applications](/docs/user-guide/deploying-applications/) +* Learn more about [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/) * [kubectl run documentation](/docs/reference/generated/kubectl/kubectl-commands/#run) diff --git a/content/en/docs/tasks/tools/install-minikube.md b/content/en/docs/tasks/tools/install-minikube.md index 6effe11116b1f..03c3b07cd5cec 100644 --- a/content/en/docs/tasks/tools/install-minikube.md +++ b/content/en/docs/tasks/tools/install-minikube.md @@ -74,9 +74,17 @@ If you do not already have a hypervisor installed, install one of these now: • [VirtualBox](https://www.virtualbox.org/wiki/Downloads) -{{< note >}} -Minikube also supports a `--vm-driver=none` option that runs the Kubernetes components on the host and not in a VM. Using this driver requires [Docker](https://www.docker.com/products/docker-desktop) and a Linux environment but not a hypervisor. It is recommended to use the apt installation of docker from [Docker](https://www.docker.com/products/docker-desktop), when using the none driver. The snap installation of docker does not work with minikube. -{{< /note >}} +Minikube also supports a `--vm-driver=none` option that runs the Kubernetes components on the host and not in a VM. +Using this driver requires [Docker](https://www.docker.com/products/docker-desktop) and a Linux environment but not a hypervisor. + +If you're using the `none` driver in Debian or a derivative, use the `.deb` packages for +Docker rather than the snap package, which does not work with Minikube. +You can download `.deb` packages from [Docker](https://www.docker.com/products/docker-desktop). + +{{< caution >}} +The `none` VM driver can result in security and data loss issues. +Before using `--vm-driver=none`, consult [this documentation](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) for more information. +{{< /caution >}} ### Install Minikube using a package diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md index 92a465ea4ff08..e8a16568ad9b7 100644 --- a/content/en/docs/tutorials/hello-minikube.md +++ b/content/en/docs/tutorials/hello-minikube.md @@ -8,7 +8,7 @@ menu: weight: 10 post: >

Ready to get your hands dirty? Build a simple Kubernetes cluster that runs "Hello World" for Node.js.

-card: +card: name: tutorials weight: 10 --- @@ -17,7 +17,7 @@ card: This tutorial shows you how to run a simple Hello World Node.js app on Kubernetes using [Minikube](/docs/setup/learning-environment/minikube) and Katacoda. -Katacoda provides a free, in-browser Kubernetes environment. +Katacoda provides a free, in-browser Kubernetes environment. {{< note >}} You can also follow this tutorial if you've installed [Minikube locally](/docs/tasks/tools/install-minikube/). @@ -49,7 +49,7 @@ For more information on the `docker build` command, read the [Docker documentati ## Create a Minikube cluster -1. Click **Launch Terminal** +1. Click **Launch Terminal** {{< kat-button >}} @@ -63,7 +63,7 @@ For more information on the `docker build` command, read the [Docker documentati 3. Katacoda environment only: At the top of the terminal pane, click the plus sign, and then click **Select port to view on Host 1**. -4. Katacoda environment only: Type `30000`, and then click **Display Port**. +4. Katacoda environment only: Type `30000`, and then click **Display Port**. ## Create a Deployment @@ -75,7 +75,7 @@ Pod and restarts the Pod's Container if it terminates. Deployments are the recommended way to manage the creation and scaling of Pods. 1. Use the `kubectl create` command to create a Deployment that manages a Pod. The -Pod runs a Container based on the provided Docker image. +Pod runs a Container based on the provided Docker image. ```shell kubectl create deployment hello-node --image=gcr.io/hello-minikube-zero-install/hello-node @@ -118,7 +118,7 @@ Pod runs a Container based on the provided Docker image. ```shell kubectl config view ``` - + {{< note >}}For more information about `kubectl`commands, see the [kubectl overview](/docs/user-guide/kubectl-overview/).{{< /note >}} ## Create a Service @@ -133,7 +133,7 @@ Kubernetes [*Service*](/docs/concepts/services-networking/service/). ```shell kubectl expose deployment hello-node --type=LoadBalancer --port=8080 ``` - + The `--type=LoadBalancer` flag indicates that you want to expose your Service outside of the cluster. @@ -199,13 +199,13 @@ Minikube has a set of built-in {{< glossary_tooltip text="addons" term_id="addon storage-provisioner: enabled storage-provisioner-gluster: disabled ``` - + 2. Enable an addon, for example, `metrics-server`: ```shell minikube addons enable metrics-server ``` - + The output is similar to: ``` @@ -246,7 +246,7 @@ Minikube has a set of built-in {{< glossary_tooltip text="addons" term_id="addon ```shell minikube addons disable metrics-server ``` - + The output is similar to: ``` @@ -279,7 +279,7 @@ minikube delete {{% capture whatsnext %}} * Learn more about [Deployment objects](/docs/concepts/workloads/controllers/deployment/). -* Learn more about [Deploying applications](/docs/user-guide/deploying-applications/). +* Learn more about [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/). * Learn more about [Service objects](/docs/concepts/services-networking/service/). 
{{% /capture %}} diff --git a/content/en/examples/service/networking/network-policy-default-deny-all.yaml b/content/en/examples/service/networking/network-policy-default-deny-all.yaml index 589f15eb3e0c4..5c0086bd71e8b 100644 --- a/content/en/examples/service/networking/network-policy-default-deny-all.yaml +++ b/content/en/examples/service/networking/network-policy-default-deny-all.yaml @@ -1,3 +1,4 @@ +--- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: diff --git a/content/en/examples/service/networking/nginx-policy.yaml b/content/en/examples/service/networking/nginx-policy.yaml new file mode 100644 index 0000000000000..89ee9886925e7 --- /dev/null +++ b/content/en/examples/service/networking/nginx-policy.yaml @@ -0,0 +1,13 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: access-nginx +spec: + podSelector: + matchLabels: + app: nginx + ingress: + - from: + - podSelector: + matchLabels: + access: "true" diff --git a/content/id/docs/concepts/policy/resource-quotas.md b/content/id/docs/concepts/policy/resource-quotas.md new file mode 100644 index 0000000000000..b4a3e28ebb1dc --- /dev/null +++ b/content/id/docs/concepts/policy/resource-quotas.md @@ -0,0 +1,622 @@ +--- +title: Resource Quota +content_template: templates/concept +weight: 10 +--- + +{{% capture overview %}} + +Saat beberapa pengguna atau tim berbagi sebuah klaster dengan jumlah Node yang tetap, +ada satu hal yang perlu diperhatikan yaitu suatu tim dapat menggunakan sumber daya +lebih dari jatah yang mereka perlukan. + +_Resource Quota_ (kuota sumber daya) adalah sebuah alat yang dapat digunakan oleh +administrator untuk mengatasi hal ini. + +{{% /capture %}} + +{{% capture body %}} + +Sebuah Resource Quota, didefinisikan oleh objek API `ResourceQuota`, menyediakan batasan-batasan +yang membatasi konsumsi gabungan sumber daya komputasi untuk tiap Namespace. Resource Quota dapat +membatasi jumlah objek yang dapat dibuat dalam sebuah Namespace berdasarkan tipenya, maupun jumlah +seluruh sumber daya komputasi yang dapat dipakai oleh sumber daya API (misalnya Pod) di Namespace +tersebut. + +Resource Quota bekerja sebagai berikut: + +- Tim-tim berbeda bekerja pada Namespace yang berbeda pula. Sekarang hal ini belum diwajibkan, + tetapi dukungan untuk mewajibkannya melalui ACL sedang direncanakan. +- Administrator membuat sebuah `ResourceQuota` untuk setiap Namespace. +- Para pengguna membuat sumber daya (Pod, Service, dll.) di dalam Namespace tersebut, kemudian + sistem kuota memantau penggunaan untuk memastikan bahwa penggunaannya tidak melebihi batas + sumber daya yang ditentukan di `ResourceQuota`. +- Jika pembuatan atau pembaruan sebuah sumber daya melanggar sebuah batasan kuota, maka permintaan + tersebut akan gagal dengan kode status `403 FORBIDDEN` dengan sebuah pesan yang menjelaskan batasan + yang akan dilanggar. +- Jika kuota diaktifkan di sebuah Namespace untuk sumber daya komputasi seperti `cpu` dan `memory`, + pengguna-pengguna harus menentukan `requests` atau `limits` untuk sumber daya tersebut; atau sistem + kuota akan menolak pembuatan Pod tersebut. Petunjuk: Gunakan Admission Controller `LimitRanger` untuk + memaksa nilai-nilai bawaan untuk Pod-Pod yang tidak menentukan kebutuhan sumber daya komputasi. + Lihat [petunjuknya](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/) untuk contoh bagaimana + cara menghindari masalah ini. 
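Ahead of the policy examples that follow, a minimal compute ResourceQuota may help as a reference point (a sketch; the namespace and values are placeholders, not taken from this changeset):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota
  namespace: team-a         # placeholder namespace
spec:
  hard:
    requests.cpu: "10"      # total CPU requests across all Pods in the namespace
    requests.memory: 20Gi   # total memory requests
    limits.cpu: "16"        # total CPU limits
    limits.memory: 32Gi     # total memory limits
```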
+ +Contoh-contoh kebijakan yang dapat dibuat menggunakan Namespace dan kuota adalah: + +- Dalam sebuah klaster dengan kapasitas RAM 32 GiB, dan CPU 16 _core_, misalkan tim A menggunakan 20GiB + dan 10 _core_, dan tim B menggunakan 10GiB dan 4 _core_, dan menyimpan 2GiB dan 2 _core_ untuk cadangan + penggunaan di masa depan. +- Batasi Namespace "testing" dengan batas 1 _core_ dan RAM 1GiB. Biarkan Namespace "production" menggunakan + berapapun jumlah yang diinginkan. + +Pada kasus di mana total kapasitas klaster lebih sedikit dari jumlah seluruh kuota di seluruh Namespace, +dapat terjadi perebutan sumber daya komputasi. Masalah ini akan ditangani dengan cara siapa-cepat-dia-dapat. + +Perebutan sumber daya komputasi maupun perubahan kuota tidak akan memengaruhi sumber daya yang sudah dibuat +sebelumnya. + +## Mengaktifkan Resource Quota + +Dukungan untuk Resource Quota diaktifkan secara bawaan pada banyak distribusi Kubernetes. Resource Quota +diaktifkan saat _flag_ `--enable-admission-plugins=` pada apiserver memiliki `ResourceQuota` sebagai +salah satu nilainya. + +Sebuah Resource Quota akan dipaksakan pada sebuah Namespace tertentu saat ada sebuah objek `ResourceQuota` +di dalam Namespace tersebut. + +## Resource Quota Komputasi + +Kamu dapat membatasi jumlah total [sumber daya komputasi](/docs/user-guide/compute-resources) yang dapat +diminta di dalam sebuah Namespace. + +Berikut jenis-jenis sumber daya yang didukung: + +| Nama Sumber Daya | Deskripsi | +| --------------------- | ----------------------------------------------------------- | +| `limits.cpu` | Pada seluruh Pod yang berada pada kondisi non-terminal, jumlah `limits` CPU tidak dapat melebihi nilai ini. | +| `limits.memory` | Pada seluruh Pod yang berada pada kondisi non-terminal, jumlah `limits` memori tidak dapat melebihi nilai ini. | +| `requests.cpu` | Pada seluruh Pod yang berada pada kondisi non-terminal, jumlah `requests` CPU tidak dapat melebihi nilai ini. | +| `requests.memory` | Pada seluruh Pod yang berada pada kondisi non-terminal, jumlah `requests` memori tidak dapat melebihi nilai ini. | + +### Resource Quota untuk sumber daya yang diperluas + +Sebagai tambahan untuk sumber daya yang disebutkan di atas, pada rilis 1.10, dukungan kuota untuk +[sumber daya yang diperluas](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) ditambahkan. + +Karena _overcommit_ tidak diperbolehkan untuk sumber daya yang diperluas, tidak masuk akal untuk menentukan +keduanya; `requests` dan `limits` untuk sumber daya yang diperluas yang sama pada sebuah kuota. Jadi, untuk +sumber daya yang diperluas, hanya kuota dengan prefiks `requests.` saja yang diperbolehkan untuk sekarang. + +Mari kita ambil contoh sumber daya GPU. Jika nama sumber dayanya adalah `nvidia.com/gpu`, dan kamu ingin +membatasi jumlah total GPU yang diminta pada sebuah Namespace menjadi 4, kamu dapat menentukan sebuah kuota +sebagai berikut: + +* `requests.nvidia.com/gpu: 4` + +Lihat [Melihat dan Menyetel Kuota](#melihat-dan-menyetel-kuota) untuk informasi lebih lanjut. + + +## Resource Quota untuk penyimpanan + +Kamu dapat membatasi jumlah total [sumber daya penyimpanan](/docs/concepts/storage/persistent-volumes/) yang dapat +diminta pada sebuah Namespace. + +Sebagai tambahan, kamu dapat membatasi penggunaan sumber daya penyimpanan berdasarkan _storage class_ +sumber daya penyimpanan tersebut.
+
+Lihat [Melihat dan Menyetel Kuota](#melihat-dan-menyetel-kuota) untuk informasi lebih lanjut.
+
+## Resource Quota untuk penyimpanan
+
+Kamu dapat membatasi jumlah total [sumber daya penyimpanan](/docs/concepts/storage/persistent-volumes/) yang dapat
+diminta pada sebuah Namespace.
+
+Sebagai tambahan, kamu dapat membatasi penggunaan sumber daya penyimpanan berdasarkan _storage class_
+sumber daya penyimpanan tersebut.
+
+| Nama Sumber Daya | Deskripsi |
+| --------------------- | ----------------------------------------------------------- |
+| `requests.storage` | Pada seluruh Persistent Volume Claim, jumlah `requests` penyimpanan tidak dapat melebihi nilai ini. |
+| `persistentvolumeclaims` | Jumlah [Persistent Volume Claim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat ada di dalam sebuah Namespace. |
+| `<storage-class-name>.storageclass.storage.k8s.io/requests.storage` | Pada seluruh Persistent Volume Claim yang dikaitkan dengan sebuah nama _storage class_ (melalui kolom `storageClassName`), jumlah `requests` penyimpanan tidak dapat melebihi nilai ini. |
+| `<storage-class-name>.storageclass.storage.k8s.io/persistentvolumeclaims` | Pada seluruh Persistent Volume Claim yang dikaitkan dengan sebuah nama _storage class_ (melalui kolom `storageClassName`), jumlah [Persistent Volume Claim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat ada di dalam sebuah Namespace. |
+
+Sebagai contoh, jika sebuah operator ingin membatasi penyimpanan dengan Storage Class `gold` secara terpisah
+dari Storage Class `bronze`, maka operator tersebut dapat menentukan kuota sebagai berikut:
+
+* `gold.storageclass.storage.k8s.io/requests.storage: 500Gi`
+* `bronze.storageclass.storage.k8s.io/requests.storage: 100Gi`
+
+Pada rilis 1.8, dukungan kuota untuk penyimpanan lokal sementara (_local ephemeral storage_) ditambahkan sebagai
+sebuah fitur _alpha_:
+
+| Nama Sumber Daya | Deskripsi |
+| ------------------------------- |----------------------------------------------------------- |
+| `requests.ephemeral-storage` | Pada seluruh Pod di sebuah Namespace, jumlah `requests` penyimpanan lokal sementara tidak dapat melebihi nilai ini. |
+| `limits.ephemeral-storage` | Pada seluruh Pod di sebuah Namespace, jumlah `limits` penyimpanan lokal sementara tidak dapat melebihi nilai ini. |
+
+## Kuota Kuantitas Objek
+
+Rilis 1.9 menambahkan dukungan untuk membatasi semua jenis sumber daya standar yang berada pada sebuah Namespace dengan sintaksis sebagai berikut:
+
+* `count/<resource>.<group>`
+
+Berikut contoh-contoh sumber daya yang dapat ditentukan pengguna pada kuota kuantitas objek:
+
+* `count/persistentvolumeclaims`
+* `count/services`
+* `count/secrets`
+* `count/configmaps`
+* `count/replicationcontrollers`
+* `count/deployments.apps`
+* `count/replicasets.apps`
+* `count/statefulsets.apps`
+* `count/jobs.batch`
+* `count/cronjobs.batch`
+* `count/deployments.extensions`
+
+Rilis 1.15 menambahkan dukungan untuk sumber daya _custom_ menggunakan sintaksis yang sama.
+Contohnya, untuk membuat kuota pada sumber daya _custom_ `widgets` pada grup API `example.com`, gunakan
+`count/widgets.example.com`.
+
+Saat menggunakan Resource Quota `count/*`, sebuah objek akan dihitung terhadap kuotanya jika ia berada pada penyimpanan Apiserver.
+Tipe-tipe kuota ini berguna untuk menjaga dari kehabisan sumber daya penyimpanan. Misalnya, kamu mungkin
+ingin membatasi kuantitas objek Secret pada sebuah Apiserver karena ukuran mereka yang besar. Terlalu banyak
+Secret pada sebuah klaster bahkan dapat membuat Server dan Controller tidak dapat dijalankan! Kamu dapat membatasi
+jumlah Job untuk menjaga dari CronJob yang salah dikonfigurasi sehingga membuat terlalu banyak Job pada sebuah
+Namespace yang mengakibatkan _denial of service_.
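+
+Sebagai gambaran, kuota kuantitas objek dengan sintaksis `count/<resource>.<group>` dapat
+dideklarasikan kurang lebih seperti berikut (nama objek dan angka-angkanya hanyalah contoh):
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: object-quota-demo  # nama hanya sebagai contoh
+spec:
+  hard:
+    count/deployments.apps: "2"  # paling banyak 2 Deployment di Namespace ini
+    count/cronjobs.batch: "1"
+    count/secrets: "4"
+```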
+
+Sebelum rilis 1.9, kita hanya dapat melakukan pembatasan kuantitas objek generik pada kumpulan sumber daya yang terbatas.
+Sebagai tambahan, kita dapat membatasi lebih lanjut sumber daya tertentu dengan kuota berdasarkan jenis mereka.
+
+Berikut jenis-jenis yang telah didukung:
+
+| Nama Sumber Daya | Deskripsi |
+| ------------------------------- | ------------------------------------------------- |
+| `configmaps` | Jumlah total ConfigMap yang dapat berada pada suatu Namespace. |
+| `persistentvolumeclaims` | Jumlah total [PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat berada pada suatu Namespace. |
+| `pods` | Jumlah total Pod yang berada pada kondisi non-terminal yang dapat berada pada suatu Namespace. Sebuah Pod berada pada kondisi terminal jika `.status.phase in (Failed, Succeeded)` bernilai `true`. |
+| `replicationcontrollers` | Jumlah total ReplicationController yang dapat berada pada suatu Namespace. |
+| `resourcequotas` | Jumlah total [ResourceQuota](/docs/reference/access-authn-authz/admission-controllers/#resourcequota) yang dapat berada pada suatu Namespace. |
+| `services` | Jumlah total Service yang dapat berada pada suatu Namespace. |
+| `services.loadbalancers` | Jumlah total Service dengan tipe LoadBalancer yang dapat berada pada suatu Namespace. |
+| `services.nodeports` | Jumlah total Service dengan tipe NodePort yang dapat berada pada suatu Namespace. |
+| `secrets` | Jumlah total Secret yang dapat berada pada suatu Namespace. |
+
+Sebagai contoh, kuota `pods` menghitung dan memaksakan jumlah maksimum Pod yang
+berada pada kondisi non-terminal yang dibuat pada sebuah Namespace. Kamu mungkin ingin
+menyetel kuota `pods` pada sebuah Namespace untuk menghindari kasus di mana pengguna membuat
+banyak Pod kecil dan menghabiskan persediaan alamat IP Pod pada klaster.
+
+## Lingkup Kuota
+
+Setiap kuota dapat memiliki sekumpulan lingkup (_scope_) yang dikaitkan dengannya. Sebuah kuota hanya akan mengukur penggunaan sebuah
+sumber daya jika sumber daya tersebut cocok dengan irisan dari lingkup-lingkup yang ditentukan.
+
+Saat sebuah lingkup ditambahkan kepada kuota, lingkup itu akan membatasi jenis-jenis sumber daya yang didukung menjadi hanya yang berkaitan dengan lingkup tersebut.
+Sumber daya yang ditentukan pada kuota di luar kumpulan yang diizinkan akan menghasilkan kesalahan validasi.
+
+| Lingkup | Deskripsi |
+| ----- | ----------- |
+| `Terminating` | Mencocokkan Pod-Pod yang memiliki `.spec.activeDeadlineSeconds >= 0` |
+| `NotTerminating` | Mencocokkan Pod-Pod yang memiliki `.spec.activeDeadlineSeconds` bernilai `nil` |
+| `BestEffort` | Mencocokkan Pod-Pod yang memiliki _quality of service_ bertipe _best effort_. |
+| `NotBestEffort` | Mencocokkan Pod-Pod yang tidak memiliki _quality of service_ bertipe _best effort_. |
+
+Lingkup `BestEffort` membatasi sebuah kuota untuk memantau sumber daya berikut: `pods`
+
+Lingkup `Terminating`, `NotTerminating`, dan `NotBestEffort` membatasi sebuah kuota untuk memantau sumber daya berikut:
+
+* `cpu`
+* `limits.cpu`
+* `limits.memory`
+* `memory`
+* `pods`
+* `requests.cpu`
+* `requests.memory`
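+
+Sebagai gambaran, lingkup dinyatakan melalui kolom `scopes` pada spesifikasi kuota. Kuota berikut
+(namanya hanyalah contoh) hanya menghitung Pod-Pod dengan _quality of service_ bertipe _best effort_:
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: besteffort-pods  # nama hanya sebagai contoh
+spec:
+  hard:
+    pods: "10"
+  scopes: ["BestEffort"]
+```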
+
+### Resource Quota Per PriorityClass
+
+{{< feature-state for_k8s_version="1.12" state="beta" >}}
+
+Pod-Pod dapat dibuat dengan sebuah [Priority (prioritas)](/docs/concepts/configuration/pod-priority-preemption/#pod-priority) tertentu.
+Kamu dapat mengontrol konsumsi sumber daya sistem sebuah Pod berdasarkan Priority Pod tersebut, menggunakan
+kolom `scopeSelector` pada spesifikasi kuota tersebut.
+
+Sebuah kuota dicocokkan dan digunakan hanya jika `scopeSelector` pada spesifikasi kuota tersebut memilih Pod tersebut.
+
+Contoh berikut membuat objek-objek kuota dan mencocokkannya dengan Pod-Pod pada Priority tertentu. Contoh tersebut
+bekerja sebagai berikut:
+
+- Pod-Pod di dalam klaster memiliki satu dari tiga Priority Class: "low", "medium", "high".
+- Satu objek kuota dibuat untuk setiap Priority.
+
+Simpan YAML berikut ke sebuah berkas bernama `quota.yml`.
+
+```yaml
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+  kind: ResourceQuota
+  metadata:
+    name: pods-high
+  spec:
+    hard:
+      cpu: "1000"
+      memory: 200Gi
+      pods: "10"
+    scopeSelector:
+      matchExpressions:
+      - operator: In
+        scopeName: PriorityClass
+        values: ["high"]
+- apiVersion: v1
+  kind: ResourceQuota
+  metadata:
+    name: pods-medium
+  spec:
+    hard:
+      cpu: "10"
+      memory: 20Gi
+      pods: "10"
+    scopeSelector:
+      matchExpressions:
+      - operator: In
+        scopeName: PriorityClass
+        values: ["medium"]
+- apiVersion: v1
+  kind: ResourceQuota
+  metadata:
+    name: pods-low
+  spec:
+    hard:
+      cpu: "5"
+      memory: 10Gi
+      pods: "10"
+    scopeSelector:
+      matchExpressions:
+      - operator: In
+        scopeName: PriorityClass
+        values: ["low"]
+```
+
+Terapkan YAML tersebut dengan `kubectl create`.
+
+```shell
+kubectl create -f ./quota.yml
+```
+
+```shell
+resourcequota/pods-high created
+resourcequota/pods-medium created
+resourcequota/pods-low created
+```
+
+Pastikan bahwa nilai `Used` pada kuota adalah `0` dengan `kubectl describe quota`.
+
+```shell
+kubectl describe quota
+```
+
+```shell
+Name:       pods-high
+Namespace:  default
+Resource    Used  Hard
+--------    ----  ----
+cpu         0     1k
+memory      0     200Gi
+pods        0     10
+
+
+Name:       pods-low
+Namespace:  default
+Resource    Used  Hard
+--------    ----  ----
+cpu         0     5
+memory      0     10Gi
+pods        0     10
+
+
+Name:       pods-medium
+Namespace:  default
+Resource    Used  Hard
+--------    ----  ----
+cpu         0     10
+memory      0     20Gi
+pods        0     10
+```
+
+Buat sebuah Pod dengan Priority "high". Simpan YAML berikut ke sebuah
+berkas bernama `high-priority-pod.yml`.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: high-priority
+spec:
+  containers:
+  - name: high-priority
+    image: ubuntu
+    command: ["/bin/sh"]
+    args: ["-c", "while true; do echo hello; sleep 10; done"]
+    resources:
+      requests:
+        memory: "10Gi"
+        cpu: "500m"
+      limits:
+        memory: "10Gi"
+        cpu: "500m"
+  priorityClassName: high
+```
+
+Terapkan dengan `kubectl create`.
+
+```shell
+kubectl create -f ./high-priority-pod.yml
+```
+
+Pastikan bahwa status "Used" untuk kuota dengan Priority "high", yaitu `pods-high`, telah berubah
+dan dua kuota lainnya tidak berubah.
+
+```shell
+kubectl describe quota
+```
+
+```shell
+Name:       pods-high
+Namespace:  default
+Resource    Used  Hard
+--------    ----  ----
+cpu         500m  1k
+memory      10Gi  200Gi
+pods        1     10
+
+
+Name:       pods-low
+Namespace:  default
+Resource    Used  Hard
+--------    ----  ----
+cpu         0     5
+memory      0     10Gi
+pods        0     10
+
+
+Name:       pods-medium
+Namespace:  default
+Resource    Used  Hard
+--------    ----  ----
+cpu         0     10
+memory      0     20Gi
+pods        0     10
+```
+
+`scopeSelector` mendukung nilai-nilai berikut pada kolom `operator`:
+
+* `In`
+* `NotIn`
+* `Exists`
+* `DoesNotExist`
+
+## _Request_ vs Limit
+
+Saat mengalokasikan sumber daya komputasi, setiap Container dapat menentukan sebuah nilai _request_ (permintaan) dan _limit_ (batas) untuk CPU maupun memori.
+Kuota dapat dikonfigurasi untuk membatasi salah satu dari kedua nilai tersebut.
+
+Jika kuota memiliki sebuah nilai yang ditentukan untuk `requests.cpu` atau `requests.memory`, maka kuota
+tersebut mengharuskan setiap Container yang akan dibuat untuk menentukan `requests` eksplisit untuk sumber daya tersebut.
+Jika kuota memiliki sebuah nilai yang ditentukan untuk `limits.cpu` atau `limits.memory`, maka kuota tersebut
+mengharuskan setiap Container yang akan dibuat untuk menentukan `limits` eksplisit untuk sumber daya tersebut.
+
+## Melihat dan Menyetel Kuota
+
+Kubectl mendukung pembuatan, pembaruan, dan pemeriksaan kuota:
+
+```shell
+kubectl create namespace myspace
+```
+
+```shell
+cat <<EOF > compute-resources.yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-resources
+spec:
+  hard:
+    requests.cpu: "1"
+    requests.memory: 1Gi
+    limits.cpu: "2"
+    limits.memory: 2Gi
+    requests.nvidia.com/gpu: 4
+EOF
+```
+
+```shell
+kubectl create -f ./compute-resources.yaml --namespace=myspace
+```
+
+```shell
+cat <<EOF > object-counts.yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: object-counts
+spec:
+  hard:
+    configmaps: "10"
+    persistentvolumeclaims: "4"
+    pods: "4"
+    replicationcontrollers: "20"
+    secrets: "10"
+    services: "10"
+    services.loadbalancers: "2"
+EOF
+```
+
+```shell
+kubectl create -f ./object-counts.yaml --namespace=myspace
+```
+
+```shell
+kubectl get quota --namespace=myspace
+```
+
+```shell
+NAME                AGE
+compute-resources   30s
+object-counts       32s
+```
+
+```shell
+kubectl describe quota compute-resources --namespace=myspace
+```
+
+```shell
+Name:                    compute-resources
+Namespace:               myspace
+Resource                 Used  Hard
+--------                 ----  ----
+limits.cpu               0     2
+limits.memory            0     2Gi
+requests.cpu             0     1
+requests.memory          0     1Gi
+requests.nvidia.com/gpu  0     4
+```
+
+```shell
+kubectl describe quota object-counts --namespace=myspace
+```
+
+```shell
+Name:                   object-counts
+Namespace:              myspace
+Resource                Used  Hard
+--------                ----  ----
+configmaps              0     10
+persistentvolumeclaims  0     4
+pods                    0     4
+replicationcontrollers  0     20
+secrets                 1     10
+services                0     10
+services.loadbalancers  0     2
+```
+
+Kubectl juga mendukung kuota kuantitas objek untuk semua sumber daya standar yang berada pada Namespace
+menggunakan sintaksis `count/<resource>.<group>`:
+
+```shell
+kubectl create namespace myspace
+```
+
+```shell
+kubectl create quota test --hard=count/deployments.extensions=2,count/replicasets.extensions=4,count/pods=3,count/secrets=4 --namespace=myspace
+```
+
+```shell
+kubectl run nginx --image=nginx --replicas=2 --namespace=myspace
+```
+
+```shell
+kubectl describe quota --namespace=myspace
+```
+
+```shell
+Name:                         test
+Namespace:                    myspace
+Resource                      Used  Hard
+--------                      ----  ----
+count/deployments.extensions  1     2
+count/pods                    2     3
+count/replicasets.extensions  1     4
+count/secrets                 1     4
+```
+
+## Kuota dan Kapasitas Klaster
+
+`ResourceQuota` tidak bergantung pada kapasitas klaster. `ResourceQuota` ditentukan dalam
+satuan-satuan absolut. Jadi, jika kamu menambahkan Node ke klaster kamu, penambahan ini
+**bukan** berarti secara otomatis memberikan setiap Namespace kemampuan untuk menggunakan
+lebih banyak sumber daya.
+
+Terkadang kebijakan yang lebih kompleks mungkin lebih diinginkan, seperti:
+
+- Secara proporsional membagi sumber daya total klaster untuk beberapa tim.
+- Mengizinkan setiap tim untuk meningkatkan penggunaan sumber daya sesuai kebutuhan,
+  tetapi tetap memiliki batas yang cukup besar untuk menghindari kehabisan sumber daya.
+- Mendeteksi permintaan dari sebuah Namespace, menambah Node, kemudian menambah kuota.
+ +Kebijakan-kebijakan seperti itu dapat diterapkan dengan `ResourceQuota` sebagai dasarnya, +dengan membuat sebuah "pengontrol" yang memantau penggunaan kuota dan menyesuaikan batas +keras kuota untuk setiap Namespace berdasarkan sinyal-sinyal lainnya. + +Perlu dicatat bahwa Resource Quota membagi agregat sumber daya klaster, tapi Resource Quota +tidak membuat batasan-batasan terhadap Node: Pod-Pod dari beberapa Namespace boleh berjalan +di Node yang sama. + +## Membatasi konsumsi Priority Class secara bawaan + +Mungkin saja diinginkan untuk Pod-Pod pada kelas prioritas tertentu, misalnya "cluster-services", sebaiknya diizinkan pada sebuah Namespace, jika dan hanya jika terdapat sebuah objek kuota yang cocok. + +Dengan mekanisme ini, operator-operator dapat membatasi penggunaan Priority Class dengan prioritas tinggi pada Namespace-Namespace tertentu saja dan tidak semua Namespace dapat menggunakan Priority Class tersebut secara bawaan. + +Untuk memaksa aturan ini, _flag_ kube-apiserver `--admission-control-config-file` sebaiknya digunakan untuk memberikan _path_ menuju berkas konfigurasi berikut: + +{{< tabs name="example1" >}} +{{% tab name="apiserver.config.k8s.io/v1" %}} + +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: "ResourceQuota" + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: ResourceQuotaConfiguration + limitedResources: + - resource: pods + matchScopes: + - scopeName: PriorityClass + operator: In + values: ["cluster-services"] +``` + +{{% /tab %}} +{{% tab name="apiserver.k8s.io/v1alpha1" %}} + +```yaml +# Kedaluwarsa pada v1.17 digantikan oleh apiserver.config.k8s.io/v1 +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: "ResourceQuota" + configuration: + # Kedaluwarsa pada v1.17 digantikan oleh apiserver.config.k8s.io/v1, ResourceQuotaConfiguration + apiVersion: resourcequota.admission.k8s.io/v1beta1 + kind: Configuration + limitedResources: + - resource: pods + matchScopes: + - scopeName: PriorityClass + operator: In + values: ["cluster-services"] +``` + +{{% /tab %}} +{{< /tabs >}} + +Sekarang, Pod-Pod "cluster-services" akan diizinkan hanya pada Namespace di mana ada sebuah objek kuota dengan sebuah `scopeSelector` yang cocok. + +Contohnya: + +```yaml + scopeSelector: + matchExpressions: + - scopeName: PriorityClass + operator: In + values: ["cluster-services"] +``` + +Lihat [LimitedResources](https://github.com/kubernetes/kubernetes/pull/36765) dan [dokumen desain dukungan Quota untuk Priority Class](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/pod-priority-resourcequota.md) untuk informasi lebih lanjut. + +## Contoh + +Lihat [contoh detail cara menggunakan sebuah Resource Quota](/docs/tasks/administer-cluster/quota-api-object/). + +{{% /capture %}} + +{{% capture whatsnext %}} + +Lihat [dokumen desain ResourceQuota](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) untuk informasi lebih lanjut. + +{{% /capture %}} diff --git a/content/zh/docs/concepts/services-networking/ingress.md b/content/zh/docs/concepts/services-networking/ingress.md index 5f6b98b6d6302..516d0492ad364 100644 --- a/content/zh/docs/concepts/services-networking/ingress.md +++ b/content/zh/docs/concepts/services-networking/ingress.md @@ -35,35 +35,45 @@ For clarity, this guide defines the following terms: Node : A worker machine in Kubernetes, part of a cluster. 
--> -节点(Node) -: Kubernetes 集群中其中一台工作机器,是集群的一部分。 + +节点(Node): + +Kubernetes 集群中其中一台工作机器,是集群的一部分。 -集群(Cluster) -: 一组运行程序(这些程序是容器化的,被 Kubernetes 管理的)的节点。 在此示例中,和在大多数常见的Kubernetes部署方案,集群中的节点都不会是公共网络。 + +集群(Cluster): + +一组运行程序(这些程序是容器化的,被 Kubernetes 管理的)的节点。 在此示例中,和在大多数常见的Kubernetes部署方案,集群中的节点都不会是公共网络。 -边缘路由器(Edge router) -: 在集群中强制性执行防火墙策略的路由器(router)。可以是由云提供商管理的网关,也可以是物理硬件。 + +边缘路由器(Edge router): + +在集群中强制性执行防火墙策略的路由器(router)。可以是由云提供商管理的网关,也可以是物理硬件。 -集群网络(Cluster network) -: 一组逻辑或物理的链接,根据 Kubernetes [网络模型](/docs/concepts/cluster-administration/networking/) 在集群内实现通信。 + +集群网络(Cluster network): + +一组逻辑或物理的链接,根据 Kubernetes [网络模型](/docs/concepts/cluster-administration/networking/) 在集群内实现通信。 + 服务(Service): + Kubernetes {{< glossary_tooltip term_id="service" >}} 使用 {{< glossary_tooltip text="标签" term_id="label" >}} 选择器(selectors)标识的一组 Pod。除非另有说明,否则假定服务只具有在集群网络中可路由的虚拟 IP。 diff --git a/content/zh/docs/concepts/workloads/controllers/deployment.md b/content/zh/docs/concepts/workloads/controllers/deployment.md index 1a97cd1fbab80..838647f68c7ad 100644 --- a/content/zh/docs/concepts/workloads/controllers/deployment.md +++ b/content/zh/docs/concepts/workloads/controllers/deployment.md @@ -42,8 +42,10 @@ Do not manage ReplicaSets owned by a Deployment. Consider opening an issue in th {{% /capture %}} +You describe a _desired state_ in a Deployment, and the Deployment controller changes the actual state to the desired state at a controlled rate. You can define Deployments to create new ReplicaSets, or to remove existing Deployments and adopt all their resources with new Deployments. -{{% capture body %}} +--> +描述 Deployment 中的 _desired state_,并且 Deployment 控制器以受控速率更改实际状态,以达到期望状态。可以定义 Deployments 以创建新的 ReplicaSets ,或删除现有 Deployments ,并通过新的 Deployments 使用其所有资源。 * `selector` 字段定义 Deployment 如何查找要管理的 Pods。 在这种情况下,只需选择在 Pod 模板(`app: nginx`)中定义的标签。但是,更复杂的选择规则是可能的,只要 Pod 模板本身满足规则。 - {{< note >}} + +{{< note >}} + `matchLabels` 字段是 {key,value} 的映射。单个 {key,value}在 `matchLabels` 映射中的值等效于 `matchExpressions` 的元素,其键字段是“key”,运算符为“In”,值数组仅包含“value”。所有要求,从 `matchLabels` 和 `matchExpressions`,必须满足才能匹配。 - {{< /note >}} +{{< /note >}} 1. 通过运行以下命令创建 Deployment : - {{< note >}} - 可以指定 `--record` 标志来写入在资源注释`kubernetes.io/change-cause`中执行的命令。它对以后的检查是有用的。 - 例如,查看在每个 Deployment 修改中执行的命令。 - {{< /note >}} +{{< /note >}} ```shell kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml @@ -210,10 +214,12 @@ The following is an example of a Deployment. It creates a ReplicaSet to bring up 2. Run `kubectl get deployments` to check if the Deployment was created. If the Deployment is still being created, the output is similar to the following: --> 2. 运行 `kubectl get deployments` 以检查 Deployment 是否已创建。如果仍在创建 Deployment ,则输出以下内容: + ```shell NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 3 0 0 0 1s ``` + @@ -243,6 +249,7 @@ The following is an example of a Deployment. It creates a ReplicaSet to bring up 3. To see the Deployment rollout status, run `kubectl rollout status deployment.v1.apps/nginx-deployment`. The output is similar to this: --> 3. 要查看 Deployment 展开状态,运行 `kubectl rollout status deployment.v1.apps/nginx-deployment`。输出: + ```shell Waiting for rollout to finish: 2 out of 3 new replicas have been updated... deployment.apps/nginx-deployment successfully rolled out @@ -252,6 +259,7 @@ The following is an example of a Deployment. It creates a ReplicaSet to bring up 4. Run the `kubectl get deployments` again a few seconds later. The output is similar to this: --> 4. 
几秒钟后再次运行 `kubectl get deployments`。输出: + ```shell NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 3 3 3 3 18s @@ -265,6 +273,7 @@ The following is an example of a Deployment. It creates a ReplicaSet to bring up 5. To see the ReplicaSet (`rs`) created by the Deployment, run `kubectl get rs`. The output is similar to this: --> 5. 要查看 Deployment 创建的 ReplicaSet (`rs`),运行 `kubectl get rs`。输出: + ```shell NAME DESIRED CURRENT READY AGE nginx-deployment-75675f5897 3 3 3 18s @@ -279,6 +288,7 @@ The following is an example of a Deployment. It creates a ReplicaSet to bring up 6. To see the labels automatically generated for each Pod, run `kubectl get pods --show-labels`. The following output is returned: --> 6. 要查看每个 Pod 自动生成的标签,运行 `kubectl get pods --show-labels`。返回以下输出: + ```shell NAME READY STATUS RESTARTS AGE LABELS nginx-deployment-75675f5897-7ci7o 1/1 Running 0 18s app=nginx,pod-template-hash=3123191453 @@ -291,13 +301,13 @@ The following is an example of a Deployment. It creates a ReplicaSet to bring up --> 创建的复制集可确保有三个 `nginx` Pods。 - {{< note >}} +{{< note >}} 必须在 Deployment 中指定适当的选择器和 Pod 模板标签(在本例中为`app: nginx`)。不要与其他控制器(包括其他 Deployments 和状态设置)重叠标签或选择器。Kubernetes 不会阻止重叠,如果多个控制器具有重叠的选择器,这些控制器可能会冲突并运行意外。 - {{< /note >}} +{{< /note >}} -1. 让我们更新 nginx Pods,以使用 `nginx:1.9.1` 镜像 ,而不是 `nginx:1.7.9` 镜像 。 + 1. 让我们更新 nginx Pods,以使用 `nginx:1.9.1` 镜像 ,而不是 `nginx:1.7.9` 镜像 。 ```shell kubectl --record deployment.apps/nginx-deployment set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 ``` + 输出: - ``` + + ```shell deployment.apps/nginx-deployment image updated ``` @@ -368,15 +380,17 @@ is changed, for example if the labels or container images of the template are up + 输出: - ``` + + ```shell deployment.apps/nginx-deployment edited ``` -2. 要查看展开状态,运行: + 2. 要查看展开状态,运行: ```shell kubectl rollout status deployment.v1.apps/nginx-deployment @@ -386,14 +400,16 @@ is changed, for example if the labels or container images of the template are up The output is similar to this: --> 输出: - ``` + + ```shell Waiting for rollout to finish: 2 out of 3 new replicas have been updated... ``` 或者 - ``` + + ```shell deployment.apps/nginx-deployment successfully rolled out ``` @@ -408,7 +424,8 @@ is changed, for example if the labels or container images of the template are up --> * 在展开成功后,可以通过运行 `kubectl get deployments`来查看 Deployment 。 输出: - ``` + + ```shell NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 3 3 3 3 36s ``` @@ -427,7 +444,8 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. The output is similar to this: --> 输出: - ``` + + ```shell NAME DESIRED CURRENT READY AGE nginx-deployment-1564180365 3 3 3 6s nginx-deployment-2035384211 0 0 0 36s @@ -446,7 +464,8 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. The output is similar to this: --> 输出: - ``` + + ```shell NAME READY STATUS RESTARTS AGE nginx-deployment-1564180365-khku8 1/1 Running 0 14s nginx-deployment-1564180365-nacti 1/1 Running 0 14s @@ -486,10 +505,11 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. kubectl describe deployments ``` 输出: - ``` + + ```shell Name: nginx-deployment Namespace: default CreationTimestamp: Thu, 30 Nov 2017 10:56:25 +0000 @@ -526,7 +546,8 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. 
Normal ScalingReplicaSet 19s deployment-controller Scaled down replica set nginx-deployment-2035384211 to 1 Normal ScalingReplicaSet 19s deployment-controller Scaled up replica set nginx-deployment-1564180365 to 3 Normal ScalingReplicaSet 14s deployment-controller Scaled down replica set nginx-deployment-2035384211 to 0 - ``` + ``` + 输出: - ``` + + ```shell deployment.apps/nginx-deployment image updated ``` @@ -653,7 +675,8 @@ rolled back. The output is similar to this: --> 输出: - ``` + + ```shell Waiting for rollout to finish: 1 out of 3 new replicas have been updated... ``` @@ -678,7 +701,8 @@ rolled back. The output is similar to this: --> 输出: - ``` + + ```shell NAME DESIRED CURRENT READY AGE nginx-deployment-1564180365 3 3 3 25s nginx-deployment-2035384211 0 0 0 36s @@ -698,7 +722,8 @@ rolled back. The output is similar to this: --> 输出: - ``` + + ```shell NAME READY STATUS RESTARTS AGE nginx-deployment-1564180365-70iae 1/1 Running 0 25s nginx-deployment-1564180365-jbqqo 1/1 Running 0 25s @@ -706,17 +731,17 @@ rolled back. nginx-deployment-3066724191-08mng 0/1 ImagePullBackOff 0 6s ``` - {{< note >}} +{{< note >}} Deployment 控制器自动停止不良展开,并停止向上扩展新的 ReplicaSet 。这取决于指定的滚动更新参数(具体为 `maxUnavailable`)。默认情况下,Kubernetes 将值设置为 25%。 - {{< /note >}} +{{< /note >}} * 获取 Deployment 描述信息: ```shell @@ -724,10 +749,11 @@ rolled back. ``` 输出: - ``` + + ```shell Name: nginx-deployment Namespace: default CreationTimestamp: Tue, 15 Mar 2016 14:48:04 -0700 @@ -783,17 +809,19 @@ rolled back. 按照如下步骤检查回滚历史: -1. 首先,检查 Deployment 修改历史: + 1. 首先,检查 Deployment 修改历史: + ```shell kubectl rollout history deployment.v1.apps/nginx-deployment ``` 输出: - ``` + + ```shell deployments "nginx-deployment" REVISION CHANGE-CAUSE 1 kubectl apply --filename=https://k8s.io/examples/controllers/nginx-deployment.yaml --record=true @@ -818,16 +846,18 @@ rolled back. -2. 查看修改历史的详细信息,运行: + 2. 查看修改历史的详细信息,运行: + ```shell kubectl rollout history deployment.v1.apps/nginx-deployment --revision=2 ``` 输出: - ``` + + ```shell deployments "nginx-deployment" revision 2 Labels: app=nginx pod-template-hash=1159050644 @@ -854,16 +884,18 @@ Follow the steps given below to rollback the Deployment from the current version -1. 现在已决定撤消当前展开并回滚到以前的版本: + 1. 现在已决定撤消当前展开并回滚到以前的版本: + ```shell kubectl rollout undo deployment.v1.apps/nginx-deployment ``` 输出: - ``` + + ```shell deployment.apps/nginx-deployment ``` 输出: - ``` + + ```shell deployment.apps/nginx-deployment ``` @@ -897,16 +930,18 @@ Follow the steps given below to rollback the Deployment from the current version -2. 检查回滚是否成功、 Deployment 是否正在运行,运行: + 2. 检查回滚是否成功、 Deployment 是否正在运行,运行: + ```shell kubectl get deployment nginx-deployment ``` 输出: - ``` + + ```shell NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 3 3 3 3 30m ``` @@ -914,15 +949,18 @@ Follow the steps given below to rollback the Deployment from the current version -3. 获取 Deployment 描述信息: + 3. 
获取 Deployment 描述信息: + ```shell kubectl describe deployment nginx-deployment ``` + 输出: - ``` + + ```shell Name: nginx-deployment Namespace: default CreationTimestamp: Sun, 02 Sep 2018 18:17:55 -0500 @@ -985,7 +1023,8 @@ kubectl scale deployment.v1.apps/nginx-deployment --replicas=10 The output is similar to this: --> 输出: -``` + +```shell deployment.apps/nginx-deployment scaled ``` @@ -1005,7 +1044,8 @@ kubectl autoscale deployment.v1.apps/nginx-deployment --min=10 --max=15 --cpu-pe The output is similar to this: --> 输出: -``` + +```shell deployment.apps/nginx-deployment scaled ``` @@ -1040,7 +1080,7 @@ ReplicaSets (ReplicaSets with Pods) in order to mitigate risk. This is called *p --> 输出: - ``` + ```shell NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 10 10 10 10 50s ``` @@ -1057,7 +1097,8 @@ ReplicaSets (ReplicaSets with Pods) in order to mitigate risk. This is called *p The output is similar to this: --> 输出: - ``` + + ```shell deployment.apps/nginx-deployment image updated ``` @@ -1073,7 +1114,8 @@ ReplicaSets (ReplicaSets with Pods) in order to mitigate risk. This is called *p The output is similar to this: --> 输出: - ``` + + ```shell NAME DESIRED CURRENT READY AGE nginx-deployment-1989198191 5 5 0 9s nginx-deployment-618515232 8 8 8 1m @@ -1092,7 +1134,7 @@ ReplicaSet with the most replicas. ReplicaSets with zero replicas are not scaled 在上面的示例中,3 个副本添加到旧 ReplicaSet 中,2 个副本添加到新 ReplicaSet 。展开过程最终应将所有副本移动到新的 ReplicaSet ,假定新的副本变得正常。要确认这一点,请运行: @@ -1104,7 +1146,8 @@ kubectl get deploy The output is similar to this: --> 输出: -``` + +```shell NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 15 18 7 8 7m ``` @@ -1113,6 +1156,7 @@ nginx-deployment 15 18 7 8 7m The rollout status confirms how the replicas were added to each ReplicaSet. 
--> 展开状态确认副本如何添加到每个 ReplicaSet 。 + ```shell kubectl get rs ``` @@ -1121,7 +1165,8 @@ kubectl get rs The output is similar to this: --> 输出: -``` + +```shell NAME DESIRED CURRENT READY AGE nginx-deployment-1989198191 7 7 0 7m nginx-deployment-618515232 11 11 11 7m @@ -1138,6 +1183,219 @@ apply multiple fixes in between pausing and resuming without triggering unnecess --> 可以在触发一个或多个更新之前暂停 Deployment ,然后继续它。这允许在暂停和恢复之间应用多个修补程序,而不会触发不必要的 Deployment 。 + +* 例如,对于一个刚刚创建的 Deployment : + 获取 Deployment 信息: + + ```shell + kubectl get deploy + ``` + + 输出: + + ```shell + NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE + nginx 3 3 3 3 1m + ``` + + + 获取 Deployment 状态: + + ```shell + kubectl get rs + ``` + + + 输出: + + ```shell + NAME DESIRED CURRENT READY AGE + nginx-2142116321 3 3 3 1m + ``` + + +使用如下指令中断运行: + + ```shell + kubectl rollout pause deployment.v1.apps/nginx-deployment + ``` + + + 输出: + + ```shell + deployment.apps/nginx-deployment paused + ``` + + +* 然后更新 Deployment 镜像: + + ```shell + kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 + ``` + + + 输出: + + ```shell + deployment.apps/nginx-deployment image updated + ``` + + +* 注意没有新的展开: + + ```shell + kubectl rollout history deployment.v1.apps/nginx-deployment + ``` + + + 输出: + + ```shell + deployments "nginx" + REVISION CHANGE-CAUSE + 1 + ``` + + +* 获取展开状态确保 Deployment 更新已经成功: + + ```shell + kubectl get rs + ``` + + + 输出: + + ```shell + NAME DESIRED CURRENT READY AGE + nginx-2142116321 3 3 3 2m + ``` + + +* 更新是很容易的,例如,可以这样更新使用到的资源: + + ```shell + kubectl set resources deployment.v1.apps/nginx-deployment -c=nginx --limits=cpu=200m,memory=512Mi + ``` + + + 输出: + + ```shell + deployment.apps/nginx-deployment resource requirements updated + ``` + + + 暂停 Deployment 之前的初始状态将继续其功能,但新的更新只要暂停 Deployment , Deployment 就不会产生任何效果。 + + +* 最后,恢复 Deployment 并观察新的 ReplicaSet ,并更新所有新的更新: + + ```shell + kubectl rollout resume deployment.v1.apps/nginx-deployment + ``` + + + 输出: + + ```shell + deployment.apps/nginx-deployment resumed + ``` + +* 观察展开的状态,直到完成。 + + ```shell + kubectl get rs -w + ``` + + + 输出: + + ```shell + NAME DESIRED CURRENT READY AGE + nginx-2142116321 2 2 2 2m + nginx-3926361531 2 2 0 6s + nginx-3926361531 2 2 1 18s + nginx-2142116321 1 2 2 2m + nginx-2142116321 1 2 2 2m + nginx-3926361531 3 2 1 18s + nginx-3926361531 3 2 1 18s + nginx-2142116321 1 1 1 2m + nginx-3926361531 3 3 1 18s + nginx-3926361531 3 3 2 19s + nginx-2142116321 0 1 1 2m + nginx-2142116321 0 1 1 2m + nginx-2142116321 0 0 0 2m + nginx-3926361531 3 3 3 20s + ``` + + +* 获取最近展开的状态: + + ```shell + kubectl get rs + ``` + + + 输出: + + ```shell + NAME DESIRED CURRENT READY AGE + nginx-2142116321 0 0 0 2m + nginx-3926361531 3 3 3 28s + ``` + + +可以在触发一个或多个更新之前暂停 Deployment ,然后继续它。这允许在暂停和恢复之间应用多个修补程序,而不会触发不必要的 Deployment 。 + 输出: -``` + +```shell Waiting for rollout to finish: 2 of 3 updated replicas are available... deployment.apps/nginx-deployment successfully rolled out $ echo $? 
@@ -1463,7 +1722,8 @@ kubectl patch deployment.v1.apps/nginx-deployment -p '{"spec":{"progressDeadline The output is similar to this: --> 输出: -``` + +```shell deployment.apps/nginx-deployment patched ``` @@ -1515,7 +1775,8 @@ kubectl describe deployment nginx-deployment The output is similar to this: --> 输出: -``` + +```shell <...> Conditions: Type Status Reason @@ -1531,7 +1792,7 @@ Conditions: --> 如果运行 `kubectl get deployment nginx-deployment -o yaml`, Deployment 状态输出: -``` +```shell status: availableReplicas: 2 conditions: @@ -1565,7 +1826,7 @@ reason for the Progressing condition: --> 最终,一旦超过 Deployment 进度截止时间,Kubernetes 将更新状态和进度状态: -``` +```shell Conditions: Type Status Reason ---- ------ ------ @@ -1582,7 +1843,7 @@ Deployment's status update with a successful condition (`Status=True` and `Reaso --> 可以通过缩减 Deployment 来解决配额不足的问题,或者直接在命名空间中增加配额。如果配额条件满足, Deployment 控制器完成了 Deployment 展开, Deployment 状态会更新为成功(`Status=True` and `Reason=NewReplicaSetAvailable`)。 -``` +```shell Conditions: Type Status Reason ---- ------ ------ @@ -1612,7 +1873,8 @@ kubectl rollout status deployment.v1.apps/nginx-deployment The output is similar to this: --> 输出: -``` + +```shell Waiting for rollout to finish: 2 out of 3 new replicas have been updated... error: deployment "nginx" exceeded its progress deadline $ echo $? diff --git a/content/zh/docs/tasks/federation/policy.rego b/content/zh/docs/tasks/federation/policy.rego deleted file mode 100644 index 49827b6ae96e2..0000000000000 --- a/content/zh/docs/tasks/federation/policy.rego +++ /dev/null @@ -1,74 +0,0 @@ -# OPA supports a high-level declarative language named Rego for authoring and -# enforcing policies. For more information on Rego, visit -# http://openpolicyagent.org. - -# Rego policies are namespaced by the "package" directive. -package kubernetes.placement - -# Imports provide aliases for data inside the policy engine. In this case, the -# policy simply refers to "clusters" below. -import data.kubernetes.clusters - -# The "annotations" rule generates a JSON object containing the key -# "federation.kubernetes.io/replica-set-preferences" mapped to . -# The preferences values is generated dynamically by OPA when it evaluates the -# rule. -# -# The SchedulingPolicy Admission Controller running inside the Federation API -# server will merge these annotations into incoming Federated resources. By -# setting replica-set-preferences, we can control the placement of Federated -# ReplicaSets. -# -# Rules are defined to generate JSON values (booleans, strings, objects, etc.) -# When OPA evaluates a rule, it generates a value IF all of the expressions in -# the body evaluate successfully. All rules can be understood intuitively as -# if where is true if AND AND ... -# is true (for some set of data.) -annotations["federation.kubernetes.io/replica-set-preferences"] = preferences { - input.kind = "ReplicaSet" - value = {"clusters": cluster_map, "rebalance": true} - json.marshal(value, preferences) -} - -# This "annotations" rule generates a value for the "federation.alpha.kubernetes.io/cluster-selector" -# annotation. -# -# In English, the policy asserts that resources in the "production" namespace -# that are not annotated with "criticality=low" MUST be placed on clusters -# labelled with "on-premises=true". 
-annotations["federation.alpha.kubernetes.io/cluster-selector"] = selector { - input.metadata.namespace = "production" - not input.metadata.annotations.criticality = "low" - json.marshal([{ - "operator": "=", - "key": "on-premises", - "values": "[true]", - }], selector) -} - -# Generates a set of cluster names that satisfy the incoming Federated -# ReplicaSet's requirements. In this case, just PCI compliance. -replica_set_clusters[cluster_name] { - clusters[cluster_name] - not insufficient_pci[cluster_name] -} - -# Generates a set of clusters that must not be used for Federated ReplicaSets -# that request PCI compliance. -insufficient_pci[cluster_name] { - clusters[cluster_name] - input.metadata.annotations["requires-pci"] = "true" - not pci_clusters[cluster_name] -} - -# Generates a set of clusters that are PCI certified. In this case, we assume -# clusters are annotated to indicate if they have passed PCI compliance audits. -pci_clusters[cluster_name] { - clusters[cluster_name].metadata.annotations["pci-certified"] = "true" -} - -# Helper rule to generate a mapping of desired clusters to weights. In this -# case, weights are static. -cluster_map[cluster_name] = {"weight": 1} { - replica_set_clusters[cluster_name] -} diff --git a/content/zh/docs/tasks/federation/set-up-placement-policies-federation.md b/content/zh/docs/tasks/federation/set-up-placement-policies-federation.md index b56d43e83bbf0..774966f39caf4 100644 --- a/content/zh/docs/tasks/federation/set-up-placement-policies-federation.md +++ b/content/zh/docs/tasks/federation/set-up-placement-policies-federation.md @@ -188,7 +188,82 @@ Configure a sample policy to test the external policy engine: --> 配置一个示例策略来测试外部策略引擎: -{{< code file="policy.rego" >}} +``` +# OPA supports a high-level declarative language named Rego for authoring and +# enforcing policies. For more information on Rego, visit +# http://openpolicyagent.org. + +# Rego policies are namespaced by the "package" directive. +package kubernetes.placement + +# Imports provide aliases for data inside the policy engine. In this case, the +# policy simply refers to "clusters" below. +import data.kubernetes.clusters + +# The "annotations" rule generates a JSON object containing the key +# "federation.kubernetes.io/replica-set-preferences" mapped to . +# The preferences values is generated dynamically by OPA when it evaluates the +# rule. +# +# The SchedulingPolicy Admission Controller running inside the Federation API +# server will merge these annotations into incoming Federated resources. By +# setting replica-set-preferences, we can control the placement of Federated +# ReplicaSets. +# +# Rules are defined to generate JSON values (booleans, strings, objects, etc.) +# When OPA evaluates a rule, it generates a value IF all of the expressions in +# the body evaluate successfully. All rules can be understood intuitively as +# if where is true if AND AND ... +# is true (for some set of data.) +annotations["federation.kubernetes.io/replica-set-preferences"] = preferences { + input.kind = "ReplicaSet" + value = {"clusters": cluster_map, "rebalance": true} + json.marshal(value, preferences) +} + +# This "annotations" rule generates a value for the "federation.alpha.kubernetes.io/cluster-selector" +# annotation. +# +# In English, the policy asserts that resources in the "production" namespace +# that are not annotated with "criticality=low" MUST be placed on clusters +# labelled with "on-premises=true". 
+annotations["federation.alpha.kubernetes.io/cluster-selector"] = selector { + input.metadata.namespace = "production" + not input.metadata.annotations.criticality = "low" + json.marshal([{ + "operator": "=", + "key": "on-premises", + "values": "[true]", + }], selector) +} + +# Generates a set of cluster names that satisfy the incoming Federated +# ReplicaSet's requirements. In this case, just PCI compliance. +replica_set_clusters[cluster_name] { + clusters[cluster_name] + not insufficient_pci[cluster_name] +} + +# Generates a set of clusters that must not be used for Federated ReplicaSets +# that request PCI compliance. +insufficient_pci[cluster_name] { + clusters[cluster_name] + input.metadata.annotations["requires-pci"] = "true" + not pci_clusters[cluster_name] +} + +# Generates a set of clusters that are PCI certified. In this case, we assume +# clusters are annotated to indicate if they have passed PCI compliance audits. +pci_clusters[cluster_name] { + clusters[cluster_name].metadata.annotations["pci-certified"] = "true" +} + +# Helper rule to generate a mapping of desired clusters to weights. In this +# case, weights are static. +cluster_map[cluster_name] = {"weight": 1} { + replica_set_clusters[cluster_name] +} +```
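+
+作为示意(对象本身只是一个假设的例子,注解键取自上面的策略),一个要求被放置到
+PCI 合规集群的联邦 ReplicaSet 可以这样加上注解:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: ReplicaSet
+metadata:
+  name: nginx-pci          # 名称仅为示例
+  annotations:
+    requires-pci: "true"   # 对应上面策略中 insufficient_pci 规则检查的注解
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx-pci
+  template:
+    metadata:
+      labels:
+        app: nginx-pci
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+```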