diff --git a/.well-known/security.txt b/.well-known/security.txt new file mode 100644 index 0000000000000..40371f929208b --- /dev/null +++ b/.well-known/security.txt @@ -0,0 +1,5 @@ +Contact: mailto:security@kubernetes.io +Expires: 2031-01-11T06:30:00.000Z +Preferred-Languages: en +Canonical: https://kubernetes.io/.well-known/security.txt +Policy: https://github.com/kubernetes/website/blob/main/SECURITY.md diff --git a/content/de/docs/reference/glossary/cadvisor.md b/content/de/docs/reference/glossary/cadvisor.md new file mode 100644 index 0000000000000..f04a5c8370688 --- /dev/null +++ b/content/de/docs/reference/glossary/cadvisor.md @@ -0,0 +1,16 @@ +--- +title: cAdvisor +id: cadvisor +date: 2021-12-09 +full_link: https://github.com/google/cadvisor/ +short_description: > + Werkzeug, um den Ressourcenverbrauch und die Performance-Charakteristiken von Containern besser zu verstehen +aka: +tags: +- tool +--- +cAdvisor (Container Advisor) ermöglicht Benutzern von Containern ein besseres Verständnis des Ressourcenverbrauchs und der Performance-Charakteristiken ihrer laufenden {{< glossary_tooltip text="Container" term_id="container" >}}. + + + +Es ist ein laufender Daemon, der Informationen über laufende Container sammelt, aggregiert, verarbeitet und exportiert. Genauer gesagt speichert es für jeden Container die Ressourcenisolationsparameter, den historischen Ressourcenverbrauch, die Histogramme des kompletten historischen Ressourcenverbrauchs und die Netzwerkstatistiken. Diese Daten werden pro Container und maschinenweit exportiert. diff --git a/content/de/docs/reference/glossary/certificate.md b/content/de/docs/reference/glossary/certificate.md new file mode 100644 index 0000000000000..e9eea0db659a4 --- /dev/null +++ b/content/de/docs/reference/glossary/certificate.md @@ -0,0 +1,18 @@ +--- +title: Zertifikat +id: certificate +date: 2018-04-12 +full_link: /docs/tasks/tls/managing-tls-in-a-cluster/ +short_description: > + Eine kryptographisch sichere Datei, die verwendet wird, um den Zugriff auf das Kubernetes Cluster zu validieren. + +aka: +tags: +- security +--- + Eine kryptographisch sichere Datei, die verwendet wird, um den Zugriff auf das Kubernetes Cluster zu bestätigen. + + + +Zertifikate ermöglichen es Anwendungen in einem Kubernetes Cluster, sicher auf die Kubernetes API zuzugreifen. Zertifikate bestätigen, dass Clients die Erlaubnis haben, auf die API zuzugreifen. + diff --git a/content/de/docs/reference/glossary/cidr.md b/content/de/docs/reference/glossary/cidr.md new file mode 100644 index 0000000000000..245ccd9b19d0c --- /dev/null +++ b/content/de/docs/reference/glossary/cidr.md @@ -0,0 +1,18 @@ +--- +title: CIDR +id: cidr +date: 2019-11-12 +full_link: +short_description: > + CIDR ist eine Notation, um Blöcke von IP-Adressen zu beschreiben, und wird häufig in verschiedenen Netzwerkkonfigurationen verwendet. + +aka: +tags: +- networking +--- +CIDR (Classless Inter-Domain Routing) ist eine Notation, um Blöcke von IP-Adressen zu beschreiben, und wird häufig in verschiedenen Netzwerkkonfigurationen verwendet. + + + +Im Kubernetes-Kontext erhält jeder {{< glossary_tooltip text="Knoten" term_id="node" >}} eine Reihe von IP-Adressen durch die Startadresse und eine Subnetzmaske unter Verwendung von CIDR. Dies erlaubt es Knoten, jedem {{< glossary_tooltip text="Pod" term_id="pod" >}} eine eigene IP-Adresse zuzuweisen. Obwohl es ursprünglich ein Konzept für IPv4 ist, wurde CIDR erweitert, um auch IPv6 einzubinden.
+ diff --git a/content/de/docs/reference/glossary/cla.md b/content/de/docs/reference/glossary/cla.md new file mode 100644 index 0000000000000..b6163a995cc30 --- /dev/null +++ b/content/de/docs/reference/glossary/cla.md @@ -0,0 +1,18 @@ +--- +title: CLA (Contributor License Agreement) +id: cla +date: 2018-04-12 +full_link: https://github.com/kubernetes/community/blob/master/CLA.md +short_description: > + Bedingungen, unter denen ein Mitwirkender einem Open-Source-Projekt eine Lizenz für seine Mitwirkungen erteilt. + +aka: +tags: +- community +--- + Bedingungen, unter denen ein {{< glossary_tooltip text="Mitwirkender" term_id="contributor" >}} einem Open-Source-Projekt eine Lizenz für seine Mitwirkungen erteilt. + + + +CLAs helfen dabei, rechtliche Streitigkeiten rund um Mitwirkungen und geistiges Eigentum (IP) zu lösen. + diff --git a/content/en/docs/concepts/architecture/garbage-collection.md b/content/en/docs/concepts/architecture/garbage-collection.md index 4b36d850b55bb..4c61c968ba054 100644 --- a/content/en/docs/concepts/architecture/garbage-collection.md +++ b/content/en/docs/concepts/architecture/garbage-collection.md @@ -139,7 +139,7 @@ until disk usage reaches the `LowThresholdPercent` value. #### Garbage collection for unused container images {#image-maximum-age-gc} -{{< feature-state for_k8s_version="v1.29" state="alpha" >}} +{{< feature-state feature_gate_name="ImageMaximumGCAge" >}} As an alpha feature, you can specify the maximum time a local image can be unused for, regardless of disk usage. This is a kubelet setting that you configure for each node. diff --git a/content/en/docs/concepts/architecture/leases.md b/content/en/docs/concepts/architecture/leases.md index b07f4db3de256..8d74f81a91d76 100644 --- a/content/en/docs/concepts/architecture/leases.md +++ b/content/en/docs/concepts/architecture/leases.md @@ -33,7 +33,7 @@ instances are on stand-by. ## API server identity -{{< feature-state for_k8s_version="v1.26" state="beta" >}} +{{< feature-state feature_gate_name="APIServerIdentity" >}} Starting in Kubernetes v1.26, each `kube-apiserver` uses the Lease API to publish its identity to the rest of the system. While not particularly useful on its own, this provides a mechanism for clients to diff --git a/content/en/docs/concepts/architecture/mixed-version-proxy.md b/content/en/docs/concepts/architecture/mixed-version-proxy.md index 36588430c1f32..1045c83119ec6 100644 --- a/content/en/docs/concepts/architecture/mixed-version-proxy.md +++ b/content/en/docs/concepts/architecture/mixed-version-proxy.md @@ -8,7 +8,7 @@ weight: 220 -{{< feature-state state="alpha" for_k8s_version="v1.28" >}} +{{< feature-state feature_gate_name="UnknownVersionInteroperabilityProxy" >}} Kubernetes {{< skew currentVersion >}} includes an alpha feature that lets an {{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}} diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index 6473d35a17e25..c0bcecd3df4ac 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -280,7 +280,7 @@ If you want to explicitly reserve resources for non-Pod processes, see ## Node topology -{{< feature-state state="stable" for_k8s_version="v1.27" >}} +{{< feature-state feature_gate_name="TopologyManager" >}} If you have enabled the `TopologyManager` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/), then @@ -290,7 +290,7 @@ for more information.
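The node topology behavior mentioned above is driven by kubelet settings. A minimal sketch of the relevant `KubeletConfiguration` fields is shown below; the policy and scope values are illustrative choices rather than recommendations:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Coordinate NUMA-aware resource assignment; valid policies include
# none, best-effort, restricted, and single-numa-node.
topologyManagerPolicy: single-numa-node
# Align resources per pod rather than per container (illustrative choice).
topologyManagerScope: pod
```

The kubelet reads this file at startup, so changes take effect after the kubelet is restarted.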
## Graceful node shutdown {#graceful-node-shutdown} -{{< feature-state state="beta" for_k8s_version="v1.21" >}} +{{< feature-state feature_gate_name="GracefulNodeShutdown" >}} The kubelet attempts to detect node system shutdown and terminates pods running on the node. @@ -374,7 +374,7 @@ Message: Pod was terminated in response to imminent node shutdown. ### Pod Priority based graceful node shutdown {#pod-priority-graceful-node-shutdown} -{{< feature-state state="beta" for_k8s_version="v1.24" >}} +{{< feature-state feature_gate_name="GracefulNodeShutdownBasedOnPodPriority" >}} To provide more flexibility during graceful node shutdown around the ordering of pods during shutdown, graceful node shutdown honors the PriorityClass for @@ -471,7 +471,7 @@ are emitted under the kubelet subsystem to monitor node shutdowns. ## Non-graceful node shutdown handling {#non-graceful-node-shutdown} -{{< feature-state state="stable" for_k8s_version="v1.28" >}} +{{< feature-state feature_gate_name="NodeOutOfServiceVolumeDetach" >}} A node shutdown action may not be detected by kubelet's Node Shutdown Manager, either because the command does not trigger the inhibitor locks mechanism used by @@ -515,7 +515,7 @@ During a non-graceful shutdown, Pods are terminated in the two phases: ## Swap memory management {#swap-memory} -{{< feature-state state="beta" for_k8s_version="v1.28" >}} +{{< feature-state feature_gate_name="NodeSwap" >}} To enable swap on a node, the `NodeSwap` feature gate must be enabled on the kubelet, and the `--fail-swap-on` command line flag or `failSwapOn` diff --git a/content/en/docs/concepts/cluster-administration/system-logs.md b/content/en/docs/concepts/cluster-administration/system-logs.md index 1feeecd3db7e5..9fed93fc75dca 100644 --- a/content/en/docs/concepts/cluster-administration/system-logs.md +++ b/content/en/docs/concepts/cluster-administration/system-logs.md @@ -238,7 +238,7 @@ The `logrotate` tool rotates logs daily, or once the log size is greater than 10 ## Log query -{{< feature-state for_k8s_version="v1.27" state="alpha" >}} +{{< feature-state feature_gate_name="NodeLogQuery" >}} To help with debugging issues on nodes, Kubernetes v1.27 introduced a feature that allows viewing logs of services running on the node. To use the feature, ensure that the `NodeLogQuery` diff --git a/content/en/docs/concepts/cluster-administration/system-traces.md b/content/en/docs/concepts/cluster-administration/system-traces.md index aaaf342b57042..9213dd4b48016 100644 --- a/content/en/docs/concepts/cluster-administration/system-traces.md +++ b/content/en/docs/concepts/cluster-administration/system-traces.md @@ -76,7 +76,7 @@ For more information about the `TracingConfiguration` struct, see ### kubelet traces -{{< feature-state for_k8s_version="v1.27" state="beta" >}} +{{< feature-state feature_gate_name="KubeletTracing" >}} The kubelet CRI interface and authenticated http servers are instrumented to generate trace spans. As with the apiserver, the endpoint and sampling rate are configurable. diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md index 9b36a6b72803d..8602d6c98d15b 100644 --- a/content/en/docs/concepts/containers/images.md +++ b/content/en/docs/concepts/containers/images.md @@ -161,7 +161,7 @@ which is 300 seconds (5 minutes). 
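The graceful node shutdown behavior described above is likewise configured through the kubelet. A hedged sketch of the relevant `KubeletConfiguration` fields, with illustrative durations:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Total time the node delays shutdown so that pods can terminate (illustrative value).
shutdownGracePeriod: "30s"
# Portion of shutdownGracePeriod reserved for critical pods at the end (illustrative value).
shutdownGracePeriodCriticalPods: "10s"
```

With these example values, regular pods get the first 20 seconds to stop and critical pods get the final 10 seconds; leaving `shutdownGracePeriod` at zero keeps the feature inactive.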
### Image pull per runtime class -{{< feature-state for_k8s_version="v1.29" state="alpha" >}} +{{< feature-state feature_gate_name="RuntimeClassInImageCriApi" >}} Kubernetes includes alpha support for performing image pulls based on the RuntimeClass of a Pod. If you enable the `RuntimeClassInImageCriApi` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/), diff --git a/content/en/docs/concepts/overview/components.md b/content/en/docs/concepts/overview/components.md index 28b633c141a29..177354fa47812 100644 --- a/content/en/docs/concepts/overview/components.md +++ b/content/en/docs/concepts/overview/components.md @@ -31,7 +31,7 @@ as well as detecting and responding to cluster events (for example, starting up `{{< glossary_tooltip text="replicas" term_id="replica" >}}` field is unsatisfied). Control plane components can be run on any machine in the cluster. However, -for simplicity, set up scripts typically start all control plane components on +for simplicity, setup scripts typically start all control plane components on the same machine, and do not run user containers on this machine. See [Creating Highly Available clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/) for an example control plane setup that runs across multiple machines. @@ -150,4 +150,4 @@ Learn more about the following: * Etcd's official [documentation](https://etcd.io/docs/). * Several [container runtimes](/docs/setup/production-environment/container-runtimes/) in Kubernetes. * Integrating with cloud providers using [cloud-controller-manager](/docs/concepts/architecture/cloud-controller/). - * [kubectl](/docs/reference/generated/kubectl/kubectl-commands) commands. \ No newline at end of file + * [kubectl](/docs/reference/generated/kubectl/kubectl-commands) commands. diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index ceec7e1eacd3d..f7e6da3d866aa 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -82,7 +82,7 @@ packages that define the API objects. ### OpenAPI V3 -{{< feature-state state="stable" for_k8s_version="v1.27" >}} +{{< feature-state feature_gate_name="OpenAPIV3" >}} Kubernetes supports publishing a description of its APIs as OpenAPI v3. @@ -167,7 +167,7 @@ cluster. ### Aggregated Discovery -{{< feature-state state="beta" for_k8s_version="v1.27" >}} +{{< feature-state feature_gate_name="AggregatedDiscoveryEndpoint" >}} Kubernetes offers beta support for aggregated discovery, publishing all resources supported by a cluster through two endpoints (`/api` and diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index 8aa1e97200891..c976faa97878f 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -360,7 +360,7 @@ null `namespaceSelector` matches the namespace of the Pod where the rule is defi #### matchLabelKeys -{{< feature-state for_k8s_version="v1.29" state="alpha" >}} +{{< feature-state feature_gate_name="MatchLabelKeysInPodAffinity" >}} {{< note >}} @@ -391,26 +391,27 @@ metadata: ... 
spec: template: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - database - topologyKey: topology.kubernetes.io/zone - # Only Pods from a given rollout are taken into consideration when calculating pod affinity. - # If you update the Deployment, the replacement Pods follow their own affinity rules - # (if there are any defined in the new Pod template) - matchLabelKeys: - - pod-template-hash + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - database + topologyKey: topology.kubernetes.io/zone + # Only Pods from a given rollout are taken into consideration when calculating pod affinity. + # If you update the Deployment, the replacement Pods follow their own affinity rules + # (if there are any defined in the new Pod template) + matchLabelKeys: + - pod-template-hash ``` #### mismatchLabelKeys -{{< feature-state for_k8s_version="v1.29" state="alpha" >}} +{{< feature-state feature_gate_name="MatchLabelKeysInPodAffinity" >}} {{< note >}} diff --git a/content/en/docs/concepts/security/_index.md b/content/en/docs/concepts/security/_index.md index 50edcda94a3f6..47d4ef8d365f6 100644 --- a/content/en/docs/concepts/security/_index.md +++ b/content/en/docs/concepts/security/_index.md @@ -3,4 +3,127 @@ title: "Security" weight: 85 description: > Concepts for keeping your cloud-native workload secure. +simple_list: true --- + +This section of the Kubernetes documentation aims to help you learn to run +workloads more securely, and about the essential aspects of keeping a +Kubernetes cluster secure. + +Kubernetes is based on a cloud-native architecture, and draws on advice from the +{{< glossary_tooltip text="CNCF" term_id="cncf" >}} about good practice for +cloud native information security. + +Read [Cloud Native Security and Kubernetes](/docs/concepts/security/cloud-native-security/) +for the broader context about how to secure your cluster and the applications that +you're running on it. + +## Kubernetes security mechanisms {#security-mechanisms} + +Kubernetes includes several APIs and security controls, as well as ways to +define [policies](#policies) that can form part of how you manage information security. + +### Control plane protection + +A key security mechanism for any Kubernetes cluster is to +[control access to the Kubernetes API](/docs/concepts/security/controlling-access). + +Kubernetes expects you to configure and use TLS to provide +[data encryption in transit](/docs/tasks/tls/managing-tls-in-a-cluster/) +within the control plane, and between the control plane and its clients. +You can also enable [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) +for the data stored within Kubernetes control plane; this is separate from using +encryption at rest for your own workloads' data, which might also be a good idea. + +### Secrets + +The [Secret](/docs/concepts/configuration/secret/) API provides basic protection for +configuration values that require confidentiality. + +### Workload protection + +Enforce [Pod security standards](/docs/concepts/security/pod-security-standards/) to +ensure that Pods and their containers are isolated appropriately. You can also use +[RuntimeClasses](/docs/concepts/containers/runtime-class) to define custom isolation +if you need it. 
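As a concrete sketch of enforcing the Pod security standards mentioned above, Pod Security Admission is driven by namespace labels; the namespace name and the chosen levels here are illustrative assumptions:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: example-apps   # illustrative namespace name
  labels:
    # Reject Pods that violate the baseline policy.
    pod-security.kubernetes.io/enforce: baseline
    # Warn (but do not reject) when Pods would violate the stricter restricted policy.
    pod-security.kubernetes.io/warn: restricted
```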
+ +[Network policies](/docs/concepts/services-networking/network-policies/) let you control +network traffic between Pods, or between Pods and the network outside your cluster. + +You can deploy security controls from the wider ecosystem to implement preventative +or detective controls around Pods, their containers, and the images that run in them. + +### Auditing + +Kubernetes [audit logging](/docs/tasks/debug/debug-cluster/audit/) provides a +security-relevant, chronological set of records documenting the sequence of actions +in a cluster. The cluster audits the activities generated by users, by applications +that use the Kubernetes API, and by the control plane itself. + +## Cloud provider security + +{{% thirdparty-content vendor="true" %}} + +If you are running a Kubernetes cluster on your own hardware or a different cloud provider, +consult your documentation for security best practices. +Here are links to some of the popular cloud providers' security documentation: + +{{< table caption="Cloud provider security" >}} + +IaaS Provider | Link | +-------------------- | ------------ | +Alibaba Cloud | https://www.alibabacloud.com/trust-center | +Amazon Web Services | https://aws.amazon.com/security | +Google Cloud Platform | https://cloud.google.com/security | +Huawei Cloud | https://www.huaweicloud.com/intl/en-us/securecenter/overallsafety | +IBM Cloud | https://www.ibm.com/cloud/security | +Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security | +Oracle Cloud Infrastructure | https://www.oracle.com/security | +VMware vSphere | https://www.vmware.com/security/hardening-guides | + +{{< /table >}} + +## Policies + +You can define security policies using Kubernetes-native mechanisms, +such as [NetworkPolicy](/docs/concepts/services-networking/network-policies/) +(declarative control over network packet filtering) or +[ValidatingAdmissionPolicy](/docs/reference/access-authn-authz/validating-admission-policy/) (declarative restrictions on what changes +someone can make using the Kubernetes API). + +However, you can also rely on policy implementations from the wider +ecosystem around Kubernetes. Kubernetes provides extension mechanisms +to let those ecosystem projects implement their own policy controls +on source code review, container image approval, API access controls, +networking, and more. + +For more information about policy mechanisms and Kubernetes, +read [Policies](/docs/concepts/policy/).
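As one hedged example of the declarative policy style described above, a minimal NetworkPolicy that blocks all ingress traffic to the Pods in a namespace looks roughly like this (the namespace name is illustrative):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: example-apps   # illustrative namespace
spec:
  # An empty podSelector matches every Pod in the namespace.
  podSelector: {}
  policyTypes:
    - Ingress   # no ingress rules are listed, so all inbound traffic is denied
```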
+ +## {{% heading "whatsnext" %}} + +Learn about related Kubernetes security topics: + +* [Securing your cluster](/docs/tasks/administer-cluster/securing-a-cluster/) +* [Known vulnerabilities](/docs/reference/issues-security/official-cve-feed/) + in Kubernetes (and links to further information) +* [Data encryption in transit](/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane +* [Data encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) +* [Controlling Access to the Kubernetes API](/docs/concepts/security/controlling-access) +* [Network policies](/docs/concepts/services-networking/network-policies/) for Pods +* [Secrets in Kubernetes](/docs/concepts/configuration/secret/) +* [Pod security standards](/docs/concepts/security/pod-security-standards/) +* [RuntimeClasses](/docs/concepts/containers/runtime-class) + +Learn the context: + + +* [Cloud Native Security and Kubernetes](/docs/concepts/security/cloud-native-security/) + +Get certified: + +* [Certified Kubernetes Security Specialist](https://training.linuxfoundation.org/certification/certified-kubernetes-security-specialist/) + certification and official training course. + +Read more in this section: + diff --git a/content/en/docs/concepts/security/cloud-native-security.md new file mode 100644 index 0000000000000..778dba0c3836e --- /dev/null +++ b/content/en/docs/concepts/security/cloud-native-security.md @@ -0,0 +1,226 @@ +--- +title: "Cloud Native Security and Kubernetes" +linkTitle: "Cloud Native Security" +weight: 10 + +# The section index lists this explicitly +hide_summary: true + +description: > + Concepts for keeping your cloud-native workload secure. +--- + +Kubernetes is based on a cloud-native architecture, and draws on advice from the +{{< glossary_tooltip text="CNCF" term_id="cncf" >}} about good practice for +cloud native information security. + +Read on through this page for an overview of how Kubernetes is designed to +help you deploy a secure cloud native platform. + +## Cloud native information security + +{{< comment >}} +There are localized versions available of this whitepaper; if you can link to one of those +when localizing, that's even better. +{{< /comment >}} + +The CNCF [white paper](https://github.com/cncf/tag-security/tree/main/security-whitepaper) +on cloud native security defines security controls and practices that are +appropriate to different _lifecycle phases_. + +## _Develop_ lifecycle phase {#lifecycle-phase-develop} + +- Ensure the integrity of development environments. +- Design applications following good practice for information security, + appropriate for your context. +- Consider end user security as part of solution design. + +To achieve this, you can: + +1. Adopt an architecture, such as [zero trust](https://glossary.cncf.io/zero-trust-architecture/), + that minimizes attack surfaces, even for internal threats. +1. Define a code review process that considers security concerns. +1. Build a _threat model_ of your system or application that identifies + trust boundaries. Use that model to identify risks and to help find + ways to treat those risks. +1. Incorporate advanced security automation, such as _fuzzing_ and + [security chaos engineering](https://glossary.cncf.io/security-chaos-engineering/), + where it's justified. + +## _Distribute_ lifecycle phase {#lifecycle-phase-distribute} + +- Ensure the security of the supply chain for container images you execute.
+- Ensure the security of the supply chain for the cluster and other components + that execute your application. An example of another component might be an + external database that your cloud-native application uses for persistence. + +To achieve this, you can: + +1. Scan container images and other artifacts for known vulnerabilities. +1. Ensure that software distribution uses encryption in transit, with + a chain of trust for the software source. +1. Adopt and follow processes to update dependencies when updates are + available, especially in response to security announcements. +1. Use validation mechanisms such as digital certificates for supply + chain assurance. +1. Subscribe to feeds and other mechanisms to alert you to security + risks. +1. Restrict access to artifacts. Place container images in a + [private registry](/docs/concepts/containers/images/#using-a-private-registry) + that only allows authorized clients to pull images. + +## _Deploy_ lifecycle phase {#lifecycle-phase-deploy} + +Ensure appropriate restrictions on what can be deployed, who can deploy it, +and where it can be deployed to. +You can enforce measures from the _distribute_ phase, such as verifying the +cryptographic identity of container image artifacts. + +When you deploy Kubernetes, you also set the foundation for your +applications' runtime environment: a Kubernetes cluster (or +multiple clusters). +That IT infrastructure must provide the security guarantees that higher +layers expect. + +## _Runtime_ lifecycle phase {#lifecycle-phase-runtime} + +The Runtime phase comprises three critical areas: [compute](#protection-runtime-compute), +[access](#protection-runtime-access), and [storage](#protection-runtime-storage). + + +### Runtime protection: access {#protection-runtime-access} + +The Kubernetes API is what makes your cluster work. Protecting this API is key +to providing effective cluster security. + +Other pages in the Kubernetes documentation have more detail about how to set up +specific aspects of access control. The [security checklist](/docs/concepts/security/security-checklist/) +has a set of suggested basic checks for your cluster. + +Beyond that, securing your cluster means implementing effective +[authentication](/docs/concepts/security/controlling-access/#authentication) and +[authorization](/docs/concepts/security/controlling-access/#authorization) for API access. Use [ServiceAccounts](/docs/concepts/security/service-accounts/) to +provide and manage security identities for workloads and cluster +components. + +Kubernetes uses TLS to protect API traffic; make sure to deploy the cluster using +TLS (including for traffic between nodes and the control plane), and protect the +encryption keys. If you use Kubernetes' own API for +[CertificateSigningRequests](/docs/reference/access-authn-authz/certificate-signing-requests/#certificate-signing-requests), +pay special attention to restricting misuse there. + +### Runtime protection: compute {#protection-runtime-compute} + +{{< glossary_tooltip text="Containers" term_id="container" >}} provide two +things: isolation between different applications, and a mechanism to combine +those isolated applications to run on the same host computer. Those two +aspects, isolation and aggregation, mean that runtime security involves +trade-offs and finding an appropriate balance. + +Kubernetes relies on a {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}} +to actually set up and run containers. 
The Kubernetes project does +not recommend a specific container runtime and you should make sure that +the runtime(s) that you choose meet your information security needs. + +To protect your compute at runtime, you can: + +1. Enforce [Pod security standards](/docs/concepts/security/pod-security-standards/) + for applications, to help ensure they run with only the necessary privileges. +1. Run a specialized operating system on your nodes that is designed specifically + for running containerized workloads. This is typically based on a read-only + operating system (_immutable image_) that provides only the services + essential for running containers. + + Container-specific operating systems help to isolate system components and + present a reduced attack surface in case of a container escape. +1. Define [ResourceQuotas](/docs/concepts/policy/resource-quotas/) to + fairly allocate shared resources, and use + mechanisms such as [LimitRanges](/docs/concepts/policy/limit-range/) + to ensure that Pods specify their resource requirements. +1. Partition workloads across different nodes. + Use [node isolation](/docs/concepts/scheduling-eviction/assign-pod-node/#node-isolation-restriction) + mechanisms, either from Kubernetes itself or from the ecosystem, to ensure that + Pods with different trust contexts are run on separate sets of nodes. +1. Use a {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}} + that provides security restrictions. +1. On Linux nodes, use a Linux security module such as [AppArmor](/docs/tutorials/security/apparmor/) (beta) + or [seccomp](/docs/tutorials/security/seccomp/). + +### Runtime protection: storage {#protection-runtime-storage} + +To protect storage for your cluster and the applications that run there, you can: + +1. Integrate your cluster with an external storage plugin that provides encryption at + rest for volumes. +1. Enable [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) for + API objects. +1. Protect data durability using backups. Verify that you can restore these, whenever you need to. +1. Authenticate connections between cluster nodes and any network storage they rely + upon. +1. Implement data encryption within your own application. + +For encryption keys, generating these within specialized hardware provides +the best protection against disclosure risks. A _hardware security module_ +can let you perform cryptographic operations without allowing the security +key to be copied elsewhere. + +### Networking and security + +You should also consider network security measures, such as +[NetworkPolicy](/docs/concepts/services-networking/network-policies/) or a +[service mesh](https://glossary.cncf.io/service-mesh/). +Some network plugins for Kubernetes provide encryption for your +cluster network, using technologies such as a virtual +private network (VPN) overlay. +By design, Kubernetes lets you use your own networking plugin for your +cluster (if you use managed Kubernetes, the person or organization +managing your cluster may have chosen a network plugin for you). + +The network plugin you choose and the way you integrate it can have a +strong impact on the security of information in transit. + +### Observability and runtime security + +Kubernetes lets you extend your cluster with extra tooling. You can set up third +party solutions to help you monitor or troubleshoot your applications and the +clusters they are running. You also get some basic observability features built +in to Kubernetes itself. 
Your code running in containers can generate logs, +publish metrics or provide other observability data; at deploy time, you need to +make sure your cluster provides an appropriate level of protection there. + +If you set up a metrics dashboard or something similar, review the chain of components +that populate data into that dashboard, as well as the dashboard itself. Make sure +that the whole chain is designed with enough resilience and enough integrity protection +that you can rely on it even during an incident where your cluster might be degraded. + +Where appropriate, deploy security measures below the level of Kubernetes +itself, such as cryptographically measured boot, or authenticated distribution +of time (which helps ensure the fidelity of logs and audit records). + +For a high assurance environment, deploy cryptographic protections to ensure that +logs are both tamper-proof and confidential. + +## {{% heading "whatsnext" %}} + +### Cloud native security {#further-reading-cloud-native} + +* CNCF [white paper](https://github.com/cncf/tag-security/tree/main/security-whitepaper) + on cloud native security. +* CNCF [white paper](https://github.com/cncf/tag-security/blob/f80844baaea22a358f5b20dca52cd6f72a32b066/supply-chain-security/supply-chain-security-paper/CNCF_SSCP_v1.pdf) + on good practices for securing a software supply chain. +* [Fixing the Kubernetes clusterf\*\*k: Understanding security from the kernel up](https://archive.fosdem.org/2020/schedule/event/kubernetes/) (FOSDEM 2020) +* [Kubernetes Security Best Practices](https://www.youtube.com/watch?v=wqsUfvRyYpw) (Kubernetes Forum Seoul 2019) +* [Towards Measured Boot Out of the Box](https://www.youtube.com/watch?v=EzSkU3Oecuw) (Linux Security Summit 2016) + +### Kubernetes and information security {#further-reading-k8s} + +* [Kubernetes security](/docs/concepts/security/) +* [Securing your cluster](/docs/tasks/administer-cluster/securing-a-cluster/) +* [Data encryption in transit](/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane +* [Data encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) +* [Secrets in Kubernetes](/docs/concepts/configuration/secret/) +* [Controlling Access to the Kubernetes API](/docs/concepts/security/controlling-access) +* [Network policies](/docs/concepts/services-networking/network-policies/) for Pods +* [Pod security standards](/docs/concepts/security/pod-security-standards/) +* [RuntimeClasses](/docs/concepts/containers/runtime-class) + diff --git a/content/en/docs/concepts/security/overview.md b/content/en/docs/concepts/security/overview.md deleted file mode 100644 index 29b4d8d55ba7f..0000000000000 --- a/content/en/docs/concepts/security/overview.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -reviewers: -- zparnold -title: Overview of Cloud Native Security -description: > - A model for thinking about Kubernetes security in the context of Cloud Native security. -content_type: concept -weight: 1 ---- - - - -This overview defines a model for thinking about Kubernetes security in the context of Cloud Native security. - -{{< warning >}} -This container security model provides suggestions, not proven information security policies. -{{< /warning >}} - - - -## The 4C's of Cloud Native security - -You can think about security in layers. The 4C's of Cloud Native security are Cloud, -Clusters, Containers, and Code. 
- -{{< note >}} -This layered approach augments the [defense in depth](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)) -computing approach to security, which is widely regarded as a best practice for securing -software systems. -{{< /note >}} - -{{< figure src="/images/docs/4c.png" title="The 4C's of Cloud Native Security" class="diagram-large" >}} - -Each layer of the Cloud Native security model builds upon the next outermost layer. -The Code layer benefits from strong base (Cloud, Cluster, Container) security layers. -You cannot safeguard against poor security standards in the base layers by addressing -security at the Code level. - -## Cloud - -In many ways, the Cloud (or co-located servers, or the corporate datacenter) is the -[trusted computing base](https://en.wikipedia.org/wiki/Trusted_computing_base) -of a Kubernetes cluster. If the Cloud layer is vulnerable (or -configured in a vulnerable way) then there is no guarantee that the components built -on top of this base are secure. Each cloud provider makes security recommendations -for running workloads securely in their environment. - -### Cloud provider security - -If you are running a Kubernetes cluster on your own hardware or a different cloud provider, -consult your documentation for security best practices. -Here are links to some of the popular cloud providers' security documentation: - -{{< table caption="Cloud provider security" >}} - -IaaS Provider | Link | --------------------- | ------------ | -Alibaba Cloud | https://www.alibabacloud.com/trust-center | -Amazon Web Services | https://aws.amazon.com/security | -Google Cloud Platform | https://cloud.google.com/security | -Huawei Cloud | https://www.huaweicloud.com/intl/en-us/securecenter/overallsafety | -IBM Cloud | https://www.ibm.com/cloud/security | -Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security | -Oracle Cloud Infrastructure | https://www.oracle.com/security | -VMware vSphere | https://www.vmware.com/security/hardening-guides | - -{{< /table >}} - -### Infrastructure security {#infrastructure-security} - -Suggestions for securing your infrastructure in a Kubernetes cluster: - -{{< table caption="Infrastructure security" >}} - -Area of Concern for Kubernetes Infrastructure | Recommendation | ---------------------------------------------- | -------------- | -Network access to API Server (Control plane) | All access to the Kubernetes control plane is not allowed publicly on the internet and is controlled by network access control lists restricted to the set of IP addresses needed to administer the cluster.| -Network access to Nodes (nodes) | Nodes should be configured to _only_ accept connections (via network access control lists) from the control plane on the specified ports, and accept connections for services in Kubernetes of type NodePort and LoadBalancer. If possible, these nodes should not be exposed on the public internet entirely. -Kubernetes access to Cloud Provider API | Each cloud provider needs to grant a different set of permissions to the Kubernetes control plane and nodes. It is best to provide the cluster with cloud provider access that follows the [principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege) for the resources it needs to administer. The [Kops documentation](https://github.com/kubernetes/kops/blob/master/docs/iam_roles.md#iam-roles) provides information about IAM policies and roles. 
-Access to etcd | Access to etcd (the datastore of Kubernetes) should be limited to the control plane only. Depending on your configuration, you should attempt to use etcd over TLS. More information can be found in the [etcd documentation](https://github.com/etcd-io/etcd/tree/master/Documentation). -etcd Encryption | Wherever possible it's a good practice to encrypt all storage at rest, and since etcd holds the state of the entire cluster (including Secrets) its disk should especially be encrypted at rest. - -{{< /table >}} - -## Cluster - -There are two areas of concern for securing Kubernetes: - -* Securing the cluster components that are configurable -* Securing the applications which run in the cluster - -### Components of the Cluster {#cluster-components} - -If you want to protect your cluster from accidental or malicious access and adopt -good information practices, read and follow the advice about -[securing your cluster](/docs/tasks/administer-cluster/securing-a-cluster/). - -### Components in the cluster (your application) {#cluster-applications} - -Depending on the attack surface of your application, you may want to focus on specific -aspects of security. For example: If you are running a service (Service A) that is critical -in a chain of other resources and a separate workload (Service B) which is -vulnerable to a resource exhaustion attack, then the risk of compromising Service A -is high if you do not limit the resources of Service B. The following table lists -areas of security concerns and recommendations for securing workloads running in Kubernetes: - -Area of Concern for Workload Security | Recommendation | ------------------------------- | --------------------- | -RBAC Authorization (Access to the Kubernetes API) | https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -Authentication | https://kubernetes.io/docs/concepts/security/controlling-access/ -Application secrets management (and encrypting them in etcd at rest) | https://kubernetes.io/docs/concepts/configuration/secret/
https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/ -Ensuring that pods meet defined Pod Security Standards | https://kubernetes.io/docs/concepts/security/pod-security-standards/#policy-instantiation -Quality of Service (and Cluster resource management) | https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/ -Network Policies | https://kubernetes.io/docs/concepts/services-networking/network-policies/ -TLS for Kubernetes Ingress | https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - -## Container - -Container security is outside the scope of this guide. Here are general recommendations and -links to explore this topic: - -Area of Concern for Containers | Recommendation | ------------------------------- | -------------- | -Container Vulnerability Scanning and OS Dependency Security | As part of an image build step, you should scan your containers for known vulnerabilities. -Image Signing and Enforcement | Sign container images to maintain a system of trust for the content of your containers. -Disallow privileged users | When constructing containers, consult your documentation for how to create users inside of the containers that have the least level of operating system privilege necessary in order to carry out the goal of the container. -Use container runtime with stronger isolation | Select [container runtime classes](/docs/concepts/containers/runtime-class/) that provide stronger isolation. - -## Code - -Application code is one of the primary attack surfaces over which you have the most control. -While securing application code is outside of the Kubernetes security topic, here -are recommendations to protect application code: - -### Code security - -{{< table caption="Code security" >}} - -Area of Concern for Code | Recommendation | --------------------------| -------------- | -Access over TLS only | If your code needs to communicate by TCP, perform a TLS handshake with the client ahead of time. With the exception of a few cases, encrypt everything in transit. Going one step further, it's a good idea to encrypt network traffic between services. This can be done through a process known as mutual TLS authentication or [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication) which performs a two sided verification of communication between two certificate holding services. | -Limiting port ranges of communication | This recommendation may be a bit self-explanatory, but wherever possible you should only expose the ports on your service that are absolutely essential for communication or metric gathering. | -3rd Party Dependency Security | It is a good practice to regularly scan your application's third party libraries for known security vulnerabilities. Each programming language has a tool for performing this check automatically. | -Static Code Analysis | Most languages provide a way for a snippet of code to be analyzed for any potentially unsafe coding practices. Whenever possible you should perform checks using automated tooling that can scan codebases for common security errors. Some of the tools can be found at: https://owasp.org/www-community/Source_Code_Analysis_Tools | -Dynamic probing attacks | There are a few automated tools that you can run against your service to try some of the well known service attacks. These include SQL injection, CSRF, and XSS. One of the most popular dynamic analysis tools is the [OWASP Zed Attack proxy](https://www.zaproxy.org/) tool. 
| - -{{< /table >}} - -## {{% heading "whatsnext" %}} - -Learn about related Kubernetes security topics: - -* [Pod security standards](/docs/concepts/security/pod-security-standards/) -* [Network policies for Pods](/docs/concepts/services-networking/network-policies/) -* [Controlling Access to the Kubernetes API](/docs/concepts/security/controlling-access) -* [Securing your cluster](/docs/tasks/administer-cluster/securing-a-cluster/) -* [Data encryption in transit](/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane -* [Data encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) -* [Secrets in Kubernetes](/docs/concepts/configuration/secret/) -* [Runtime class](/docs/concepts/containers/runtime-class) diff --git a/content/en/docs/concepts/security/pod-security-standards.md b/content/en/docs/concepts/security/pod-security-standards.md index 886137f0f75d9..9757e581598a2 100644 --- a/content/en/docs/concepts/security/pod-security-standards.md +++ b/content/en/docs/concepts/security/pod-security-standards.md @@ -5,7 +5,7 @@ title: Pod Security Standards description: > A detailed look at the different policy levels defined in the Pod Security Standards. content_type: concept -weight: 10 +weight: 15 --- diff --git a/content/en/docs/concepts/security/service-accounts.md b/content/en/docs/concepts/security/service-accounts.md index a7b3d54d76d33..4ae41f8008e3a 100644 --- a/content/en/docs/concepts/security/service-accounts.md +++ b/content/en/docs/concepts/security/service-accounts.md @@ -3,7 +3,7 @@ title: Service Accounts description: > Learn about ServiceAccount objects in Kubernetes. content_type: concept -weight: 10 +weight: 25 --- diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index fd992995288da..2fbbf1673831a 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -621,7 +621,7 @@ can define your own (provider specific) annotations on the Service that specify #### Load balancers with mixed protocol types -{{< feature-state for_k8s_version="v1.26" state="stable" >}} +{{< feature-state feature_gate_name="MixedProtocolLBService" >}} By default, for LoadBalancer type of Services, when there is more than one port defined, all ports must have the same protocol, and the protocol must be one which is supported @@ -670,7 +670,7 @@ Unprefixed names are reserved for end-users. #### Specifying IPMode of load balancer status {#load-balancer-ip-mode} -{{< feature-state for_k8s_version="v1.29" state="alpha" >}} +{{< feature-state feature_gate_name="LoadBalancerIPMode" >}} Starting as Alpha in Kubernetes 1.29, a [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) diff --git a/content/en/docs/concepts/storage/dynamic-provisioning.md b/content/en/docs/concepts/storage/dynamic-provisioning.md index 54ab391d80636..903945f12b6e5 100644 --- a/content/en/docs/concepts/storage/dynamic-provisioning.md +++ b/content/en/docs/concepts/storage/dynamic-provisioning.md @@ -119,9 +119,10 @@ When a default `StorageClass` exists in a cluster and a user creates a `DefaultStorageClass` admission controller automatically adds the `storageClassName` field pointing to the default storage class. -Note that there can be at most one *default* storage class on a cluster, or -a `PersistentVolumeClaim` without `storageClassName` explicitly specified cannot -be created. 
+Note that if you set the `storageclass.kubernetes.io/is-default-class` +annotation to true on more than one StorageClass in your cluster, and you then +create a `PersistentVolumeClaim` with no `storageClassName` set, Kubernetes +uses the most recently created default StorageClass. ## Topology Awareness diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index b7dbf54651a07..13b83596f4de6 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -194,7 +194,7 @@ keyed with `log_level`. {{< note >}} -* You must create a [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) +* You must [create a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap) before you can use it. * A ConfigMap is always mounted as `readOnly`. diff --git a/content/en/docs/concepts/windows/intro.md b/content/en/docs/concepts/windows/intro.md index 3e22aa7d624f0..5bb8c60fe6d95 100644 --- a/content/en/docs/concepts/windows/intro.md +++ b/content/en/docs/concepts/windows/intro.md @@ -352,6 +352,40 @@ Windows Server SAC release The Kubernetes [version-skew policy](/docs/setup/release/version-skew-policy/) also applies. +## Hardware recommendations and considerations {#windows-hardware-recommendations} + +{{% thirdparty-content %}} + +{{< note >}} +The following hardware specifications outlined here should be regarded as sensible default values. +They are not intended to represent minimum requirements or specific recommendations for production environments. +Depending on the requirements for your workload these values may need to be adjusted. +{{< /note >}} + +- 64-bit processor 4 CPU cores or more, capable of supporting virtualization +- 8GB or more of RAM +- 50GB or more of free disk space + +Refer to +[Hardware requirements for Windows Server Microsoft documentation](https://learn.microsoft.com/en-us/windows-server/get-started/hardware-requirements) +for the most up-to-date information on minimum hardware requirements. For guidance on deciding on resources for +production worker nodes refer to [Production worker nodes Kubernetes documentation](https://kubernetes.io/docs/setup/production-environment/#production-worker-nodes). + +To optimize system resources, if a graphical user interface is not required, +it may be preferable to use a Windows Server OS installation that excludes +the [Windows Desktop Experience](https://learn.microsoft.com/en-us/windows-server/get-started/install-options-server-core-desktop-experience) +installation option, as this configuration typically frees up more system +resources. + +In assessing disk space for Windows worker nodes, take note that Windows container images are typically larger than +Linux container images, with container image sizes ranging +from [300MB to over 10GB](https://techcommunity.microsoft.com/t5/containers/nano-server-x-server-core-x-server-which-base-image-is-the-right/ba-p/2835785) +for a single image. Additionally, take note that the `C:` drive in Windows containers represents a virtual free size of +20GB by default, which is not the actual consumed space, but rather the disk size for which a single container can grow +to occupy when using local storage on the host. +See [Containers on Windows - Container Storage Documentation](https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/container-storage#storage-limits) +for more detail. 
+ ## Getting help and troubleshooting {#troubleshooting} Your main source of help for troubleshooting your Kubernetes cluster should start diff --git a/content/en/docs/concepts/workloads/autoscaling.md new file mode 100644 index 0000000000000..5ecd2755e23cd --- /dev/null +++ b/content/en/docs/concepts/workloads/autoscaling.md @@ -0,0 +1,146 @@ +--- +title: Autoscaling Workloads +description: >- + With autoscaling, you can automatically update your workloads in one way or another. This allows your cluster to react to changes in resource demand more elastically and efficiently. +content_type: concept +weight: 40 +--- + + + +In Kubernetes, you can _scale_ a workload depending on the current demand of resources. +This allows your cluster to react to changes in resource demand more elastically and efficiently. + +When you scale a workload, you can either increase or decrease the number of replicas managed by +the workload, or adjust the resources available to the replicas in-place. + +The first approach is referred to as _horizontal scaling_, while the second is referred to as +_vertical scaling_. + +There are manual and automatic ways to scale your workloads, depending on your use case. + + + +## Scaling workloads manually + +Kubernetes supports _manual scaling_ of workloads. Horizontal scaling can be done +using the `kubectl` CLI. +For vertical scaling, you need to _patch_ the resource definition of your workload. + +See below for examples of both strategies. + +- **Horizontal scaling**: [Running multiple instances of your app](/docs/tutorials/kubernetes-basics/scale/scale-intro/) +- **Vertical scaling**: [Resizing CPU and memory resources assigned to containers](/docs/tasks/configure-pod-container/resize-container-resources) + +## Scaling workloads automatically + +Kubernetes also supports _automatic scaling_ of workloads, which is the focus of this page. + +The concept of _Autoscaling_ in Kubernetes refers to the ability to automatically update an +object that manages a set of Pods (for example a +{{< glossary_tooltip text="Deployment" term_id="deployment" >}}). + +### Scaling workloads horizontally + +In Kubernetes, you can automatically scale a workload horizontally using a _HorizontalPodAutoscaler_ (HPA). + +It is implemented as a Kubernetes API resource and a {{< glossary_tooltip text="controller" term_id="controller" >}} +and periodically adjusts the number of {{< glossary_tooltip text="replicas" term_id="replica" >}} +in a workload to match observed resource utilization such as CPU or memory usage. + +There is a [walkthrough tutorial](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough) of configuring a HorizontalPodAutoscaler for a Deployment. + +### Scaling workloads vertically + +{{< feature-state for_k8s_version="v1.25" state="stable" >}} + +You can automatically scale a workload vertically using a _VerticalPodAutoscaler_ (VPA). +Unlike the HPA, the VPA doesn't come with Kubernetes by default, but is a separate project +that can be found [on GitHub](https://github.com/kubernetes/autoscaler/tree/9f87b78df0f1d6e142234bb32e8acbd71295585a/vertical-pod-autoscaler). + +Once installed, it allows you to create {{< glossary_tooltip text="CustomResourceDefinitions" term_id="customresourcedefinition" >}} +(CRDs) for your workloads which define _how_ and _when_ to scale the resources of the managed replicas.
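To make the VerticalPodAutoscaler description above more tangible, a sketch of a VPA object is shown below; it assumes the VPA components are already installed, and the target Deployment name and `updateMode` are illustrative:

```yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: my-app-vpa          # illustrative name
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app            # illustrative Deployment to manage
  updatePolicy:
    updateMode: "Auto"      # one of Off, Initial, Recreate, Auto
```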
+ +{{< note >}} +You will need to have the [Metrics Server](https://github.com/kubernetes-sigs/metrics-server) +installed in your cluster for the HPA to work. +{{< /note >}} + +At the moment, the VPA can operate in four different modes: + +{{< table caption="Different modes of the VPA" >}} +Mode | Description +:----|:----------- +`Auto` | Currently `Recreate`, might change to in-place updates in the future +`Recreate` | The VPA assigns resource requests on pod creation as well as updates them on existing pods by evicting them when the requested resources differ significantly from the new recommendation +`Initial` | The VPA only assigns resource requests on pod creation and never changes them later. +`Off` | The VPA does not automatically change the resource requirements of the pods. The recommendations are calculated and can be inspected in the VPA object. +{{< /table >}} + +#### Requirements for in-place resizing + +{{< feature-state for_k8s_version="v1.27" state="alpha" >}} + +Resizing a workload in-place **without** restarting the {{< glossary_tooltip text="Pods" term_id="pod" >}} +or their {{< glossary_tooltip text="Containers" term_id="container" >}} requires Kubernetes version 1.27 or later.
+Additionally, the `InPlacePodVerticalScaling` feature gate needs to be enabled. + +{{< feature-gate-description name="InPlacePodVerticalScaling" >}} + +### Autoscaling based on cluster size + +For workloads that need to be scaled based on the size of the cluster (for example +`cluster-dns` or other system components), you can use the +[_Cluster Proportional Autoscaler_](https://github.com/kubernetes-sigs/cluster-proportional-autoscaler).
+Just like the VPA, it is not part of the Kubernetes core, but hosted as its +own project on GitHub. + +The Cluster Proportional Autoscaler watches the number of schedulable {{< glossary_tooltip text="nodes" term_id="node" >}} +and cores and scales the number of replicas of the target workload accordingly. + +If the number of replicas should stay the same, you can scale your workloads vertically according to the cluster size using +the [_Cluster Proportional Vertical Autoscaler_](https://github.com/kubernetes-sigs/cluster-proportional-vertical-autoscaler). +The project is **currently in beta** and can be found on GitHub. + +While the Cluster Proportional Autoscaler scales the number of replicas of a workload, the Cluster Proportional Vertical Autoscaler +adjusts the resource requests for a workload (for example a Deployment or DaemonSet) based on the number of nodes and/or cores +in the cluster. + +### Event driven Autoscaling + +It is also possible to scale workloads based on events, for example using the +[_Kubernetes Event Driven Autoscaler_ (**KEDA**)](https://keda.sh/). + +KEDA is a CNCF graduated project that enables you to scale your workloads based on the number +of events to be processed, for example the number of messages in a queue. There is +a wide range of adapters for different event sources to choose from. + +### Autoscaling based on schedules + +Another strategy for scaling your workloads is to **schedule** the scaling operations, for example in order to +reduce resource consumption during off-peak hours. + +Similar to event driven autoscaling, such behavior can be achieved using KEDA in conjunction with +its [`Cron` scaler](https://keda.sh/docs/2.13/scalers/cron/). The `Cron` scaler allows you to define schedules +(and time zones) for scaling your workloads in or out. + +## Scaling cluster infrastructure + +If scaling workloads isn't enough to meet your needs, you can also scale your cluster infrastructure itself. + +Scaling the cluster infrastructure normally means adding or removing {{< glossary_tooltip text="nodes" term_id="node" >}}. +This can be done using one of two available autoscalers: + +- [**Cluster Autoscaler**](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) +- [**Karpenter**](https://github.com/kubernetes-sigs/karpenter?tab=readme-ov-file) + +Both scalers work by watching for Pods marked as _unschedulable_ or for _underutilized_ nodes, and then adding or +removing nodes as needed. + +## {{% heading "whatsnext" %}} + +- Learn more about scaling horizontally + - [Scale a StatefulSet](/docs/tasks/run-application/scale-stateful-set/) + - [HorizontalPodAutoscaler Walkthrough](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/) +- [Resize Container Resources In-Place](/docs/tasks/configure-pod-container/resize-container-resources/) +- [Autoscale the DNS Service in a Cluster](/docs/tasks/administer-cluster/dns-horizontal-autoscaling/) diff --git a/content/en/docs/concepts/workloads/pods/_index.md b/content/en/docs/concepts/workloads/pods/_index.md index 1132c38793c5a..1ae8a7c7febfb 100644 --- a/content/en/docs/concepts/workloads/pods/_index.md +++ b/content/en/docs/concepts/workloads/pods/_index.md @@ -19,10 +19,10 @@ containers which are relatively tightly coupled. In non-cloud contexts, applications executed on the same physical or virtual machine are analogous to cloud applications executed on the same logical host.
As well as application containers, a Pod can contain -[init containers](/docs/concepts/workloads/pods/init-containers/) that run +{{< glossary_tooltip text="init containers" term_id="init-container" >}} that run during Pod startup. You can also inject -[ephemeral containers](/docs/concepts/workloads/pods/ephemeral-containers/) -for debugging if your cluster offers this. +{{< glossary_tooltip text="ephemeral containers" term_id="ephemeral-container" >}} +for debugging a running Pod. @@ -39,6 +39,26 @@ further sub-isolations applied. A Pod is similar to a set of containers with shared namespaces and shared filesystem volumes. +Pods in a Kubernetes cluster are used in two main ways: + +* **Pods that run a single container**. The "one-container-per-Pod" model is the + most common Kubernetes use case; in this case, you can think of a Pod as a + wrapper around a single container; Kubernetes manages Pods rather than managing + the containers directly. +* **Pods that run multiple containers that need to work together**. A Pod can + encapsulate an application composed of + [multiple co-located containers](#how-pods-manage-multiple-containers) that are + tightly coupled and need to share resources. These co-located containers + form a single cohesive unit. + + Grouping multiple co-located and co-managed containers in a single Pod is a + relatively advanced use case. You should use this pattern only in specific + instances in which your containers are tightly coupled. + + You don't need to run multiple containers to provide replication (for resilience + or capacity); if you need multiple replicas, see + [Workload management](/docs/concepts/workloads/controllers/). + ## Using Pods The following is an example of a Pod which consists of a container running the image `nginx:1.14.2`. @@ -61,26 +81,6 @@ term_id="deployment" >}} or {{< glossary_tooltip text="Job" term_id="job" >}}. If your Pods need to track state, consider the {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} resource. -Pods in a Kubernetes cluster are used in two main ways: - -* **Pods that run a single container**. The "one-container-per-Pod" model is the - most common Kubernetes use case; in this case, you can think of a Pod as a - wrapper around a single container; Kubernetes manages Pods rather than managing - the containers directly. -* **Pods that run multiple containers that need to work together**. A Pod can - encapsulate an application composed of multiple co-located containers that are - tightly coupled and need to share resources. These co-located containers - form a single cohesive unit of service—for example, one container serving data - stored in a shared volume to the public, while a separate _sidecar_ container - refreshes or updates those files. - The Pod wraps these containers, storage resources, and an ephemeral network - identity together as a single unit. - - {{< note >}} - Grouping multiple co-located and co-managed containers in a single Pod is a - relatively advanced use case. You should use this pattern only in specific - instances in which your containers are tightly coupled. - {{< /note >}} Each Pod is meant to run a single instance of a given application. If you want to scale your application horizontally (to provide more overall resources by running @@ -93,36 +93,10 @@ See [Pods and controllers](#pods-and-controllers) for more information on how Kubernetes uses workload resources, and their controllers, to implement application scaling and auto-healing. 
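The manifest for the `nginx:1.14.2` example mentioned above is not reproduced in this diff; as a rough, hedged sketch (the Pod and container names are illustrative), a minimal single-container Pod of that kind looks like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx            # illustrative name for the single-container example
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2  # the image referenced in the text above
    ports:
    - containerPort: 80
```

In practice you would normally let a workload resource such as a Deployment or Job create and replace Pods like this for you, as the surrounding text recommends.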
-### How Pods manage multiple containers - -Pods are designed to support multiple cooperating processes (as containers) that form -a cohesive unit of service. The containers in a Pod are automatically co-located and -co-scheduled on the same physical or virtual machine in the cluster. The containers -can share resources and dependencies, communicate with one another, and coordinate -when and how they are terminated. - -For example, you might have a container that -acts as a web server for files in a shared volume, and a separate "sidecar" container -that updates those files from a remote source, as in the following diagram: - -{{< figure src="/images/docs/pod.svg" alt="Pod creation diagram" class="diagram-medium" >}} - -Some Pods have {{< glossary_tooltip text="init containers" term_id="init-container" >}} -as well as {{< glossary_tooltip text="app containers" term_id="app-container" >}}. -By default, init containers run and complete before the app containers are started. - -{{< feature-state for_k8s_version="v1.29" state="beta" >}} - -Enabled by default, the `SidecarContainers` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -allows you to specify `restartPolicy: Always` for init containers. -Setting the `Always` restart policy ensures that the init containers where you set it are -kept running during the entire lifetime of the Pod. -See [Sidecar containers and restartPolicy](/docs/concepts/workloads/pods/init-containers/#sidecar-containers-and-restartpolicy) -for more details. - Pods natively provide two kinds of shared resources for their constituent containers: [networking](#pod-networking) and [storage](#pod-storage). + ## Working with Pods You'll rarely create individual Pods directly in Kubernetes—even singleton Pods. This @@ -343,6 +317,57 @@ The `spec` of a static Pod cannot refer to other API objects {{< glossary_tooltip text="Secret" term_id="secret" >}}, etc). {{< /note >}} +## Pods with multiple containers {#how-pods-manage-multiple-containers} + +Pods are designed to support multiple cooperating processes (as containers) that form +a cohesive unit of service. The containers in a Pod are automatically co-located and +co-scheduled on the same physical or virtual machine in the cluster. The containers +can share resources and dependencies, communicate with one another, and coordinate +when and how they are terminated. + + +Pods in a Kubernetes cluster are used in two main ways: + +* **Pods that run a single container**. The "one-container-per-Pod" model is the + most common Kubernetes use case; in this case, you can think of a Pod as a + wrapper around a single container; Kubernetes manages Pods rather than managing + the containers directly. +* **Pods that run multiple containers that need to work together**. A Pod can + encapsulate an application composed of + multiple co-located containers that are + tightly coupled and need to share resources. These co-located containers + form a single cohesive unit of service—for example, one container serving data + stored in a shared volume to the public, while a separate + {{< glossary_tooltip text="sidecar container" term_id="sidecar-container" >}} + refreshes or updates those files. + The Pod wraps these containers, storage resources, and an ephemeral network + identity together as a single unit. 
+ +For example, you might have a container that +acts as a web server for files in a shared volume, and a separate +[sidecar container](/docs/concepts/workloads/pods/sidecar-containers/) +that updates those files from a remote source, as in the following diagram: + +{{< figure src="/images/docs/pod.svg" alt="Pod creation diagram" class="diagram-medium" >}} + +Some Pods have {{< glossary_tooltip text="init containers" term_id="init-container" >}} +as well as {{< glossary_tooltip text="app containers" term_id="app-container" >}}. +By default, init containers run and complete before the app containers are started. + +You can also have [sidecar containers](/docs/concepts/workloads/pods/sidecar-containers/) +that provide auxiliary services to the main application Pod (for example: a service mesh). + +{{< feature-state for_k8s_version="v1.29" state="beta" >}} + +Enabled by default, the `SidecarContainers` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +allows you to specify `restartPolicy: Always` for init containers. +Setting the `Always` restart policy ensures that the containers where you set it are +treated as _sidecars_ that are kept running during the entire lifetime of the Pod. +Containers that you explicitly define as sidecar containers +start up before the main application Pod and remain running until the Pod is +shut down. + + ## Container probes A _probe_ is a diagnostic performed periodically by the kubelet on a container. To perform a diagnostic, the kubelet can invoke different actions: diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index 1d2b33d55f5d7..83befbe71d931 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -5,7 +5,7 @@ reviewers: - davidopp title: Disruptions content_type: concept -weight: 60 +weight: 70 --- diff --git a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md index dfd7c366c114d..efdf0e1a0c771 100644 --- a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md @@ -4,7 +4,7 @@ reviewers: - yujuhong title: Ephemeral Containers content_type: concept -weight: 80 +weight: 60 --- diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index ff73d7bc2310a..c07ed9bb3824c 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -161,7 +161,7 @@ the Pod level `restartPolicy` is either `OnFailure` or `Always`. When the kubelet is handling container restarts according to the configured restart policy, that only applies to restarts that make replacement containers inside the same Pod and running on the same node. After containers in a Pod exit, the kubelet -restarts them with an exponential back-off delay (10s, 20s,40s, …), that is capped at +restarts them with an exponential back-off delay (10s, 20s, 40s, …), that is capped at five minutes. Once a container has executed for 10 minutes without any problems, the kubelet resets the restart backoff timer for that container. 
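To make the sidecar discussion above concrete, here is a hedged sketch (not part of the original page; the names, image tags and log path are illustrative) of an init container that the kubelet treats as a sidecar because of `restartPolicy: Always`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-sidecar            # illustrative
spec:
  initContainers:
  - name: log-shipper               # runs for the whole lifetime of the Pod
    image: alpine:3.19              # illustrative image
    command: ["sh", "-c", "tail -F /var/log/app/app.log"]
    restartPolicy: Always           # marks this init container as a sidecar
    volumeMounts:
    - name: app-logs
      mountPath: /var/log/app
  containers:
  - name: app                       # the main application container
    image: nginx:1.14.2
    volumeMounts:
    - name: app-logs
      mountPath: /var/log/app
  volumes:
  - name: app-logs
    emptyDir: {}
```

With the `SidecarContainers` feature gate enabled (the default described above), the `log-shipper` container starts before `app` and is kept running, and restarted if it exits, until the Pod terminates.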
[Sidecar containers and Pod lifecycle](/docs/concepts/workloads/pods/sidecar-containers/#sidecar-containers-and-pod-lifecycle) diff --git a/content/en/docs/concepts/workloads/pods/pod-qos.md index 491a4af2effb0..e656208958c83 100644 --- a/content/en/docs/concepts/workloads/pods/pod-qos.md +++ b/content/en/docs/concepts/workloads/pods/pod-qos.md @@ -87,7 +87,7 @@ Containers in a Pod can request other resources (not CPU or memory) and still be ## Memory QoS with cgroup v2 -{{< feature-state for_k8s_version="v1.22" state="alpha" >}} +{{< feature-state feature_gate_name="MemoryQoS" >}} Memory QoS uses the memory controller of cgroup v2 to guarantee memory resources in Kubernetes. Memory requests and limits of containers in pod are used to set specific interfaces `memory.min` diff --git a/content/en/docs/contribute/style/hugo-shortcodes/index.md index 6112080eb8552..18ff7822406bb 100644 --- a/content/en/docs/contribute/style/hugo-shortcodes/index.md +++ b/content/en/docs/contribute/style/hugo-shortcodes/index.md @@ -49,6 +49,21 @@ Renders to: {{< feature-state for_k8s_version="v1.10" state="beta" >}} +### Feature state retrieval from description file + +To dynamically determine the state of the feature, make use of the `feature_gate_name` +shortcode parameter. The feature state details will be extracted from the corresponding feature gate +description file located in `content/en/docs/reference/command-line-tools-reference/feature-gates/`. +For example: + +``` +{{</* feature-state feature_gate_name="NodeSwap" */>}} +``` + +Renders to: + +{{< feature-state feature_gate_name="NodeSwap" >}} + ## Feature gate description In a Markdown page (`.md` file) on this site, you can add a shortcode to diff --git a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md index 1f1fa64f60b46..92e631fc20897 100644 --- a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md @@ -62,7 +62,7 @@ for a number of reasons: ## Bound service account token volume mechanism {#bound-service-account-token-volume} -{{< feature-state for_k8s_version="v1.22" state="stable" >}} +{{< feature-state feature_gate_name="BoundServiceAccountTokenVolume" >}} By default, the Kubernetes control plane (specifically, the [ServiceAccount admission controller](#serviceaccount-admission-controller)) @@ -249,7 +249,7 @@ it does the following when a Pod is created: ### Legacy ServiceAccount token tracking controller -{{< feature-state for_k8s_version="v1.28" state="stable" >}} +{{< feature-state feature_gate_name="LegacyServiceAccountTokenTracking" >}} This controller generates a ConfigMap called `kube-system/kube-apiserver-legacy-service-account-token-tracking` in the @@ -258,7 +258,7 @@ account tokens began to be monitored by the system.
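The sections above describe the move from long-lived legacy ServiceAccount tokens to bound, time-limited tokens. As a hedged illustration (not taken from the page; the audience, expiry and paths are made up), a Pod can request such a bound token explicitly through a projected volume:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: token-client                   # illustrative name
spec:
  containers:
  - name: app
    image: alpine:3.19                 # illustrative image
    command: ["sleep", "3600"]
    volumeMounts:
    - name: bound-token
      mountPath: /var/run/secrets/tokens
  volumes:
  - name: bound-token
    projected:
      sources:
      - serviceAccountToken:
          path: token                  # token is written to .../tokens/token
          audience: my-audience        # illustrative audience
          expirationSeconds: 3600      # the kubelet refreshes the token before it expires
```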
### Legacy ServiceAccount token cleaner -{{< feature-state for_k8s_version="v1.29" state="beta" >}} +{{< feature-state feature_gate_name="LegacyServiceAccountTokenCleanUp" >}} The legacy ServiceAccount token cleaner runs as part of the `kube-controller-manager` and checks every 24 hours to see if any auto-generated diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/readonly-apidata-volumes.md new file mode 100644 index 0000000000000..6e2e37ed67b2d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/readonly-apidata-volumes.md @@ -0,0 +1,27 @@ +--- +# Removed from Kubernetes +title: ReadOnlyAPIDataVolumes +content_type: feature_gate + +_build: + list: never + render: false + +stages: + - stage: beta + defaultValue: true + fromVersion: "1.8" + toVersion: "1.9" + - stage: stable + fromVersion: "1.10" + toVersion: "1.10" + +removed: true +--- +Set [`configMap`](/docs/concepts/storage/volumes/#configmap), +[`secret`](/docs/concepts/storage/volumes/#secret), +[`downwardAPI`](/docs/concepts/storage/volumes/#downwardapi) and +[`projected`](/docs/concepts/storage/volumes/#projected) +{{< glossary_tooltip term_id="volume" text="volumes" >}} to be mounted read-only. + +Since Kubernetes v1.10, these volume types are always read-only and you cannot opt out. diff --git a/content/en/docs/reference/glossary/init-container.md index a999042e3056f..dfb29b5b4bfec 100644 --- a/content/en/docs/reference/glossary/init-container.md +++ b/content/en/docs/reference/glossary/init-container.md @@ -5,7 +5,7 @@ date: 2018-04-12 full_link: short_description: > One or more initialization containers that must run to completion before any app containers run. - +full_link: /docs/concepts/workloads/pods/init-containers/ aka: tags: - fundamental @@ -15,3 +15,7 @@ tags: Initialization (init) containers are like regular app containers, with one difference: init containers must run to completion before any app containers can start. Init containers run in series: each init container must run to completion before the next init container begins. + +Unlike {{< glossary_tooltip text="sidecar containers" term_id="sidecar-container" >}}, init containers do not remain running after Pod startup. + +For more information, read [init containers](/docs/concepts/workloads/pods/init-containers/). diff --git a/content/en/docs/reference/glossary/sidecar-container.md new file mode 100644 index 0000000000000..97faa10153216 --- /dev/null +++ b/content/en/docs/reference/glossary/sidecar-container.md @@ -0,0 +1,20 @@ +--- +title: Sidecar Container +id: sidecar-container +date: 2018-04-12 +full_link: +short_description: > + An auxiliary container that stays running throughout the lifecycle of a Pod. +full_link: /docs/concepts/workloads/pods/sidecar-containers/ +tags: +- fundamental +--- + One or more {{< glossary_tooltip text="containers" term_id="container" >}} that are typically started before any app containers run. + + + +Sidecar containers are like regular app containers, but with a different purpose: the sidecar provides a Pod-local service to the main app container. Unlike {{< glossary_tooltip text="init containers" term_id="init-container" >}}, sidecar containers +continue running after Pod startup.
+ +Read [Sidecar containers](/docs/concepts/workloads/pods/sidecar-containers/) for more information. diff --git a/content/en/docs/reference/kubectl/quick-reference.md b/content/en/docs/reference/kubectl/quick-reference.md index b88cff5b183c0..a4e2df27d6ee5 100644 --- a/content/en/docs/reference/kubectl/quick-reference.md +++ b/content/en/docs/reference/kubectl/quick-reference.md @@ -287,7 +287,7 @@ kubectl label pods my-pod new-label=awesome # Add a Label kubectl label pods my-pod new-label- # Remove a label kubectl label pods my-pod new-label=new-value --overwrite # Overwrite an existing value kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq # Add an annotation -kubectl annotate pods my-pod icon- # Remove annotation +kubectl annotate pods my-pod icon-url- # Remove annotation kubectl autoscale deployment foo --min=2 --max=10 # Auto scale a deployment "foo" ``` diff --git a/content/en/docs/reference/labels-annotations-taints/_index.md b/content/en/docs/reference/labels-annotations-taints/_index.md index 7c12db03205aa..4849f60f9aabb 100644 --- a/content/en/docs/reference/labels-annotations-taints/_index.md +++ b/content/en/docs/reference/labels-annotations-taints/_index.md @@ -2255,7 +2255,8 @@ Starting in v1.16, this annotation was removed in favor of - [`pod-security.kubernetes.io/audit-violations`](/docs/reference/labels-annotations-taints/audit-annotations/#pod-security-kubernetes-io-audit-violations) - [`pod-security.kubernetes.io/enforce-policy`](/docs/reference/labels-annotations-taints/audit-annotations/#pod-security-kubernetes-io-enforce-policy) - [`pod-security.kubernetes.io/exempt`](/docs/reference/labels-annotations-taints/audit-annotations/#pod-security-kubernetes-io-exempt) - +- [`validation.policy.admission.k8s.io/validation_failure`](/docs/reference/labels-annotations-taints/audit-annotations/#validation-policy-admission-k8s-io-validation-failure) + See more details on [Audit Annotations](/docs/reference/labels-annotations-taints/audit-annotations/). ## kubeadm diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md index 9a5f9b29fde69..944d0accaa880 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md @@ -34,6 +34,17 @@ etcdctl del "" --prefix See the [etcd documentation](https://github.com/coreos/etcd/tree/master/etcdctl) for more information. 
+### Graceful kube-apiserver shutdown + +If you have your `kube-apiserver` configured with the `--shutdown-delay-duration` flag, +you can run the following commands to attempt a graceful shutdown for the running API server Pod, +before you run `kubeadm reset`: + +```bash +yq eval -i '.spec.containers[0].command = []' /etc/kubernetes/manifests/kube-apiserver.yaml +timeout 60 sh -c 'while pgrep kube-apiserver >/dev/null; do sleep 1; done' || true +``` + ## {{% heading "whatsnext" %}} * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index dd30bc2fee3e0..a2078c77fc4b6 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -568,37 +568,6 @@ reference documentation for more information about this subcommand and its options. - - - -## What's next {#whats-next} - -* Verify that your cluster is running properly with [Sonobuoy](https://github.com/heptio/sonobuoy) -* See [Upgrading kubeadm clusters](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) - for details about upgrading your cluster using `kubeadm`. -* Learn about advanced `kubeadm` usage in the [kubeadm reference documentation](/docs/reference/setup-tools/kubeadm/) -* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/reference/kubectl/). -* See the [Cluster Networking](/docs/concepts/cluster-administration/networking/) page for a bigger list - of Pod network add-ons. -* See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to - explore other add-ons, including tools for logging, monitoring, network policy, visualization & - control of your Kubernetes cluster. -* Configure how your cluster handles logs for cluster events and from - applications running in Pods. - See [Logging Architecture](/docs/concepts/cluster-administration/logging/) for - an overview of what is involved. - -### Feedback {#feedback} - -* For bugs, visit the [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues) -* For support, visit the - [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/) Slack channel -* General SIG Cluster Lifecycle development Slack channel: - [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/) -* SIG Cluster Lifecycle [SIG information](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle#readme) -* SIG Cluster Lifecycle mailing list: - [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) - ## Version skew policy {#version-skew-policy} While kubeadm allows version skew against some components that it manages, it is recommended that you @@ -619,8 +588,8 @@ Example: ### kubeadm's skew against the kubelet -Similarly to the Kubernetes version, kubeadm can be used with a kubelet version that is the same -version as kubeadm or one version older. +Similarly to the Kubernetes version, kubeadm can be used with a kubelet version that is +the same version as kubeadm or three versions older. Example: * kubeadm is at {{< skew currentVersion >}} @@ -686,3 +655,33 @@ supports your chosen platform. 
If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). + + + +## What's next {#whats-next} + +* Verify that your cluster is running properly with [Sonobuoy](https://github.com/heptio/sonobuoy) +* See [Upgrading kubeadm clusters](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) + for details about upgrading your cluster using `kubeadm`. +* Learn about advanced `kubeadm` usage in the [kubeadm reference documentation](/docs/reference/setup-tools/kubeadm/) +* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/reference/kubectl/). +* See the [Cluster Networking](/docs/concepts/cluster-administration/networking/) page for a bigger list + of Pod network add-ons. +* See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to + explore other add-ons, including tools for logging, monitoring, network policy, visualization & + control of your Kubernetes cluster. +* Configure how your cluster handles logs for cluster events and from + applications running in Pods. + See [Logging Architecture](/docs/concepts/cluster-administration/logging/) for + an overview of what is involved. + +### Feedback {#feedback} + +* For bugs, visit the [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues) +* For support, visit the + [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/) Slack channel +* General SIG Cluster Lifecycle development Slack channel: + [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/) +* SIG Cluster Lifecycle [SIG information](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle#readme) +* SIG Cluster Lifecycle mailing list: + [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index abd3f3e0e4968..64a4ce2286897 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -213,7 +213,7 @@ in kube-apiserver logs. To fix the issue you must follow these steps: `kubeadm kubeconfig user --org system:nodes --client-name system:node:$NODE > kubelet.conf`. `$NODE` must be set to the name of the existing failed node in the cluster. Modify the resulted `kubelet.conf` manually to adjust the cluster name and server endpoint, - or pass `kubeconfig user --config` (it accepts `InitConfiguration`). If your cluster does not have + or pass `kubeconfig user --config` (see [Generating kubeconfig files for additional users](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubeconfig-additional-users)). If your cluster does not have the `ca.key` you must sign the embedded certificates in the `kubelet.conf` externally. 1. Copy this resulted `kubelet.conf` to `/etc/kubernetes/kubelet.conf` on the failed node. 1. 
Restart the kubelet (`systemctl restart kubelet`) on the failed node and wait for diff --git a/content/en/docs/tasks/administer-cluster/encrypt-data.md b/content/en/docs/tasks/administer-cluster/encrypt-data.md index d113cdd836a28..6e7a05ef09c16 100644 --- a/content/en/docs/tasks/administer-cluster/encrypt-data.md +++ b/content/en/docs/tasks/administer-cluster/encrypt-data.md @@ -168,19 +168,31 @@ encrypt all resources, even custom resources that are added after API server sta since part of the configuration would be ineffective. The `resources` list's processing order and precedence are determined by the order it's listed in the configuration. {{< /note >}} -Opting out of encryption for specific resources while wildcard is enabled can be achieved by adding a new -`resources` array item with the resource name, followed by the `providers` array item with the `identity` provider. -For example, if '`*.*`' is enabled and you want to opt-out encryption for the `events` resource, add a new item -to the `resources` array with `events` as the resource name, followed by the providers array item with `identity`. -The new item should look like this: +If you have a wildcard covering resources and want to opt out of at-rest encryption for a particular kind +of resource, you achieve that by adding a separate `resources` array item with the name of the resource that +you want to exempt, followed by a `providers` array item where you specify the `identity` provider. You add +this item to the list so that it appears earlier than the configuration where you do specify encryption +(a provider that is not `identity`). + +For example, if '`*.*`' is enabled and you want to opt out of encryption for Events and ConfigMaps, add a +new **earlier** item to the `resources`, followed by the providers array item with `identity` as the +provider. The more specific entry must come before the wildcard entry. + +The new item would look similar to: ```yaml -- resources: - - events - providers: - - identity: {} + ... + - resources: + - configmaps. # specifically from the core API group, + # because of trailing "." + - events + providers: + - identity: {} + # and then other entries in resources ``` -Ensure that the new item is listed before the wildcard '`*.*`' item in the resources array to give it precedence. + +Ensure that the exemption is listed _before_ the wildcard '`*.*`' item in the resources array +to give it precedence. For more detailed information about the `EncryptionConfiguration` struct, please refer to the [encryption configuration API](/docs/reference/config-api/apiserver-encryption.v1/). diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index df93d79c4687c..623e528aa80d6 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -46,8 +46,46 @@ CA key on disk. Instead, run the controller-manager standalone with `--controllers=csrsigner` and point to the CA certificate and key. -[PKI certificates and requirements](/docs/setup/best-practices/certificates/) includes guidance on -setting up a cluster to use an external CA. +There are various ways to prepare the component credentials when using external CA mode. 
+ +### Manual preparation of component credentials + +[PKI certificates and requirements](/docs/setup/best-practices/certificates/) includes information +on how to manually prepare all the component credentials that kubeadm requires. + +### Preparation of credentials by signing CSRs generated by kubeadm + +kubeadm can [generate CSR files](#signing-csr) that you can sign manually with tools like +`openssl` and your external CA. These CSR files will include all the specifications for credentials +that components deployed by kubeadm require. + +### Automated preparation of component credentials by using kubeadm phases + +Alternatively, it is possible to use kubeadm phase commands to automate this process. + +- Go to a host that you want to prepare as a kubeadm control plane node with external CA. +- Copy the external CA files `ca.crt` and `ca.key` that you have into `/etc/kubernetes/pki` on the node. +- Prepare a temporary [kubeadm configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) +called `config.yaml` that can be used with `kubeadm init`. Make sure that this file includes +any relevant cluster-wide or host-specific information that could be included in certificates, such as +`ClusterConfiguration.controlPlaneEndpoint`, `ClusterConfiguration.certSANs` and `InitConfiguration.APIEndpoint`. +- On the same host, execute the commands `kubeadm init phase kubeconfig all --config config.yaml` and +`kubeadm init phase certs all --config config.yaml`. This will generate all required kubeconfig +files and certificates under `/etc/kubernetes/` and its `pki` subdirectory. +- Inspect the generated files. Delete `/etc/kubernetes/pki/ca.key`, and delete or move the file +`/etc/kubernetes/super-admin.conf` to a safe location. +- On nodes where `kubeadm join` will be called, also delete `/etc/kubernetes/kubelet.conf`. +This file is only required on the first node where `kubeadm init` will be called. +- Note that some files, such as `pki/sa.*`, `pki/front-proxy-ca.*` and `pki/etcd/ca.*`, are +shared between control plane nodes. You can generate them once and +[distribute them manually](/docs/setup/production-environment/tools/kubeadm/high-availability/#manual-certs) +to nodes where `kubeadm join` will be called, or you can use the +[`--upload-certs`](/docs/setup/production-environment/tools/kubeadm/high-availability/#stacked-control-plane-and-etcd-nodes) +functionality of `kubeadm init` and `--certificate-key` of `kubeadm join` to automate this distribution. + +Once the credentials are prepared on all nodes, call `kubeadm init` and `kubeadm join` for these nodes to +join the cluster. kubeadm will use the existing kubeconfig and certificate files under `/etc/kubernetes/` +and its `pki` subdirectory. ## Check certificate expiration diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index 09f43dd6341dc..9cb522c416e1d 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -43,7 +43,7 @@ The upgrade workflow at high level is the following: they could be running CoreDNS Pods or other critical workloads. For more information see [Draining nodes](/docs/tasks/administer-cluster/safely-drain-node/). - The Kubernetes project recommends that you match your kubelet and kubeadm versions.
- You can instead use an a version of kubelet that is older than kubeadm, provided it is within the + You can instead use a version of kubelet that is older than kubeadm, provided it is within the range of supported versions. For more details, please visit [kubeadm's skew against the kubelet](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#kubeadm-s-skew-against-the-kubelet). - All containers are restarted after upgrade, because the container spec hash value is changed. @@ -75,8 +75,8 @@ Find the latest patch release for Kubernetes {{< skew currentVersion >}} using t ```shell # Find the latest {{< skew currentVersion >}} version in the list. # It should look like {{< skew currentVersion >}}.x-*, where x is the latest patch. -apt update -apt-cache madison kubeadm +sudo apt update +sudo apt-cache madison kubeadm ``` {{% /tab %}} @@ -85,7 +85,7 @@ apt-cache madison kubeadm ```shell # Find the latest {{< skew currentVersion >}} version in the list. # It should look like {{< skew currentVersion >}}.x-*, where x is the latest patch. -yum list --showduplicates kubeadm --disableexcludes=kubernetes +sudo yum list --showduplicates kubeadm --disableexcludes=kubernetes ``` {{% /tab %}} @@ -107,9 +107,9 @@ Pick a control plane node that you wish to upgrade first. It must have the `/etc ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version - apt-mark unhold kubeadm && \ - apt-get update && apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ - apt-mark hold kubeadm + sudo apt-mark unhold kubeadm && \ + sudo apt-get update && sudo apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ + sudo apt-mark hold kubeadm ``` {{% /tab %}} @@ -117,7 +117,7 @@ Pick a control plane node that you wish to upgrade first. It must have the `/etc ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version - yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes + sudo yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} @@ -132,7 +132,7 @@ Pick a control plane node that you wish to upgrade first. It must have the `/etc 1. Verify the upgrade plan: ```shell - kubeadm upgrade plan + sudo kubeadm upgrade plan ``` This command checks that your cluster can be upgraded, and fetches the versions you can upgrade to. 
@@ -221,9 +221,9 @@ kubectl drain --ignore-daemonsets ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version - apt-mark unhold kubelet kubectl && \ - apt-get update && apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ - apt-mark hold kubelet kubectl + sudo apt-mark unhold kubelet kubectl && \ + sudo apt-get update && sudo apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ + sudo apt-mark hold kubelet kubectl ``` {{% /tab %}} @@ -231,7 +231,7 @@ kubectl drain --ignore-daemonsets ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version - yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes + sudo yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} @@ -279,7 +279,7 @@ The `STATUS` column should show `Ready` for all your nodes, and the version numb If `kubeadm upgrade` fails and does not roll back, for example because of an unexpected shutdown during execution, you can run `kubeadm upgrade` again. This command is idempotent and eventually makes sure that the actual state is the desired state you declare. -To recover from a bad state, you can also run `kubeadm upgrade apply --force` without changing the version that your cluster is running. +To recover from a bad state, you can also run `sudo kubeadm upgrade apply --force` without changing the version that your cluster is running. During upgrade kubeadm writes the following backup folders under `/etc/kubernetes/tmp`: diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md index e61c6f3d2b134..70b63288533e7 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md @@ -36,15 +36,15 @@ Upgrade kubeadm: {{% tab name="Ubuntu, Debian or HypriotOS" %}} ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version -apt-mark unhold kubeadm && \ -apt-get update && apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ -apt-mark hold kubeadm +sudo apt-mark unhold kubeadm && \ +sudo apt-get update && sudo apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ +sudo apt-mark hold kubeadm ``` {{% /tab %}} {{% tab name="CentOS, RHEL or Fedora" %}} ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version -yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes +sudo yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} {{< /tabs >}} @@ -75,15 +75,15 @@ kubectl drain --ignore-daemonsets {{% tab name="Ubuntu, Debian or HypriotOS" %}} ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version - apt-mark unhold kubelet kubectl && \ - apt-get update && apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ - apt-mark hold kubelet kubectl + sudo apt-mark unhold kubelet kubectl && \ + sudo apt-get update && sudo apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ + sudo apt-mark hold kubelet kubectl ``` {{% /tab %}} {{% tab 
name="CentOS, RHEL or Fedora" %}} ```shell # replace x in {{< skew currentVersion >}}.x-* with the latest patch version - yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes + sudo yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} {{< /tabs >}} diff --git a/content/en/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md b/content/en/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md index 3e607a9cbaa38..549753f54bb33 100644 --- a/content/en/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md +++ b/content/en/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md @@ -39,7 +39,7 @@ Figure 1 represents what you're going to achieve in this task. graph LR; subgraph local[Local client machine] - client([client])-- local
traffic .-> local_ssh[Local SSH<br>SOCKS5 proxy];
+        client([client])-. local<br>traffic .-> local_ssh[Local SSH<br>SOCKS5 proxy];
     end
     local_ssh[SSH<br>SOCKS5<br>
proxy]-- SSH Tunnel -->sshd diff --git a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md index 3f94e249eb2dc..7afaf2fc4cae4 100644 --- a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -48,14 +48,14 @@ Start RabbitMQ as follows: ```shell # make a Service for the StatefulSet to use -kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq-service.yaml +kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq/rabbitmq-service.yaml ``` ``` service "rabbitmq-service" created ``` ```shell -kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq-statefulset.yaml +kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml ``` ``` statefulset "rabbitmq" created diff --git a/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html index 6a15b53c9eab6..04dadeed81637 100644 --- a/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -17,23 +17,19 @@
- Objectives
-   • Scale an app using kubectl.
+ Objectives
+   • Scale an app using kubectl.

Scaling an application

Previously we created a Deployment, and then exposed it publicly via a Service. The Deployment created only one Pod for running our application. When traffic increases, we will need to scale the application to keep up with user demand.

If you haven't worked through the earlier sections, start from Using minikube to create a cluster.

- Scaling is accomplished by changing the number of replicas in a Deployment
- {{< note >}}
- If you are trying this after the previous section, you may have deleted the Service exposing the Deployment. In that case, please expose the Deployment again using the following command:
- kubectl expose deployment/kubernetes-bootcamp --type="NodePort" --port 8080
- {{< /note >}}
+ Scaling is accomplished by changing the number of replicas in a Deployment.
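As a small, hedged illustration of that sentence (it is not part of the tutorial page; the replica count is arbitrary and the image is assumed from the earlier bootcamp modules), the number of replicas is simply a field in the Deployment's spec:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-bootcamp            # the Deployment created earlier in this tutorial series
spec:
  replicas: 4                          # scaling out means raising this number
  selector:
    matchLabels:
      app: kubernetes-bootcamp
  template:
    metadata:
      labels:
        app: kubernetes-bootcamp
    spec:
      containers:
      - name: kubernetes-bootcamp
        image: gcr.io/google-samples/kubernetes-bootcamp:v1   # assumed image; the tutorial's own manifest may differ
```

The tutorial itself drives this change with `kubectl` (for example via `kubectl scale`) rather than by editing the manifest directly.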

@@ -47,7 +43,14 @@

Summary:

-
+ {{< note >}}
+ If you are trying this after the previous section, you may have deleted the Service exposing the Deployment. In that case, please expose the Deployment again using the following command:
+ kubectl expose deployment/kubernetes-bootcamp --type="NodePort" --port 8080
+ {{< /note >}}
diff --git a/content/en/releases/patch-releases.md b/content/en/releases/patch-releases.md index 10d29c31813f2..ac275027489c3 100644 --- a/content/en/releases/patch-releases.md +++ b/content/en/releases/patch-releases.md @@ -78,9 +78,9 @@ releases may also occur in between these. | Monthly Patch Release | Cherry Pick Deadline | Target date | | --------------------- | -------------------- | ----------- | -| February 2024 | 2024-02-09 | 2024-02-14 | | March 2024 | 2024-03-08 | 2024-03-13 | | April 2024 | 2024-04-12 | 2024-04-17 | +| May 2024 | 2024-05-10 | 2024-05-15 | ## Detailed Release History for Active Branches diff --git a/content/ja/blog/_posts/2023-01-06-unhealthy-pod-eviction-policy-for-pdb.md b/content/ja/blog/_posts/2023-01-06-unhealthy-pod-eviction-policy-for-pdb.md new file mode 100644 index 0000000000000..eafaf5e8fc0bb --- /dev/null +++ b/content/ja/blog/_posts/2023-01-06-unhealthy-pod-eviction-policy-for-pdb.md @@ -0,0 +1,91 @@ +--- +layout: blog +title: "Kubernetes 1.26: PodDisruptionBudgetによって保護された不健全なPodに対する退避ポリシー" +date: 2023-01-06 +slug: "unhealthy-pod-eviction-policy-for-pdbs" +--- + +**著者:** Filip Křepinský (Red Hat), Morten Torkildsen (Google), Ravi Gudimetla (Apple) + + +アプリケーションの中断がその可用性に影響を与えないようにすることは、簡単な作業ではありません。 +先月リリースされたKubernetes v1.26では、[PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets) (PDB) に +_不健全なPodの退避ポリシー_ を指定して、ノード管理操作中に可用性を維持できるようになりました。 +この記事では、アプリケーション所有者が中断をより柔軟に管理できるようにするために、PDBにどのような変更が導入されたのかを詳しく説明します。 + +## これはどのような問題を解決しますか?{#what-problem-does-this-solve} + +APIによって開始されるPodの退避では、PodDisruptionBudget(PDB)が考慮されます。 +これは、退避によるPodへの[自発的な中断](/ja/docs/concepts/scheduling-eviction/#pod-disruption)の要求は保護されたアプリケーションを中断してはならず、 +PDBの`.status.currentHealthy`が`.status.desiredHealthy`を下回ってはいけないことを意味します。 +[Unhealthy](/docs/tasks/run-application/configure-pdb/#healthiness-of-a-pod)な実行中のPodはPDBステータスにはカウントされませんが、 +これらの退避はアプリケーションが中断されない場合にのみ可能です。 +これにより、中断されたアプリケーションやまだ開始されていないアプリケーションが、退避によって追加のダウンタイムが発生することなく、できるだけ早く可用性を達成できるようになります。 + +残念ながら、これは手動の介入なしでノードをドレインしたいクラスター管理者にとって問題を引き起こします。 +(バグまたは構成ミスにより)Podが`CrashLoopBackOff`状態になっているアプリケーション、または単に準備ができていないPodがあるアプリケーションが誤動作している場合、このタスクはさらに困難になります。 +アプリケーションのすべてのPodが正常でない場合、PDBの違反により退避リクエストは失敗します。その場合、ノードのドレインは進行できません。 + +一方で、次の目的で従来の動作に依存するユーザーもいます。 + +- 基盤となるリソースまたはストレージを保護しているPodの削除によって引き起こされるデータ損失を防止する +- アプリケーションに対して可能な限り最高の可用性を実現する + +Kubernetes 1.26では、PodDisruptionBudget APIに新しい実験的フィールド`.spec.unhealthyPodEvictionPolicy`が導入されました。 +このフィールドを有効にすると、これらの要件の両方をサポートできるようになります。 + +## どのように機能しますか?{#how-does-it-work} + +APIによって開始される退避は、Podの安全な終了をトリガーするプロセスです。 +このプロセスは、APIを直接呼び出すか、`kubectl drain`コマンドを使用するか、クラスター内の他のアクターを使用して開始できます。 +このプロセス中に、十分な数のPodが常にクラスター内で実行されていることを確認するために、すべてのPodの削除が適切なPDBと照合されます。 + +次のポリシーにより、PDBの作成者は、プロセスが不健全なPodを処理する方法をより詳細に制御できるようになります。 + +`IfHealthyBudget`と`AlwaysAllow`の2つのポリシーから選択できます。 + +前者の`IfHealthyBudget`は、従来の動作に従って、デフォルトで得られる最高の可用性を実現します。 +不健全なPodは、アプリケーションが利用可能な最小数の`.status.desiredHealthy`だけPodがある場合にのみ中断できます。 + +PDBの`spec.unhealthyPodEvictionPolicy`フィールドを`AlwaysAllow`に設定することにより、アプリケーションにとってベストエフォートの可用性を選択することになります。 +このポリシーを使用すると、不健全なPodをいつでも削除できます。これにより、クラスターの保守とアップグレードが容易になります。 + +多くの場合、`AlwaysAllow`がより良い選択であると考えられますが、一部の重要なワークロードでは、 +不健全なPodであってもノードドレインやAPIによって開始される他の形式の退避から保護する方が望ましい場合もあります。 + +## どのように利用できますか?{#how-do-i-use-it} + +これはアルファ機能であるため、kube-apiserverに対してコマンドライン引数`--feature-gates=PDBUnhealthyPodEvictionPolicy=true`を指定して 
+`PDBUnhealthyPodEvictionPolicy`[フィーチャーゲート](/ja/docs/reference/command-line-tools-reference/feature-gates/)を有効にする必要があります。 + +ここに例を示します。クラスターでフィーチャーゲートを有効にし、プレーンなWebサーバーを実行するDeploymentをすでに定義していると仮定します。 +そのDeploymentのPodに`app: nginx`というラベルを付けました。 +回避可能な中断を制限したいと考えており、このアプリにはベストエフォートの可用性で十分であることがわかっています。 +WebサーバーのPodが不健全な場合でも、退避を許可することにしました。 +不健全なPodを排除するための`AlwaysAllow`ポリシーを使用して、このアプリケーションを保護するPDBを作成します。 + +```yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: nginx-pdb +spec: + selector: + matchLabels: + app: nginx + maxUnavailable: 1 + unhealthyPodEvictionPolicy: AlwaysAllow +``` + + +## もっと学ぶには?{#how-can-i-learn-more} + + +- KEPを読んでください: [Unhealthy Pod Eviction Policy for PDBs](https://github.com/kubernetes/enhancements/tree/master/keps/sig-apps/3017-pod-healthy-policy-for-pdb) +- PodDisruptionBudgetについてのドキュメントを読んでください: [Unhealthy Pod Eviction Policy](/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy) +- [PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets)、[draining of Nodes](/docs/tasks/administer-cluster/safely-drain-node/)および[evictions](/ja/docs/concepts/scheduling-eviction/api-eviction/)についてKubernetesドキュメントを確認してください + + +## どうすれば参加できますか?{#how-do-i-get-involved} + +フィードバックがある場合は、Slackの[#sig-apps](https://kubernetes.slack.com/archives/C18NZM5K9) チャンネル(必要な場合は https://slack.k8s.io/ にアクセスして招待を受けてください)、またはSIG Appsメーリングリストにご連絡ください。kubernetes-sig-apps@googlegroups.com diff --git a/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/index.md b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/index.md new file mode 100644 index 0000000000000..9e1cf673b3600 --- /dev/null +++ b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/index.md @@ -0,0 +1,86 @@ +--- +layout: blog +title: "SIG Releaseスポットライト(リリース・チーム・サブプロジェクト)" +date: 2024-01-15 +slug: sig-release-spotlight-2023 +--- + +**筆者:** Nitish Kumar + +リリース・スペシャル・インタレスト・グループ(SIG Release)は、Kubernetesが4ヶ月ごとに最先端の機能とバグ修正でその刃を研ぐ場所です。Kubernetesのような大きなプロジェクトが、新バージョンをリリースするまでのタイムラインをどのように効率的に管理しているのか、またリリースチームの内部はどのようになっているのか、考えたことはありますか?このような疑問に興味がある方、もっと知りたい方、SIG Releaseの仕事に関わりたい方は、ぜひ読んでみてください! + +SIG ReleaseはKubernetesの開発と進化において重要な役割を担っています。その主な責任は、Kubernetesの新バージョンのリリースプロセスを管理することです。[通常3〜4ヶ月ごと](https://www.kubernetes.dev/resources/release/)の定期的なリリースサイクルで運営されています。このサイクルの間、Kubernetesリリースチームは他のSIGやコントリビューターと密接に連携し、円滑でうまく調整されたリリースを保証します。これには、リリーススケジュールの計画、コードフリーズとテストフェーズの期限の設定、バイナリ、ドキュメント、リリースノートなどのリリース成果物の作成が含まれます。 + +さらに読み進める前に、SIG Releaseにはリリース・エンジニアリングとリリース・チームという2つのサブプロジェクトがあることに注意してください。 + +このブログ記事では、[Nitish Kumar](https://twitter.com/nitishfy)がSIG Releaseのテクニカル・リーダーであるVerónica López (PlanetScale)にインタビューし、Release Teamサブプロジェクトにスポットライトを当て、リリース・プロセスがどのように見えるか、そして参加する方法について説明します。 + +1. 
**最初の計画から最終的なリリースまで、Kubernetesの新バージョンの典型的なリリースプロセスはどのようなものですか?スムーズなリリースを保証するために使用している特定の方法論やツールはありますか?** + + Kubernetesの新バージョンのリリースプロセスは、十分に構造化されたコミュニティ主導の取り組みです。私たちが従う特定の方法論やツールはありませんが、物事を整理しておくための一連の手順を記載したカレンダーはあります。完全なリリースプロセスは次のようになります: + +- **リリースチームの立ち上げ**: 新しいリリースのさまざまなコンポーネントの管理を担当するKubernetesコミュニティのボランティアを含むリリースチームの結成から始めます。これは通常、前のリリースが終了する前に行われます。チームが結成されると、リリースチームリーダーとブランチマネージャーが通常の成果物のカレンダーを提案する間に、新しいメンバーがオンボードされます。例として、SIG Releaseのリポジトリに作成された[v1.29チーム結成のissue](https://github.com/kubernetes/sig-release/issues/2307)を見てください。コントリビューターがリリースチームの一員になるには、通常リリースシャドウプログラムを通りますが、それがSIG Releaseに参加する唯一の方法というわけではありません。 + +- **初期段階**: 各リリースサイクルの最初の数週間で、SIG ReleaseはKubernetes機能強化提案(KEPs)で概説された新機能や機能強化の進捗を熱心に追跡します。これらの機能のすべてがまったく新しいものではありませんが、多くの場合、アルファ段階から始まり、その後ベータ段階に進み、最終的には安定したステータスに到達します。 + +- **機能の成熟段階**: 通常、コミュニティからのフィードバックを集めるため、実験的な新機能を含むアルファ・リリースを2、3回行い、その後、機能がより安定し、バグの修正が中心となるベータ・リリースを2、3回行います。この段階でのユーザーからのフィードバックは非常に重要で、この段階で発生する可能性のあるバグやその他の懸念に対処するために、追加のベータ・リリースを作成しなければならないこともあります。これがクリアされると、実際のリリースの前にリリース候補(RC)を作成します。このサイクルを通じて、リリースノートやユーザーガイドなどのドキュメントの更新や改善に努めます。 + +- **安定化段階**: 新リリースの数週間前にコードフリーズを実施し、この時点以降は新機能の追加を禁止します。メインリリースと並行して、私たちはKubernetesの古い公式サポートバージョンのパッチを毎月作成し続けているので、Kubernetesバージョンのライフサイクルはその後数ヶ月に及ぶと言えます。完全なリリースサイクル全体を通して、リリースノートやユーザーガイドを含むドキュメントの更新と改善に努めます。 + + {{< figure src="sig-release-overview.png" alt="リリースチームのオンボーディング; 初期段階; 機能の成熟段階; 安定化段階" >}} + +2. **各リリースで安定性と新機能の導入のバランスをどのように扱っていますか?どのような基準で、どの機能をリリースに含めるかを決定するのですか?** + + 終わりのないミッションですが、重要なのは私たちのプロセスとガイドラインを尊重することだと考えています。私たちのガイドラインは、このプロジェクトに豊富な知識と経験をもたらしてくれるコミュニティの何十人ものメンバーから、何時間にもわたって議論とフィードバックを重ねた結果です。もし厳格なガイドラインがなかったら、私たちの注意を必要とするもっと生産的な議題に時間を使う代わりに、同じ議論を何度も繰り返してしまうでしょう。すべての重要な例外は、チームメンバーの大半の合意を必要とするため、品質を確保することができます。 + + 何がリリースになるかを決定するプロセスは、リリースチームがワークフローを引き継ぐずっと前から始まっています。各SIGと経験豊富なコントリビューターが、機能や変更を含めるかどうかを決定します。その後、リリースチームが、それらの貢献がドキュメント、テスト、後方互換性などの要件を満たしていることを確認し、正式に許可します。同様のプロセスは月例パッチリリースのチェリーピックでも行われ、完全なKEPを必要とするPRや、影響を受けるすべてのブランチを含まない修正は受け入れないという厳しいポリシーがあります。 + +3. **Kubernetesの開発とリリース中に遭遇した最も大きな課題は何ですか?これらの課題をどのように克服しましたか?** + + リリースのサイクルごとに、さまざまな課題が発生します。新たに発見されたCVE(Common Vulnerabilities and Exposures)のような土壇場の問題に取り組んだり、内部ツール内のバグを解決したり、以前のリリースの機能によって引き起こされた予期せぬリグレッションに対処したりすることもあります。私たちがしばしば直面するもう1つの障害は、私たちのチームは大規模ですが、私たちのほとんどがボランティアベースで貢献していることです。時には人手が足りないと感じることもありますが、私たちは常に組織化し、うまくやりくりしています。 + +4. **新しい貢献者として、SIG Releaseに参加するための理想的な道はどのようなものでしょうか?誰もが自分のタスクに忙殺されているコミュニティで、効果的に貢献するために適切なタスクを見つけるにはどうすればいいのでしょうか?** + + オープンソースコミュニティへの関わり方は人それぞれです。SIG Releaseは、リリースを出荷できるように自分たちでツールを書くという、自分勝手なチームです。[SIG K8s Infra](https://github.com/kubernetes/community/blob/master/sig-k8s-infra/README.md)のような他のSIGとのコラボレーションも多いのですが、私たちが使用するツールはすべて、コストを削減しつつ、私たちの大規模な技術的ニーズに合わせて作られたものでなければなりません。このため、「単に」リリースを作成するだけでなく、さまざまなタイプのプロジェクトを手伝ってくれるボランティアを常に探しています。 + + 私たちの現在のプロジェクトでは、[Go](https://go.dev/)プログラミング、Kubernetes内部の理解、Linuxパッケージング、サプライチェーンセキュリティ、テクニカルライティング、一般的なオープンソースプロジェクトのメンテナンスなどのスキルが必要です。このスキルセットは、プロジェクトの成長とともに常に進化しています。 + + 理想的な道筋として、私たちはこう提案します: + + - どのように機能が管理されているか、リリースカレンダー、リリースチームの全体的な構造など、コードに慣れる。 + - [Slack](https://communityinviter.com/apps/kubernetes/community)(#sig-release)などのKubernetesコミュニティのコミュニケーションチャンネルに参加する。 + - コミュニティ全員が参加できる[SIG Releaseウィークリーミーティング](https://github.com/kubernetes/community/tree/master/sig-release#meetings)に参加する。これらのミーティングに参加することは、あなたのスキルセットや興味に関連すると思われる進行中のプロジェクトや将来のプロジェクトについて学ぶ素晴らしい方法です。 + + 経験豊富な貢献者は皆、かつてあなたのような立場にあったことを忘れないでください。遠慮せずに質問し、議論に参加し、貢献するための小さな一歩を踏み出しましょう。 + + {{< figure src="sig-release-meetings.png" alt="SIG Releaseに関する質問" >}} + +5. 
**リリースシャドウプログラムとは何ですか?また、他の様々なSIGに含まれるシャドウプログラムとの違いは何ですか?** + + リリースシャドウプログラムは、Kubernetesのリリースサイクルを通して、リリースチームの経験豊富なメンバーをシャドウイングする機会を提供します。これは、Kubernetesのリリースに必要な、サブチームにまたがるすべての困難な仕事を見るまたとないチャンスです。多くの人は、私たちの仕事は3ヶ月ごとにリリースを切ることだけだと思っていますが、それは氷山の一角にすぎません。 + + 私たちのプログラムは通常、特定のKubernetesリリースサイクルに沿っており、それは約3ヶ月の予測可能なタイムラインを持っています。このプログラムではKubernetesの新機能を書くことはありませんが、リリースチームは新リリースと何千人ものコントリビューターとの最後のステップであるため、高い責任感が求められます。 + +6. **一般的に、次のKubernetesリリースのリリースシャドウ/リリースリードとしてボランティアに参加する人に求める資格は何ですか?** + + どの役割もある程度の技術的能力を必要としますが、Goの実践的な経験やKubernetes APIに精通していることを必要とするものもあれば、技術的な内容を明確かつ簡潔に伝えるのが得意な人を必要とするものもあります。技術的な専門知識よりも、熱意とコミットメントを重視しています。もしあなたが正しい姿勢を持っていて、Kubernetesやリリース・エンジニアリングの仕事を楽しんでいることが伝われば、たとえそれがあなたが余暇を利用して立ち上げた個人的なプロジェクトであったとしても、チームは必ずあなたを指導します。セルフスターターであること、そして質問をすることを恐れないことは、私たちのチームであなたを大きく前進させます。 + +7. **リリースシャドープログラムに何度も不合格になった人に何を勧めますか?** + + 応募し続けることです。 + + リリースサイクルごとに応募者数が飛躍的に増えているため、選ばれるのが難しくなり、落胆することもありますが、不採用になったからといって、あなたに才能がないというわけではないことを知っておいてください。すべての応募者を受け入れることは現実的に不可能です、しかし、ここに私たちが提案する代替案があります。: + + 毎週開催されるKubernetes SIGのリリースミーティングに参加して、自己紹介をし、チームや私たちが取り組んでいるプロジェクトに慣れてください。 + + リリースチームはSIG Releaseに参加する方法の1つですが、私たちは常に手伝ってくれる人を探しています。繰り返しになりますが、一定の技術的な能力に加えて、私たちが最も求めている特性は、信頼できる人であり、それには時間が必要です。 + + {{< figure src="sig-release-motivation.png" alt="SIG Releaseのモチベーション" >}} + +8. **リリースチームがKubernetes v1.28に特に期待している進行中の取り組みや今後の機能について教えてください。これらの進歩は、Kubernetesの長期的なビジョンとどのように整合しているのでしょうか?** + + Kubernetesのパッケージをコミュニティインフラ上でついに公開できることに興奮しています。数年前からやりたいと思っていたことですが、移行する前に整えなければならない技術的な意味合いが多いプロジェクトです。それが終われば、生産性を向上させ、ワークフロー全体をコントロールできるようになります。 + +## 最後に + +さて、この対談はここで終わりですが、学習はこれで終わりではありません。このインタビューが、SIG Releaseが何をしているのか、そしてどのように手助けを始めたらいいのか、ある程度わかっていただけたと思います。重要なこととして、この記事はSIG Releaseの最初のサブプロジェクトであるリリース・チームを取り上げています。次回のSIG Releaseのスポットライトブログでは、Release Engineeringサブプロジェクトにスポットライトを当て、その活動内容や参加方法について紹介します。最後に、SIG Releaseの運営方法についてより深く理解するために、[SIG Release憲章](https://github.com/kubernetes/community/tree/master/sig-release)をご覧ください。 diff --git a/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-meetings.png b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-meetings.png new file mode 100644 index 0000000000000..96c01b6a4dceb Binary files /dev/null and b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-meetings.png differ diff --git a/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-motivation.png b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-motivation.png new file mode 100644 index 0000000000000..02cb9429ebe30 Binary files /dev/null and b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-motivation.png differ diff --git a/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-overview.png b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-overview.png new file mode 100644 index 0000000000000..0361c3e22d181 Binary files /dev/null and b/content/ja/blog/_posts/2024-01-15-SIG-Release-Spotlight/sig-release-overview.png differ diff --git a/content/ja/docs/concepts/workloads/controllers/job.md b/content/ja/docs/concepts/workloads/controllers/job.md index cdf9dc9f4243b..42180f95cc5c3 100644 --- a/content/ja/docs/concepts/workloads/controllers/job.md +++ b/content/ja/docs/concepts/workloads/controllers/job.md @@ -83,7 +83,7 @@ apiVersion: batch/v1 kind: Job metadata: annotations: batch.kubernetes.io/job-tracking: "" - ... + ... 
creationTimestamp: "2022-11-10T17:53:53Z" generation: 1 labels: @@ -334,7 +334,7 @@ Pod失敗ポリシーまたはPod失敗のバックオフポリシーのいず - `spec.podFailurePolicy.rules`で指定したPod失敗ポリシーのルールが順番に評価されます。あるPodの失敗がルールに一致すると、残りのルールは無視されます。Pod失敗に一致するルールがない場合は、デフォルトの処理が適用されます。 - `spec.podFailurePolicy.rules[*].onExitCodes.containerName`を指定することで、ルールを特定のコンテナに制限することができます。指定しない場合、ルールはすべてのコンテナに適用されます。指定する場合は、Pod テンプレート内のコンテナ名または`initContainer`名のいずれかに一致する必要があります。 - Pod失敗ポリシーが`spec.podFailurePolicy.rules[*].action`にマッチしたときに実行されるアクションを指定できます。指定可能な値は以下のとおりです。 - - `FailJob`: PodのJobを`Failed`としてマークし、実行中の Pod をすべて終了させる必要があることを示します。 + - `FailJob`: PodのJobを`Failed`としてマークし、実行中の Pod をすべて終了させる必要があることを示します。 - `Ignore`: `.spec.backoffLimit`のカウンターは加算されず、代替のPodが作成すべきであることを示します。 - `Count`: Podがデフォルトの方法で処理されるべきであることを示します。`.spec.backoffLimit`のカウンターが加算されます。 @@ -574,7 +574,7 @@ Events: この機能により、Jobが開始する前にスケジューリング命令を更新でき、カスタムキューコントローラーがPodの配置に影響を与えることができるようになります。同時に実際のPodからNodeへの割り当てをkube-schedulerにオフロードする能力を提供します。これは一時停止されたJobの中で、一度も一時停止解除されたことのないJobに対してのみ許可されます。 -JobのPodテンプレートで更新可能なフィールドはnodeAffinity、nodeSelector、tolerations、labelsとannotations、[スケジューリングゲート](/docs/concepts/scheduling-eviction/pod-scheduling-readiness/)です。 +JobのPodテンプレートで更新可能なフィールドはnodeAffinity、nodeSelector、tolerations、labelsとannotations、[スケジューリングゲート](/docs/concepts/scheduling-eviction/pod-scheduling-readiness/)です。 ### 独自のPodセレクターを指定 {#specifying-your-own-pod-selector} @@ -636,7 +636,7 @@ spec: コントロールプレーンは任意のJobに属するPodを追跡し、そのPodがAPIサーバーから削除されたかどうか認識します。そのためJobコントローラーはファイナライザー`batch.kubernetes.io/job-tracking`を持つPodを作成します。コントローラーがファイナライザーを削除するのは、PodがJobステータスに反映された後なので、他のコントローラーやユーザがPodを削除することができます。 -Kubernetes 1.26にアップグレードする前、またはフィーチャーゲート`JobTrackingWithFinalizers`が有効になる前に作成されたJobは、Podファイナライザーを使用せずに追跡されます。Job{{< glossary_tooltip term_id="controller" text="コントローラー" >}}は、クラスタに存在するPodのみに基づいて、`succeeded`Podと`failed`Podのステータスカウンタを更新します。クラスタからPodが削除されると、コントロールプレーンはJobの進捗を見失う可能性があります。 +Kubernetes 1.26にアップグレードする前、またはフィーチャーゲート`JobTrackingWithFinalizers`が有効になる前に作成されたJobは、Podファイナライザーを使用せずに追跡されます。Job{{< glossary_tooltip term_id="controller" text="コントローラー" >}}は、クラスターに存在するPodのみに基づいて、`succeeded`Podと`failed`Podのステータスカウンタを更新します。クラスターからPodが削除されると、コントロールプレーンはJobの進捗を見失う可能性があります。 Jobが`batch.kubernetes.io/job-tracking`というアノテーションを持っているかどうかをチェックすることで、コントロールプレーンがPodファイナライザーを使ってJobを追跡しているかどうかを判断できます。Jobからこのアノテーションを手動で追加したり削除したりしては**いけません**。代わりに、JobがPodファイナライザーを使用して追跡されていることを確認するために、Jobを再作成することができます。 @@ -652,7 +652,7 @@ Jobが`batch.kubernetes.io/job-tracking`というアノテーションを持っ ### 単なるPod {#bare-pods} -Podが動作しているノードが再起動または故障した場合、Podは終了し、再起動されません。しかし、終了したPodを置き換えるため、Jobが新しいPodを作成します。このため、たとえアプリケーションが1つのPodしか必要としない場合でも、単なるPodではなくJobを使用することをお勧めします。 +Podが動作しているノードが再起動または故障した場合、Podは終了し、再起動されません。しかし、終了したPodを置き換えるため、Jobが新しいPodを作成します。このため、たとえアプリケーションが1つのPodしか必要としない場合でも、単なるPodではなくJobを使用することをお勧めします。 ### Replication Controller {#replication-controller} diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 0efeb9daccc43..4c4ecc2b83634 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -122,23 +122,31 @@ kubeadmは`kubelet`や`kubectl`をインストールまたは管理**しない** {{< tabs name="k8s_install" >}} {{% tab name="Ubuntu、Debian、またはHypriotOS" %}} -1. `apt`のパッケージ一覧を更新し、Kubernetesの`apt`リポジトリを利用するのに必要なパッケージをインストールします: +1. 
`apt`のパッケージ一覧を更新し、Kubernetesの`apt`リポジトリーを利用するのに必要なパッケージをインストールします: ```shell sudo apt-get update + # apt-transport-httpsはダミーパッケージの可能性があります。その場合、そのパッケージはスキップできます sudo apt-get install -y apt-transport-https ca-certificates curl gpg ``` -2. Google Cloudの公開鍵をダウンロードします: +2. Kubernetesパッケージリポジトリーの公開署名キーをダウンロードします。すべてのリポジトリーに同じ署名キーが使用されるため、URL内のバージョンは無視できます: ```shell - curl -fsSL https://dl.k8s.io/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg + # `/etc/apt/keyrings`フォルダーが存在しない場合は、curlコマンドの前に作成する必要があります。下記の備考を参照してください。 + # sudo mkdir -p -m 755 /etc/apt/keyrings + curl -fsSL https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg ``` -3. Kubernetesの`apt`リポジトリを追加します: +{{< note >}} +Debian 12とUbuntu 22.04より古いリリースでは、`/etc/apt/keyrings`フォルダーはデフォルトでは存在しないため、curlコマンドの前に作成する必要があります。 +{{< /note >}} + +3. 適切なKubernetes `apt`リポジトリーを追加します。このリポジトリーには、Kubernetes {{< skew currentVersion >}}用のパッケージのみがあることに注意してください。他のKubernetesマイナーバージョンの場合は、目的のマイナーバージョンに一致するようにURL内のKubernetesマイナーバージョンを変更する必要があります(インストールする予定のKubernetesバージョンのドキュメントも読んでください): ```shell - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list + # これにより、/etc/apt/sources.list.d/kubernetes.listにある既存の設定が上書きされます + echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list ``` 4. `apt`のパッケージ一覧を更新し、kubelet、kubeadm、kubectlをインストールします。そしてバージョンを固定します: @@ -148,10 +156,6 @@ kubeadmは`kubelet`や`kubectl`をインストールまたは管理**しない** sudo apt-get install -y kubelet kubeadm kubectl sudo apt-mark hold kubelet kubeadm kubectl ``` -{{< note >}} -Debian 12やUbuntu 22.04より古いリリースでは、`/etc/apt/keyrings`はデフォルトでは存在しません。 -必要に応じてこのディレクトリを作成し、誰でも読み取り可能で、管理者のみ書き込み可能にすることができます。 -{{< /note >}} {{% /tab %}} {{% tab name="CentOS、RHEL、またはFedora" %}} diff --git a/content/ja/docs/setup/release/_index.md b/content/ja/docs/setup/release/_index.md deleted file mode 100644 index 8c812f72def53..0000000000000 --- a/content/ja/docs/setup/release/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "リリースノートおよびバージョンスキュー" -weight: 10 ---- diff --git a/content/ja/docs/tasks/tools/_index.md b/content/ja/docs/tasks/tools/_index.md index 0d78f6ccd82e1..21bc70b1404e8 100644 --- a/content/ja/docs/tasks/tools/_index.md +++ b/content/ja/docs/tasks/tools/_index.md @@ -1,6 +1,6 @@ --- title: "ツールのインストール" -description: Kubernetesのツールをローカルのコンピュータ上にセットアップします。 +description: Kubernetesのツールをローカルのコンピューター上にセットアップします。 weight: 10 no_list: true --- diff --git a/content/ja/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/ja/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index 42e373f3cb786..1097da6f408e6 100644 --- a/content/ja/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/ja/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -27,7 +27,7 @@
目標
Kubernetesクラスター
- Kubernetesは、単一のユニットとして機能するように接続された、可用性の高いコンピュータのクラスターをまとめあげます。Kubernetesの抽象化により、コンテナ化されたアプリケーションを個々のマシンに特に結び付けることなくクラスターにデプロイできます。この新しいデプロイモデルを利用するには、アプリケーションを個々のホストから切り離す方法でアプリケーションをパッケージ化(つまり、コンテナ化)する必要があります。コンテナ化されたアプリケーションは、アプリケーションがホストに深く統合されたパッケージとして特定のマシンに直接インストールされていた従来のデプロイモデルよりも柔軟で、より迅速に利用可能です。Kubernetesはより効率的な方法で、クラスター全体のアプリケーションコンテナの配布とスケジューリングを自動化します。Kubernetesはオープンソースのプラットフォームであり、プロダクションレディです。 + Kubernetesは、単一のユニットとして機能するように接続された、可用性の高いコンピューターのクラスターをまとめあげます。Kubernetesの抽象化により、コンテナ化されたアプリケーションを個々のマシンに特に結び付けることなくクラスターにデプロイできます。この新しいデプロイモデルを利用するには、アプリケーションを個々のホストから切り離す方法でアプリケーションをパッケージ化(つまり、コンテナ化)する必要があります。コンテナ化されたアプリケーションは、アプリケーションがホストに深く統合されたパッケージとして特定のマシンに直接インストールされていた従来のデプロイモデルよりも柔軟で、より迅速に利用可能です。Kubernetesはより効率的な方法で、クラスター全体のアプリケーションコンテナの配布とスケジューリングを自動化します。Kubernetesはオープンソースのプラットフォームであり、プロダクションレディです。
Kubernetesクラスターは以下の2種類のリソースで構成されています:
    @@ -47,7 +47,7 @@
    まとめ:
- Kubernetesは、コンピュータクラスター内およびコンピュータクラスター間でのアプリケーションコンテナの配置(スケジューリング)および実行を調整する、プロダクショングレードのオープンソースプラットフォームです。 + Kubernetesは、コンピュータークラスター内およびコンピュータークラスター間でのアプリケーションコンテナの配置(スケジューリング)および実行を調整する、プロダクショングレードのオープンソースプラットフォームです。
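The tutorial text in the hunk above describes a cluster as two kinds of resources: a control plane and worker nodes. As a small illustrative aside (not part of the translated page itself), both kinds can be listed from a workstation, assuming `kubectl` is installed and its kubeconfig already points at a running cluster:

```shell
# List every node in the cluster, control plane and workers alike;
# assumes kubectl is already configured against a running cluster.
kubectl get nodes -o wide
```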
diff --git a/content/ja/docs/setup/release/version-skew-policy.md b/content/ja/releases/version-skew-policy.md similarity index 98% rename from content/ja/docs/setup/release/version-skew-policy.md rename to content/ja/releases/version-skew-policy.md index bd1875aa41942..2d85ae15f72d9 100644 --- a/content/ja/docs/setup/release/version-skew-policy.md +++ b/content/ja/releases/version-skew-policy.md @@ -1,7 +1,8 @@ --- -title: Kubernetesバージョンとバージョンスキューサポートポリシー -content_type: concept -weight: 30 +title: バージョンスキューポリシー +type: docs +description: > + さまざまなKubernetesコンポーネント間でサポートされる最大のバージョンスキュー。 --- diff --git a/content/zh-cn/docs/concepts/containers/images.md b/content/zh-cn/docs/concepts/containers/images.md index ac3b849b5bed7..846cbb0517ab2 100644 --- a/content/zh-cn/docs/concepts/containers/images.md +++ b/content/zh-cn/docs/concepts/containers/images.md @@ -300,6 +300,31 @@ which is 300 seconds (5 minutes). Kubernetes 会增加每次尝试之间的延迟,直到达到编译限制,即 300 秒(5 分钟)。 + +### 基于运行时类的镜像拉取 {#image-pull-per-runtime-class} + +{{< feature-state for_k8s_version="v1.29" state="alpha" >}} + + +Kubernetes 包含了根据 Pod 的 RuntimeClass 来执行镜像拉取的 Alpha 支持。 + + +如果你启用了 `RuntimeClassInImageCriApi` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), +kubelet 会通过一个元组(镜像名称,运行时处理程序)而不仅仅是镜像名称或镜像摘要来引用容器镜像。 +你的{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}} +可能会根据选定的运行时处理程序调整其行为。 +基于运行时类来拉取镜像对于基于 VM 的容器(如 Windows Hyper-V 容器)会有帮助。 + diff --git a/content/zh-cn/docs/concepts/security/_index.md b/content/zh-cn/docs/concepts/security/_index.md index 6043801a77077..9c475a157ad75 100644 --- a/content/zh-cn/docs/concepts/security/_index.md +++ b/content/zh-cn/docs/concepts/security/_index.md @@ -2,4 +2,253 @@ title: "安全" weight: 85 description: 确保云原生工作负载安全的一组概念。 +simple_list: true --- + + + + +Kubernetes 文档的这一部分内容的旨在引导你学习如何更安全地运行工作负载, +以及维护 Kubernetes 集群的基本安全性。 + +Kubernetes 基于云原生架构,并借鉴了 +{{< glossary_tooltip text="CNCF" term_id="cncf" >}} 有关云原生信息安全良好实践的建议。 + + +请阅读[云原生安全和 Kubernetes](/zh-cn/docs/concepts/security/cloud-native-security/), +了解有关如何保护集群及其上运行的应用程序的更广泛背景信息。 + + +## Kubernetes 安全机制 {#security-mechanisms} + +Kubernetes 包含多个 API 和安全组件, +以及定义[策略](#policies)的方法,这些策略可以作为你的信息安全管理的一部分。 + + +### 控制平面保护 + +任何 Kubernetes 集群的一个关键安全机制是[控制对 Kubernetes API 的访问](/zh-cn/docs/concepts/security/controlling-access)。 + + +Kubernetes 希望你配置并使用 TLS, +以便在控制平面内以及控制平面与其客户端之间提供[传输中的数据加密](/zh-cn/docs/tasks/tls/managing-tls-in-a-cluster/)。 +你还可以为 Kubernetes 控制平面中存储的数据启用静态加密; +这与对你自己的工作负载数据使用静态加密不同,后者可能也是一个好主意。 + + +### Secret + +[Secret](/zh-cn/docs/concepts/configuration/secret/) API +为需要保密的配置值提供基本保护。 + + +### 工具负载保护 + +实施 [Pod 安全标准](/zh-cn/docs/concepts/security/pod-security-standards/)以确保 +Pod 及其容器得到适当隔离。如果需要,你还可以使用 +[RuntimeClass](/zh-cn/docs/concepts/containers/runtime-class) 来配置自定义隔离。 + + +[网络策略(NetworkPolicy)](/zh-cn/docs/concepts/services-networking/network-policies/) +可让控制 Pod 之间或 Pod 与集群外部网络之间的网络流量。 + + +### 审计 + +Kubernetes [审计日志记录](/zh-cn/docs/tasks/debug/debug-cluster/audit/)提供了一组与安全相关、 +按时间顺序排列的记录,记录了集群中的操作序列。 +集群审计用户、使用 Kubernetes API 的应用程序以及控制平面本身生成的活动。 + + +## 云提供商安全 + +{{% thirdparty-content vendor="true" %}} + +如果你在自己的硬件或不同的云平台上运行 Kubernetes 集群,请参阅对应云平台的文档以了解安全最佳实践。 +以下是一些流行云提供商的安全文档的链接: + + +{{< table caption="Cloud provider security" >}} + +IaaS 提供商 | 链接 | +-------------------- | ------------ | +阿里云 | https://www.alibabacloud.com/trust-center | +亚马逊网络服务 | https://aws.amazon.com/security | +谷歌云平台 | https://cloud.google.com/security | +华为云 | 
https://www.huaweicloud.com/intl/en-us/securecenter/overallsafety | +IBM 云 | https://www.ibm.com/cloud/security | +微软 Azure | https://docs.microsoft.com/en-us/azure/security/azure-security | +Oracle 云基础设施| https://www.oracle.com/security | +VMware vSphere | https://www.vmware.com/security/hardening-guides | + +{{< /table >}} + + +## 策略 + +你可以使用 Kubernetes 原生机制定义安全策略,例如 +[NetworkPolicy](/zh-cn/docs/concepts/services-networking/network-policies/)(对网络数据包过滤的声明式控制) +或 [ValidatingAdmisisonPolicy](/zh-cn/docs/reference/access -authn-authz/validating-admission-policy/) +(对某人可以使用 Kubernetes API 进行哪些更改的声明性限制)。 + + +你还可以依赖 Kubernetes 周边更广泛的生态系统的策略实现。 +Kubernetes 提供了扩展机制,让这些生态系统项目在源代码审查、 +容器镜像审批、API 访问控制、网络等方面实施自己的策略控制。 + + +有关策略机制和 Kubernetes 的更多信息,请阅读[策略](/zh-cn/docs/concepts/policy/)。 + +## {{% heading "whatsnext" %}} + + +了解相关的 Kubernetes 安全主题: + +* [保护集群](/zh-cn/docs/tasks/administer-cluster/secure-a-cluster/) +* Kubernetes 中的[已知漏洞](/zh-cn/docs/reference/issues-security/official-cve-feed/)(以及更多信息的链接) +* [传输中的数据加密](/zh-cn/docs/tasks/tls/managing-tls-in-a-cluster/)(针对控制平面) +* [静态数据加密](/zh-cn/docs/tasks/administer-cluster/encrypt-data/) +* [控制对 Kubernetes API 的访问](/zh-cn/docs/concepts/security/controlling-access) +* Pod 的 [网络策略](/zh-cn/docs/concepts/services-networking/network-policies/) +* [Kubernetes 中的 Secret](/zh-cn/docs/concepts/configuration/secret/) +* [Pod 安全标准](/zh-cn/docs/concepts/security/pod-security-standards/) +* [运行时类](/zh-cn/docs/concepts/containers/runtime-class) + + +了解上下文: + + + +* [云原生安全和 Kubernetes](/zh-cn/docs/concepts/security/cloud-native-security/) + + +获取认证: + +* [Kubernetes 安全专家认证](https://training.linuxfoundation.org/certification/certified-kubernetes-security-specialist/)和官方培训课程。 + +阅读本节的更多内容: + diff --git a/content/zh-cn/docs/concepts/security/overview.md b/content/zh-cn/docs/concepts/security/overview.md deleted file mode 100644 index 7ad06477d7950..0000000000000 --- a/content/zh-cn/docs/concepts/security/overview.md +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: 云原生安全概述 -description: > - 在云原生安全的背景下思考 Kubernetes 安全模型。 -content_type: concept -weight: 1 ---- - - - - -本概述定义了一个模型,用于在 Cloud Native 安全性上下文中考虑 Kubernetes 安全性。 - - -{{< warning >}} -此容器安全模型只提供建议,而不是经过验证的信息安全策略。 -{{< /warning >}} - - - - -## 云原生安全的 4 个 C {#the-4c-s-of-cloud-native-security} - -你可以分层去考虑安全性,云原生安全的 4 个 C 分别是云(Cloud)、集群(Cluster)、容器(Container)和代码(Code)。 - - -{{< note >}} -这种分层方法增强了[深度防护方法](https://en.wikipedia.org/wiki/Defense_in_depth_(computing))在安全性方面的 -防御能力,该方法被广泛认为是保护软件系统的最佳实践。 - -{{< /note >}} - -{{< figure src="/images/docs/4c.png" title="云原生安全的 4C" >}} - - -云原生安全模型的每一层都是基于下一个最外层,代码层受益于强大的基础安全层(云、集群、容器)。 -你无法通过在代码层解决安全问题来为基础层中糟糕的安全标准提供保护。 - - -## 云 {#cloud} - - -在许多方面,云(或者位于同一位置的服务器,或者是公司数据中心)是 Kubernetes 集群中的 -[可信计算基](https://en.wikipedia.org/wiki/Trusted_computing_base)。 -如果云层容易受到攻击(或者被配置成了易受攻击的方式),就不能保证在此基础之上构建的组件是安全的。 -每个云提供商都会提出安全建议,以在其环境中安全地运行工作负载。 - - -### 云提供商安全性 {#cloud-provider-security} - -如果你是在你自己的硬件或者其他不同的云提供商上运行 Kubernetes 集群, -请查阅相关文档来获取最好的安全实践。 - -下面是一些比较流行的云提供商的安全性文档链接: - -{{< table caption="云提供商安全" >}} - -IaaS 提供商 | 链接 | --------------------- | ------------ | -Alibaba Cloud | https://www.alibabacloud.com/trust-center | -Amazon Web Services | https://aws.amazon.com/security | -Google Cloud Platform | https://cloud.google.com/security | -Huawei Cloud | https://www.huaweicloud.com/intl/zh-cn/securecenter/overallsafety | -IBM Cloud | https://www.ibm.com/cloud/security | -Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security | -Oracle Cloud Infrastructure | 
https://www.oracle.com/security | -VMWare VSphere | https://www.vmware.com/security/hardening-guides | - -{{< /table >}} - - -### 基础设施安全 {#infrastructure-security} - -关于在 Kubernetes 集群中保护你的基础设施的建议: - -{{< table caption="基础设施安全" >}} - -Kubernetes 基础架构关注领域 | 建议 | ---------------------------------------------- | -------------- | -通过网络访问 API 服务(控制平面)|所有对 Kubernetes 控制平面的访问不允许在 Internet 上公开,同时应由网络访问控制列表控制,该列表包含管理集群所需的 IP 地址集。| -通过网络访问 Node(节点)| 节点应配置为 _仅能_ 从控制平面上通过指定端口来接受(通过网络访问控制列表)连接,以及接受 NodePort 和 LoadBalancer 类型的 Kubernetes 服务连接。如果可能的话,这些节点不应完全暴露在公共互联网上。| -Kubernetes 访问云提供商的 API | 每个云提供商都需要向 Kubernetes 控制平面和节点授予不同的权限集。为集群提供云提供商访问权限时,最好遵循对需要管理的资源的[最小特权原则](https://en.wikipedia.org/wiki/Principle_of_least_privilege)。[Kops 文档](https://github.com/kubernetes/kops/blob/master/docs/iam_roles.md#iam-roles)提供有关 IAM 策略和角色的信息。| -访问 etcd | 对 etcd(Kubernetes 的数据存储)的访问应仅限于控制平面。根据配置情况,你应该尝试通过 TLS 来使用 etcd。更多信息可以在 [etcd 文档](https://github.com/etcd-io/etcd/tree/master/Documentation)中找到。| -etcd 加密 | 在所有可能的情况下,最好对所有存储进行静态数据加密,并且由于 etcd 拥有整个集群的状态(包括机密信息),因此其磁盘更应该进行静态数据加密。| - -{{< /table >}} - - -## 集群 {#cluster} - -保护 Kubernetes 有两个方面需要注意: - -* 保护可配置的集群组件 -* 保护在集群中运行的应用程序 - - -### 集群组件 {#cluster-components} - -如果想要保护集群免受意外或恶意的访问,采取良好的信息管理实践,请阅读并遵循有关[保护集群](/zh-cn/docs/tasks/administer-cluster/securing-a-cluster/)的建议。 - - -### 集群中的组件(你的应用) {#cluster-applications} - -根据你的应用程序的受攻击面,你可能需要关注安全性的特定面,比如: -如果你正在运行中的一个服务(A 服务)在其他资源链中很重要,并且所运行的另一工作负载(服务 B) -容易受到资源枯竭的攻击,则如果你不限制服务 B 的资源的话,损害服务 A 的风险就会很高。 -下表列出了安全性关注的领域和建议,用以保护 Kubernetes 中运行的工作负载: - -工作负载安全性关注领域 | 建议 | ------------------------------- | --------------------- | -RBAC 授权(访问 Kubernetes API) | https://kubernetes.io/zh-cn/docs/reference/access-authn-authz/rbac/ -认证方式 | https://kubernetes.io/zh-cn/docs/concepts/security/controlling-access/ -应用程序 Secret 管理 (并在 etcd 中对其进行静态数据加密) | https://kubernetes.io/zh-cn/docs/concepts/configuration/secret/
https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/encrypt-data/ -确保 Pod 符合定义的 Pod 安全标准 | https://kubernetes.io/zh-cn/docs/concepts/security/pod-security-standards/#policy-instantiation -服务质量(和集群资源管理)| https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/quality-service-pod/ -网络策略 | https://kubernetes.io/zh-cn/docs/concepts/services-networking/network-policies/ -Kubernetes Ingress 的 TLS 支持 | https://kubernetes.io/zh-cn/docs/concepts/services-networking/ingress/#tls - - -## 容器 {#container} - -容器安全性不在本指南的探讨范围内。下面是一些探索此主题的建议和连接: - -容器关注领域 | 建议 | ------------------------------- | -------------- | -容器漏洞扫描和操作系统依赖安全性 | 作为镜像构建的一部分,你应该扫描你的容器里的已知漏洞。 -镜像签名和执行 | 对容器镜像进行签名,以维护对容器内容的信任。 -禁止特权用户 | 构建容器时,请查阅文档以了解如何在具有最低操作系统特权级别的容器内部创建用户,以实现容器的目标。 -使用带有较强隔离能力的容器运行时 | 选择提供较强隔离能力的[容器运行时类](/zh-cn/docs/concepts/containers/runtime-class/)。 - -## 代码 {#code} - -应用程序代码是你最能够控制的主要攻击面之一,虽然保护应用程序代码不在 Kubernetes 安全主题范围内,但以下是保护应用程序代码的建议: - - -### 代码安全性 {#code-security} - -{{< table caption="代码安全" >}} - -代码关注领域 | 建议 | --------------------------| -------------- | -仅通过 TLS 访问 | 如果你的代码需要通过 TCP 通信,请提前与客户端执行 TLS 握手。除少数情况外,请加密传输中的所有内容。更进一步,加密服务之间的网络流量是一个好主意。这可以通过被称为双向 TLS 或 [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication) 的过程来完成,该过程对两个证书持有服务之间的通信执行双向验证。 | -限制通信端口范围 | 此建议可能有点不言自明,但是在任何可能的情况下,你都只应公开服务上对于通信或度量收集绝对必要的端口。| -第三方依赖性安全 | 最好定期扫描应用程序的第三方库以了解已知的安全漏洞。每种编程语言都有一个自动执行此检查的工具。 | -静态代码分析 | 大多数语言都提供给了一种方法,来分析代码段中是否存在潜在的不安全的编码实践。只要有可能,你都应该使用自动工具执行检查,该工具可以扫描代码库以查找常见的安全错误,一些工具可以在以下连接中找到: https://owasp.org/www-community/Source_Code_Analysis_Tools | -动态探测攻击 | 你可以对服务运行一些自动化工具,来尝试一些众所周知的服务攻击。这些攻击包括 SQL 注入、CSRF 和 XSS。[OWASP Zed Attack](https://www.zaproxy.org/) 代理工具是最受欢迎的动态分析工具之一。 | - -{{< /table >}} - -## {{% heading "whatsnext" %}} - - -学习了解相关的 Kubernetes 安全主题: - -* [Pod 安全标准](/zh-cn/docs/concepts/security/pod-security-standards/) -* [Pod 的网络策略](/zh-cn/docs/concepts/services-networking/network-policies/) -* [控制对 Kubernetes API 的访问](/zh-cn/docs/concepts/security/controlling-access/) -* [保护你的集群](/zh-cn/docs/tasks/administer-cluster/securing-a-cluster/) -* 为控制面[加密通信中的数据](/zh-cn/docs/tasks/tls/managing-tls-in-a-cluster/) -* [加密静止状态的数据](/zh-cn/docs/tasks/administer-cluster/encrypt-data/) -* [Kubernetes 中的 Secret](/zh-cn/docs/concepts/configuration/secret/) -* [运行时类](/zh-cn/docs/concepts/containers/runtime-class) - diff --git a/content/zh-cn/docs/concepts/workloads/pods/init-containers.md b/content/zh-cn/docs/concepts/workloads/pods/init-containers.md index 8bd8972d3ca9b..6102518d6df92 100644 --- a/content/zh-cn/docs/concepts/workloads/pods/init-containers.md +++ b/content/zh-cn/docs/concepts/workloads/pods/init-containers.md @@ -28,6 +28,14 @@ array (which describes app containers). 
你可以在 Pod 的规约中与用来描述应用容器的 `containers` 数组平行的位置指定 Init 容器。 + +在 Kubernetes 中,[边车容器](/zh-cn/docs/concepts/workloads/pods/sidecar-containers/) +是在主应用容器之前启动并**持续运行**的容器。本文介绍 Init 容器:在 Pod 初始化期间完成运行的容器。 + ### 与普通容器的不同之处 {#differences-from-regular-containers} @@ -104,13 +104,60 @@ Init 容器支持应用容器的全部字段和特性,包括资源限制、 然而,Init 容器对资源请求和限制的处理稍有不同, 在下面[容器内的资源共享](#resource-sharing-within-containers)节有说明。 -同时 Init 容器不支持 `lifecycle`、`livenessProbe`、`readinessProbe` 和 `startupProbe`, -因为它们必须在 Pod 就绪之前运行完成。 + +常规的 Init 容器(即不包括边车容器)不支持 `lifecycle`、`livenessProbe`、`readinessProbe` 或 +`startupProbe` 字段。Init 容器必须在 Pod 准备就绪之前完成运行;而边车容器在 Pod 的生命周期内继续运行, +它支持一些探针。有关边车容器的细节请参阅[边车容器](/zh-cn/docs/concepts/workloads/pods/sidecar-containers/)。 + 如果为一个 Pod 指定了多个 Init 容器,这些容器会按顺序逐个运行。 每个 Init 容器必须运行成功,下一个才能够运行。当所有的 Init 容器运行完成时, Kubernetes 才会为 Pod 初始化应用容器并像平常一样运行。 + +### 与边车容器的不同之处 {#differences-from-sidecar-containers} + +Init 容器在主应用容器启动之前运行并完成其任务。 +与[边车容器](/zh-cn/docs/concepts/workloads/pods/sidecar-containers)不同, +Init 容器不会持续与主容器一起运行。 + + +Init 容器按顺序完成运行,等到所有 Init 容器成功完成之后,主容器才会启动。 + +Init 容器不支持 `lifecycle`、`livenessProbe`、`readinessProbe` 或 `startupProbe`, +而边车容器支持所有这些[探针](/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe)以控制其生命周期。 + + +Init 容器与主应用容器共享资源(CPU、内存、网络),但不直接与主应用容器进行交互。 +不过这些容器可以使用共享卷进行数据交换。 + -#### 边车容器 API {#api-for-sidecar-containers} - -{{< feature-state for_k8s_version="v1.28" state="alpha" >}} - - -Kubernetes 自 1.28 版本起引入了一个名为 `SidecarContainers` 的 Alpha 特性门控, -允许你为 Init 容器指定独立于 Pod 和其他 Init 容器的 `restartPolicy`。 -你还可以添加容器[探针](/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe)来控制 -Init 容器的生命周期。 - - -如果 Init 容器被创建时 `restartPolicy` 设置为 `Always`,则 Init 容器将启动并在整个 Pod -的生命期内保持运行,这对于运行与主应用容器分离的支持服务非常有用。 - -如果为该 Init 容器指定了 `readinessProbe`,则其结果将用于确定 Pod 的 `ready` 状态。 - - -由于这些容器以 Init 容器的形式定义,所以它们具有与其他 Init 容器相同的按序执行和顺序保证优势, -从而允许使用这些容器与其他 Init 容器混合在一起构造复杂的 Pod 初始化流程。 - -与常规的 Init 容器相比,只要 kubelet 将边车风格的 Init 容器的 `started` 容器状态设置为 true, -边车风格的 Init 容器会继续运行,下一个 Init 容器可以开始启动。 -到达该状态的前提是,要么需要容器中有进程正在运行且未定义启动探针,要么其 `startupProbe` 的结果是成功的。 - - -此特性可用于以更稳健的方式实现边车容器模式,这是因为如果某个边车容器失败,kubelet 总会重新启动它。 - -以下是一个具有两个容器的 Deployment 示例,其中一个是边车: - -{{% code_sample language="yaml" file="application/deployment-sidecar.yaml" %}} - - -此特性也可用于运行带有边车的 Job,因为在主容器完成后,边车容器不会阻止 Job 完成。 - -以下是一个具有两个容器的 Job 示例,其中一个是边车: - -{{% code_sample language="yaml" file="application/job/job-sidecar.yaml" %}} - #### 容器内的资源共享 {#resource-sharing-within-containers} -在给定的 Init 容器执行顺序下,资源使用适用于如下规则: +在给定的 Init、边车和应用容器执行顺序下,资源使用适用于如下规则: 配额和限制适用于有效 Pod 的请求和限制值。 -Pod 级别的 cgroups 是基于有效 Pod 的请求和限制值,和调度器相同。 + +Pod 级别的控制组(Cgroup)是基于 Pod 的有效 request 和 limit,与调度器相同。 +这部分内容也出现在[边车容器](/zh-cn/docs/concepts/workloads/pods/sidecar-containers/)页面上。 +如果你正在编辑这部分内容,请同时修改两处。 +{{< /comment >}} + -* 阅读[创建包含 Init 容器的 Pod](/zh-cn/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) -* 学习如何[调试 Init 容器](/zh-cn/docs/tasks/debug/debug-application/debug-init-containers/) -* 阅读 [kubelet](/zh-cn/docs/reference/command-line-tools-reference/kubelet/) 和 +进一步了解以下内容: + +* [创建包含 Init 容器的 Pod](/zh-cn/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) +* [调试 Init 容器](/zh-cn/docs/tasks/debug/debug-application/debug-init-containers/) +* [kubelet](/zh-cn/docs/reference/command-line-tools-reference/kubelet/) 和 [kubectl](/zh-cn/docs/reference/kubectl/) 的概述。 -* 了解探针的[类型](/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe): +* 
[探针类型](/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe): 存活态探针、就绪态探针、启动探针。 +* [边车容器](/zh-cn/docs/concepts/workloads/pods/sidecar-containers)。 diff --git a/content/zh-cn/docs/concepts/workloads/pods/pod-lifecycle.md b/content/zh-cn/docs/concepts/workloads/pods/pod-lifecycle.md index ba1c6d8f14490..e533fbe698a91 100644 --- a/content/zh-cn/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/zh-cn/docs/concepts/workloads/pods/pod-lifecycle.md @@ -320,7 +320,7 @@ Pod 级别的 `restartPolicy` 字段:在 Kubernetes 中,Sidecar 被定义为 When the kubelet is handling container restarts according to the configured restart policy, that only applies to restarts that make replacement containers inside the same Pod and running on the same node. After containers in a Pod exit, the kubelet -restarts them with an exponential back-off delay (10s, 20s,40s, …), that is capped at +restarts them with an exponential back-off delay (10s, 20s, 40s, …), that is capped at five minutes. Once a container has executed for 10 minutes without any problems, the kubelet resets the restart backoff timer for that container. [Sidecar containers and Pod lifecycle](/docs/concepts/workloads/pods/sidecar-containers/#sidecar-containers-and-pod-lifecycle) diff --git a/content/zh-cn/docs/reference/command-line-tools-reference/feature-gates/readonly-apidata-volumes.md b/content/zh-cn/docs/reference/command-line-tools-reference/feature-gates/readonly-apidata-volumes.md new file mode 100644 index 0000000000000..98b0348d9eea0 --- /dev/null +++ b/content/zh-cn/docs/reference/command-line-tools-reference/feature-gates/readonly-apidata-volumes.md @@ -0,0 +1,36 @@ +--- +# Removed from Kubernetes +title: ReadOnlyAPIDataVolumes +content_type: feature_gate + +_build: + list: never + render: false + +stages: + - stage: beta + defaultValue: true + fromVersion: "1.8" + toVersion: "1.9" + - stage: stable + fromVersion: "1.10" + toVersion: "1.10" + +removed: true +--- + + +请参阅以只读方式挂载的 [`configMap`](/zh-cn/docs/concepts/storage/volumes/#configmap)、 +[`secret`](/zh-cn/docs/concepts/storage/volumes/#secret)、 +[`downwardAPI`](/zh-cn/docs/concepts/storage/volumes/#downwardapi) +和 [`projected`](/zh-cn/docs/concepts/storage/volumes/#projected) 卷。 + +自 Kubernetes v1.10 起,这些卷类型始终是只读的,无法选择其它模式。 diff --git a/content/zh-cn/docs/reference/config-api/kubeadm-config.v1beta3.md b/content/zh-cn/docs/reference/config-api/kubeadm-config.v1beta3.md index 2dda92ebd1170..a70e41f17e8ab 100644 --- a/content/zh-cn/docs/reference/config-api/kubeadm-config.v1beta3.md +++ b/content/zh-cn/docs/reference/config-api/kubeadm-config.v1beta3.md @@ -496,7 +496,7 @@ for, so other administrators can know its purpose. expires
-meta/v1.Time +meta/v1.Time
ClusterConfiguration 包含一个 kubeadm 集群的集群范围配置信息。
@@ -818,10 +818,12 @@ interface and use that, but in case that process fails you may set the desired v
certificateKey 用来设置一个密钥,该密钥将对 uploadcerts init -阶段上传到集群中某 Secret 内的密钥和证书加密。
+阶段上传到集群中某 Secret 内的密钥和证书加密。 +证书密钥是十六进制编码的字符串,是长度为 32 字节的 AES 密钥。
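The added sentences document that `certificateKey` is a hex-encoded string holding a 32-byte AES key. As a hedged sketch (exact command availability depends on your kubeadm release), such a value can be generated in either of the following ways:

```shell
# Have kubeadm generate a suitable random key, printed as 64 hex characters:
kubeadm certs certificate-key

# Or generate 32 random bytes, hex-encoded, with OpenSSL:
openssl rand -hex 32
```

Either output can then be supplied as the `certificateKey` field (or via `--certificate-key`) so the upload-certs phase can encrypt the control-plane certificates.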
skipPhases
@@ -1074,7 +1076,7 @@ APIServer 包含集群中 API 服务器部署所必需的设置。
BootstrapTokenDiscovery 用来设置基于引导令牌的服务发现选项。
@@ -1529,7 +1531,7 @@ HostPathMount contains elements describing volumes that are mounted from the hos pathType
-core/v1.HostPathType +core/v1.HostPathType
certificateKey 是在添加新的控制面节点时用来解密所下载的 -Secret 中的证书的密钥。对应的加密密钥在 InitConfiguration 结构中。
+Secret 中的证书的密钥。对应的加密密钥在 InitConfiguration 结构中。 +证书密钥是十六进制编码的字符串,是长度为 32 字节的 AES 密钥。
@@ -1645,7 +1649,7 @@ Secret 中的证书的密钥。对应的加密密钥在 InitConfiguration 结构 - [Etcd](#kubeadm-k8s-io-v1beta3-Etcd)
LocalEtcd 描述的是 kubeadm 要使用的本地 etcd 集群。
@@ -1729,7 +1733,7 @@ signing certificate. - [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration)
Networking 中包含描述集群网络配置的元素。
@@ -1784,7 +1788,7 @@ Networking contains elements describing cluster's networking configuration
NodeRegistrationOptions 包含向集群中注册新的控制面或节点所需要的信息; 节点注册可能通过 "kubeadm init" 或 "kubeadm join" 完成。
@@ -1823,7 +1827,7 @@ This information will be annotated to the Node API object, for later re-use. taints [必需]
-[]core/v1.Taint +[]core/v1.Taint @@ -573,7 +573,7 @@ kubectl label pods my-pod new-label=awesome # 添加标签 kubectl label pods my-pod new-label- # 移除标签 kubectl label pods my-pod new-label=new-value --overwrite # 覆盖现有的值 kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq # 添加注解 -kubectl annotate pods my-pod icon- # 移除注解 +kubectl annotate pods my-pod icon-url- # 移除注解 kubectl autoscale deployment foo --min=2 --max=10 # 对 "foo" Deployment 自动扩缩容 ``` diff --git a/content/zh-cn/docs/reference/kubernetes-api/other-resources/validating-admission-policy-binding-list-v1alpha1.md b/content/zh-cn/docs/reference/kubernetes-api/other-resources/validating-admission-policy-binding-list-v1beta1.md similarity index 55% rename from content/zh-cn/docs/reference/kubernetes-api/other-resources/validating-admission-policy-binding-list-v1alpha1.md rename to content/zh-cn/docs/reference/kubernetes-api/other-resources/validating-admission-policy-binding-list-v1beta1.md index dffd13e539838..1c9700de6b271 100644 --- a/content/zh-cn/docs/reference/kubernetes-api/other-resources/validating-admission-policy-binding-list-v1alpha1.md +++ b/content/zh-cn/docs/reference/kubernetes-api/other-resources/validating-admission-policy-binding-list-v1beta1.md @@ -8,18 +8,9 @@ description: "" title: "ValidatingAdmissionPolicyBindingList v1beta1" weight: 1 --- - `apiVersion: admissionregistration.k8s.io/v1beta1` `import "k8s.io/api/admissionregistration/v1beta1"` + + diff --git a/content/zh-cn/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/zh-cn/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index e02fa6c56f125..bbac127f41312 100644 --- a/content/zh-cn/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/zh-cn/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -620,9 +620,15 @@ See a list of add-ons that implement the [Kubernetes 网络模型](/zh-cn/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model)的附加组件列表。 +请参阅[安装插件](/zh-cn/docs/concepts/cluster-administration/addons/#networking-and-network-policy)页面, +了解 Kubernetes 支持的网络插件的非详尽列表。 + 你可以使用以下命令在控制平面节点或具有 kubeconfig 凭据的节点上安装 Pod 网络附加组件: ```bash @@ -707,6 +713,19 @@ scheduler will then be able to schedule Pods everywhere. 污点的节点(包括控制平面节点)上移除该污点。 这意味着调度程序将能够在任何地方调度 Pod。 + +此外,你可以执行以下命令从控制平面节点中删除 +[`node.kubernetes.io/exclude-from-external-load-balancers`](/zh-cn/docs/reference/labels-annotations-taints/#node-kubernetes-io-exclude-from-external-load-balancers) +标签,这会将其从后端服务器列表中排除: + +```bash +kubectl label nodes --all node.kubernetes.io/exclude-from-external-load-balancers- +``` + @@ -1006,64 +1025,6 @@ options. 
有关此子命令及其选项的更多信息,请参见 [`kubeadm reset`](/zh-cn/docs/reference/setup-tools/kubeadm/kubeadm-reset/) 参考文档。 - - - -## 下一步 {#whats-next} - - -* 使用 [Sonobuoy](https://github.com/heptio/sonobuoy) 验证集群是否正常运行。 -* 有关使用 kubeadm 升级集群的详细信息, - 请参阅[升级 kubeadm 集群](/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)。 -* 在 [kubeadm 参考文档](/zh-cn/docs/reference/setup-tools/kubeadm/)中了解有关 `kubeadm` 进阶用法的信息。 -* 了解有关 Kubernetes [概念](/zh-cn/docs/concepts/)和 [`kubectl`](/zh-cn/docs/reference/kubectl/)的更多信息。 -* 有关 Pod 网络附加组件的更多列表,请参见[集群网络](/zh-cn/docs/concepts/cluster-administration/networking/)页面。 -* 请参阅[附加组件列表](/zh-cn/docs/concepts/cluster-administration/addons/)以探索其他附加组件, - 包括用于 Kubernetes 集群的日志记录、监视、网络策略、可视化和控制的工具。 -* 配置集群如何处理集群事件的日志以及在 Pod 中运行的应用程序。 - 有关所涉及内容的概述,请参见[日志架构](/zh-cn/docs/concepts/cluster-administration/logging/)。 - - -### 反馈 {#feedback} - - -* 有关漏洞,访问 [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues) -* 有关支持,访问 - [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/) Slack 频道 -* 常规的 SIG Cluster Lifecycle 开发 Slack 频道: - [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/) -* SIG Cluster Lifecycle 的 [SIG 资料](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle#readme) -* SIG Cluster Lifecycle 邮件列表: - [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) - @@ -1260,3 +1221,61 @@ If you are running into difficulties with kubeadm, please consult our --> 如果你在使用 kubeadm 时遇到困难, 请查阅我们的[故障排除文档](/zh-cn/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)。 + + + + +## 下一步 {#whats-next} + + +* 使用 [Sonobuoy](https://github.com/heptio/sonobuoy) 验证集群是否正常运行。 +* 有关使用 kubeadm 升级集群的详细信息, + 请参阅[升级 kubeadm 集群](/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)。 +* 在 [kubeadm 参考文档](/zh-cn/docs/reference/setup-tools/kubeadm/)中了解有关 `kubeadm` 进阶用法的信息。 +* 了解有关 Kubernetes [概念](/zh-cn/docs/concepts/)和 [`kubectl`](/zh-cn/docs/reference/kubectl/)的更多信息。 +* 有关 Pod 网络附加组件的更多列表,请参见[集群网络](/zh-cn/docs/concepts/cluster-administration/networking/)页面。 +* 请参阅[附加组件列表](/zh-cn/docs/concepts/cluster-administration/addons/)以探索其他附加组件, + 包括用于 Kubernetes 集群的日志记录、监视、网络策略、可视化和控制的工具。 +* 配置集群如何处理集群事件的日志以及在 Pod 中运行的应用程序。 + 有关所涉及内容的概述,请参见[日志架构](/zh-cn/docs/concepts/cluster-administration/logging/)。 + + +### 反馈 {#feedback} + + +* 有关漏洞,访问 [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues) +* 有关支持,访问 + [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/) Slack 频道 +* 常规的 SIG Cluster Lifecycle 开发 Slack 频道: + [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/) +* SIG Cluster Lifecycle 的 [SIG 资料](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle#readme) +* SIG Cluster Lifecycle 邮件列表: + [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) diff --git a/content/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index 254ba6914690b..c066100703dcd 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -82,7 +82,7 @@ The upgrade workflow at high level is the following: they could be running CoreDNS Pods or other critical workloads. For more information see [Draining nodes](/docs/tasks/administer-cluster/safely-drain-node/). 
- The Kubernetes project recommends that you match your kubelet and kubeadm versions. - You can instead use an a version of kubelet that is older than kubeadm, provided it is within the + You can instead use a version of kubelet that is older than kubeadm, provided it is within the range of supported versions. For more details, please visit [kubeadm's skew against the kubelet](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#kubeadm-s-skew-against-the-kubelet). - All containers are restarted after upgrade, because the container spec hash value is changed. @@ -139,28 +139,37 @@ Find the latest patch release for Kubernetes {{< skew currentVersion >}} using t 使用操作系统的包管理器找到最新的补丁版本 Kubernetes {{< skew currentVersion >}}: {{< tabs name="k8s_install_versions" >}} -{{% tab name="Ubuntu, Debian or HypriotOS" %}} +{{% tab name="Ubuntu、Debian 或 HypriotOS" %}} + ```shell # 在列表中查找最新的 {{< skew currentVersion >}} 版本 # 它看起来应该是 {{< skew currentVersion >}}.x-*,其中 x 是最新的补丁版本 -apt update -apt-cache madison kubeadm +sudo apt update +sudo apt-cache madison kubeadm ``` {{% /tab %}} -{{% tab name="CentOS, RHEL or Fedora" %}} +{{% tab name="CentOS、RHEL 或 Fedora" %}} + ```shell # 在列表中查找最新的 {{< skew currentVersion >}} 版本 # 它看起来应该是 {{< skew currentVersion >}}.x-*,其中 x 是最新的补丁版本 -yum list --showduplicates kubeadm --disableexcludes=kubernetes +sudo yum list --showduplicates kubeadm --disableexcludes=kubernetes ``` {{% /tab %}} @@ -195,27 +204,35 @@ Pick a control plane node that you wish to upgrade first. It must have the `/etc 1. 升级 kubeadm: {{< tabs name="k8s_install_kubeadm_first_cp" >}} - {{% tab name="Ubuntu, Debian or HypriotOS" %}} + {{% tab name="Ubuntu、Debian 或 HypriotOS" %}} ```shell # 用最新的补丁版本号替换 {{< skew currentVersion >}}.x-* 中的 x - apt-mark unhold kubeadm && \ - apt-get update && apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ - apt-mark hold kubeadm + sudo apt-mark unhold kubeadm && \ + sudo apt-get update && sudo apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ + sudo apt-mark hold kubeadm ``` {{% /tab %}} - {{% tab name="CentOS, RHEL or Fedora" %}} + {{% tab name="CentOS、RHEL 或 Fedora" %}} ```shell # 用最新的补丁版本号替换 {{< skew currentVersion >}}.x-* 中的 x - yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes + sudo yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} @@ -236,7 +253,7 @@ Pick a control plane node that you wish to upgrade first. It must have the `/etc 3. 
验证升级计划: ```shell - kubeadm upgrade plan + sudo kubeadm upgrade plan ``` ```shell # 用最新的补丁版本替换 {{< skew currentVersion >}}.x-* 中的 x - apt-mark unhold kubelet kubectl && \ - apt-get update && apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ - apt-mark hold kubelet kubectl + sudo apt-mark unhold kubelet kubectl && \ + sudo apt-get update && sudo apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ + sudo apt-mark hold kubelet kubectl ``` {{% /tab %}} - {{% tab name="CentOS, RHEL or Fedora" %}} + {{% tab name="CentOS、RHEL 或 Fedora" %}} ```shell # 用最新的补丁版本号替换 {{< skew currentVersion >}}.x-* 中的 x - yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes + sudo yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} @@ -517,9 +542,9 @@ This command is idempotent and eventually makes sure that the actual state is th 此命令是幂等的,并最终确保实际状态是你声明的期望状态。 -要从故障状态恢复,你还可以运行 `kubeadm upgrade apply --force` 而无需更改集群正在运行的版本。 +要从故障状态恢复,你还可以运行 `sudo kubeadm upgrade apply --force` 而无需更改集群正在运行的版本。 ```shell # 将 {{< skew currentVersion >}}.x-* 中的 x 替换为最新的补丁版本 -apt-mark unhold kubeadm && \ -apt-get update && apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ -apt-mark hold kubeadm +sudo apt-mark unhold kubeadm && \ +sudo apt-get update && sudo apt-get install -y kubeadm='{{< skew currentVersion >}}.x-*' && \ +sudo apt-mark hold kubeadm ``` {{% /tab %}} {{% tab name="CentOS、RHEL 或 Fedora" %}} ```shell # 将 {{< skew currentVersion >}}.x-* 中的 x 替换为最新的补丁版本 -yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes +sudo yum install -y kubeadm-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} {{< /tabs >}} @@ -105,8 +113,11 @@ Prepare the node for maintenance by marking it unschedulable and evicting the wo 将节点标记为不可调度并驱逐所有负载,准备节点的维护: ```shell # 在控制平面节点上执行此命令 @@ -126,22 +137,30 @@ kubectl drain --ignore-daemonsets {{< tabs name="k8s_kubelet_and_kubectl" >}} {{% tab name="Ubuntu、Debian 或 HypriotOS" %}} ```shell # 将 {{< skew currentVersion >}}.x-* 中的 x 替换为最新的补丁版本 - apt-mark unhold kubelet kubectl && \ - apt-get update && apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ - apt-mark hold kubelet kubectl + sudo apt-mark unhold kubelet kubectl && \ + sudo apt-get update && sudo apt-get install -y kubelet='{{< skew currentVersion >}}.x-*' kubectl='{{< skew currentVersion >}}.x-*' && \ + sudo apt-mark hold kubelet kubectl ``` {{% /tab %}} {{% tab name="CentOS、RHEL 或 Fedora" %}} ```shell # 将 {{< skew currentVersion >}}.x-* 中的 x 替换为最新的补丁版本 - yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes + sudo yum install -y kubelet-'{{< skew currentVersion >}}.x-*' kubectl-'{{< skew currentVersion >}}.x-*' --disableexcludes=kubernetes ``` {{% /tab %}} {{< /tabs >}} @@ -166,8 +185,11 @@ Bring the node back online by marking it schedulable: 通过将节点标记为可调度,让节点重新上线: ```shell # 在控制平面节点上执行此命令 diff --git a/content/zh-cn/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md b/content/zh-cn/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md index 9d25d1f87fb01..872b391e621cf 100644 --- a/content/zh-cn/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md +++ 
b/content/zh-cn/docs/tasks/extend-kubernetes/socks5-proxy-access-api.md @@ -64,7 +64,7 @@ Figure 1 represents what you're going to achieve in this task. graph LR; subgraph local[Local client machine] - client([client])-- local
traffic .-> local_ssh[Local SSH <br> SOCKS5 proxy];
+ client([client])-. local <br> traffic .-> local_ssh[Local SSH <br> SOCKS5 proxy];
end
local_ssh[SSH <br>SOCKS5 <br>proxy]-- SSH Tunnel -->sshd
@@ -86,9 +86,9 @@

graph LR;
subgraph local[本地客户端机器]
- client([客户端])-- 本地 <br> 流量.-> local_ssh[本地 SSH <br> SOCKS5 代理];
+ client([客户端])-. 本地 <br> 流量.-> local_ssh[本地 SSH <br> SOCKS5 代理];
end
- ocal_ssh[SSH <br>SOCKS5 <br>代理]-- SSH 隧道 -->sshd
+ local_ssh[SSH <br>SOCKS5 <br>代理]-- SSH 隧道 -->sshd
subgraph remote[远程服务器]
sshd[SSH <br>
服务器]-- 本地流量 -->service1; diff --git a/content/zh-cn/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/zh-cn/docs/tasks/job/coarse-parallel-processing-work-queue.md index 80a193750d5cd..f7fa4c95c47bb 100644 --- a/content/zh-cn/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/zh-cn/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -92,14 +92,14 @@ Start RabbitMQ as follows: --> ```shell # 为 StatefulSet 创建一个 Service 来使用 -kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq-service.yaml +kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq/rabbitmq-service.yaml ``` ``` service "rabbitmq-service" created ``` ```shell -kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq-statefulset.yaml +kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml ``` ``` diff --git a/data/i18n/en/en.toml b/data/i18n/en/en.toml index 4ada028a4737a..31a1bc5bd335f 100644 --- a/data/i18n/en/en.toml +++ b/data/i18n/en/en.toml @@ -151,6 +151,12 @@ other = "Until" [feature_state] other = "FEATURE STATE:" +[feature_state_kubernetes_label] +other = "Kubernetes" + +[feature_state_feature_gate_tooltip] +other = "Feature Gate:" + [feedback_heading] other = "Feedback" @@ -541,10 +547,13 @@ other="""Third party content advice""" [thirdparty_message_single_item] other = """🛇 This item links to a third party project or product that is not part of Kubernetes itself.
More information""" - [thirdparty_message_disclaimer] other = """
Items on this page refer to third party products or projects that provide functionality required by Kubernetes. The Kubernetes project authors aren't responsible for those third-party products or projects. See the CNCF website guidelines for more details.
You should read the content guide before proposing a change that adds an extra third-party link.
""" +[thirdparty_message_vendor] +other = """Items on this page refer to vendors external to Kubernetes. The Kubernetes project authors aren't responsible for those third-party products or projects. To add a vendor, product or project to this list, read the content guide before submitting a change. More information.""" + + [ui_search_placeholder] other = "Search this site" diff --git a/data/releases/schedule.yaml b/data/releases/schedule.yaml index b8a6ce0f6292d..70f9c4c3d622e 100644 --- a/data/releases/schedule.yaml +++ b/data/releases/schedule.yaml @@ -5,12 +5,15 @@ schedules: - release: 1.29 releaseDate: 2023-12-13 next: - release: 1.29.2 - cherryPickDeadline: 2024-02-09 - targetDate: 2024-02-14 + release: 1.29.3 + cherryPickDeadline: 2024-03-08 + targetDate: 2024-03-12 maintenanceModeStartDate: 2024-12-28 endOfLifeDate: 2025-02-28 previousPatches: + - release: 1.29.2 + cherryPickDeadline: 2024-02-09 + targetDate: 2024-02-14 - release: 1.29.1 cherryPickDeadline: 2024-01-12 targetDate: 2024-01-17 @@ -19,12 +22,15 @@ schedules: - release: 1.28 releaseDate: 2023-08-15 next: - release: 1.28.7 - cherryPickDeadline: 2024-02-09 - targetDate: 2024-02-14 + release: 1.28.8 + cherryPickDeadline: 2024-03-08 + targetDate: 2024-03-12 maintenanceModeStartDate: 2024-08-28 endOfLifeDate: 2024-10-28 previousPatches: + - release: 1.28.7 + cherryPickDeadline: 2024-02-09 + targetDate: 2024-02-14 - release: 1.28.6 cherryPickDeadline: 2023-01-12 targetDate: 2024-01-17 @@ -54,10 +60,13 @@ schedules: maintenanceModeStartDate: 2024-04-28 endOfLifeDate: 2024-06-28 next: - release: 1.27.11 - cherryPickDeadline: 2024-02-09 - targetDate: 2024-02-14 + release: 1.27.12 + cherryPickDeadline: 2024-03-08 + targetDate: 2024-03-13 previousPatches: + - release: 1.27.11 + cherryPickDeadline: 2024-02-09 + targetDate: 2024-02-14 - release: 1.27.10 cherryPickDeadline: 2023-01-12 targetDate: 2024-01-17 @@ -100,10 +109,11 @@ schedules: maintenanceModeStartDate: 2023-12-28 endOfLifeDate: 2024-02-28 next: - release: 1.26.14 - cherryPickDeadline: 2024-02-09 - targetDate: 2024-02-14 + release: Not Planned previousPatches: + - release: 1.26.14 + cherryPickDeadline: 2024-02-09 + targetDate: 2024-02-14 - release: 1.26.13 cherryPickDeadline: 2023-01-12 targetDate: 2024-01-17 diff --git a/layouts/docs/glossary.html b/layouts/docs/glossary.html index 78b1c348dd230..74772bb3e5ee6 100644 --- a/layouts/docs/glossary.html +++ b/layouts/docs/glossary.html @@ -43,7 +43,7 @@
{{ .Title }}
{{ .Title }}
{{ with .Params.aka }} - {{ T "layouts_docs_glossary_aka" }}:{{ delimit . ", " }} + {{ T "layouts_docs_glossary_aka" }}: {{ delimit . ", " }}
{{ end }} {{ .Summary }} [+] diff --git a/layouts/shortcodes/feature-state.html b/layouts/shortcodes/feature-state.html index 828241f50ee34..822f68d822c08 100644 --- a/layouts/shortcodes/feature-state.html +++ b/layouts/shortcodes/feature-state.html @@ -2,10 +2,56 @@ {{ $state := .Get "state" }} {{ $for_k8s_version := .Get "for_k8s_version" | default (.Page.Param "version")}} {{ $is_valid := strings.Contains $valid_states $state }} -{{ if not $is_valid }} -{{ errorf "%q is not a valid feature-state, use one of %q" $state $valid_states }} -{{ else }} -
- {{ T "feature_state" }} Kubernetes {{ $for_k8s_version }} [{{ $state }}] -
+{{ $feature_gate_name := .Get "feature_gate_name" }} + + +{{ if not $feature_gate_name }} + {{ if not $is_valid }} + {{ errorf "%q is not a valid feature-state, use one of %q" $state $valid_states }} + {{ else }} +
+ {{ T "feature_state" }} {{T "feature_state_kubernetes_label" }} {{ $for_k8s_version }} [{{ $state }}] +
+ {{ end }} + +{{- else -}} + + + + {{- $featureGateDescriptionFilesPath := "/docs/reference/command-line-tools-reference/feature-gates" -}} + + + {{- with index (where .Site.Sites "Language.Lang" "eq" "en") 0 -}} + {{- with .GetPage $featureGateDescriptionFilesPath -}} + + + {{- $sortedFeatureGates := sort (.Resources.ByType "page") -}} + {{- $featureGateFound := false -}} + + + {{- range $featureGateFile := $sortedFeatureGates -}} + {{- $featureGateNameFromFile := $featureGateFile.Params.Title -}} + + {{- if eq $featureGateNameFromFile $feature_gate_name -}} + + {{- $currentStage := index $featureGateFile.Params.stages (sub (len $featureGateFile.Params.stages) 1) -}} + {{- with $currentStage -}} + + +
+ {{ T "feature_state" }} {{T "feature_state_kubernetes_label" }} v{{ .fromVersion }} [{{ .stage }}] +
+ + {{- $featureGateFound = true -}} + {{- end -}} + {{- end -}} + {{- end -}} + + + {{- if not $featureGateFound -}} + {{- errorf "Invalid feature gate: '%s' is not recognized or lacks a matching description file in '%s'" $feature_gate_name (print "en" $featureGateDescriptionFilesPath) -}} + {{- end -}} + + {{- end -}} + {{- end -}} {{ end }} diff --git a/layouts/shortcodes/thirdparty-content.html b/layouts/shortcodes/thirdparty-content.html index 36fbdef13e04b..a252e93c3dd38 100644 --- a/layouts/shortcodes/thirdparty-content.html +++ b/layouts/shortcodes/thirdparty-content.html @@ -1,8 +1,13 @@ {{- $single := .Get "single" | default "false" -}} +{{- $vendor_message := .Get "vendor" | default "false" -}}
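To sanity-check layout and shortcode edits like the `feature-state` and `thirdparty-content` changes above, a local preview build is usually enough. This is a sketch, assuming the repository's standard tooling (a Make target that builds in a container, requiring Docker) or a local Hugo extended install:

```shell
# Containerized preview using the repository's Make target (assumes Docker is available):
make container-serve

# Alternatively, with a local Hugo (extended) installation:
# hugo server
```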