diff --git a/bootstrap/kubeadm/config/manager/manager.yaml b/bootstrap/kubeadm/config/manager/manager.yaml
index 8f5805b24425..b006f1b00886 100644
--- a/bootstrap/kubeadm/config/manager/manager.yaml
+++ b/bootstrap/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 6e4d23e7ad32..9a928eca182b 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -42,3 +42,5 @@ spec:
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
diff --git a/controlplane/kubeadm/config/manager/manager.yaml b/controlplane/kubeadm/config/manager/manager.yaml
index 99b372b44e30..baa32cd401af 100644
--- a/controlplane/kubeadm/config/manager/manager.yaml
+++ b/controlplane/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go
index 4a29c10a1b39..011fef2775eb 100644
--- a/controlplane/kubeadm/internal/controllers/controller_test.go
+++ b/controlplane/kubeadm/internal/controllers/controller_test.go
@@ -1516,7 +1516,7 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control
     node := &corev1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name:   name,
-            Labels: map[string]string{"node-role.kubernetes.io/master": ""},
+            Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
         },
     }
 
diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go
index f386a8be10da..6b18651cb3ab 100644
--- a/controlplane/kubeadm/internal/workload_cluster.go
+++ b/controlplane/kubeadm/internal/workload_cluster.go
@@ -36,6 +36,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/client-go/util/retry"
     ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/yaml"
@@ -51,13 +52,14 @@ import (
 )
 
 const (
-    kubeProxyKey              = "kube-proxy"
-    kubeadmConfigKey          = "kubeadm-config"
-    kubeletConfigKey          = "kubelet"
-    cgroupDriverKey           = "cgroupDriver"
-    labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
-    clusterStatusKey          = "ClusterStatus"
-    clusterConfigurationKey   = "ClusterConfiguration"
+    kubeProxyKey                 = "kube-proxy"
+    kubeadmConfigKey             = "kubeadm-config"
+    kubeletConfigKey             = "kubelet"
+    cgroupDriverKey              = "cgroupDriver"
+    labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
+    labelNodeRoleControlPlane    = "node-role.kubernetes.io/control-plane"
+    clusterStatusKey             = "ClusterStatus"
+    clusterConfigurationKey      = "ClusterConfiguration"
 )
 
 var (
@@ -121,14 +123,31 @@ type Workload struct {
 var _ WorkloadCluster = &Workload{}
 
 func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
-    nodes := &corev1.NodeList{}
-    labels := map[string]string{
-        labelNodeRoleControlPlane: "",
-    }
-    if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
-        return nil, err
+    controlPlaneNodes := &corev1.NodeList{}
+    controlPlaneNodeNames := sets.NewString()
+
+    for _, label := range []string{labelNodeRoleOldControlPlane, labelNodeRoleControlPlane} {
+        nodes := &corev1.NodeList{}
+        if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(map[string]string{
+            label: "",
+        })); err != nil {
+            return nil, err
+        }
+
+        for i := range nodes.Items {
+            node := nodes.Items[i]
+
+            // Continue if we already added that node.
+            if controlPlaneNodeNames.Has(node.Name) {
+                continue
+            }
+
+            controlPlaneNodeNames.Insert(node.Name)
+            controlPlaneNodes.Items = append(controlPlaneNodes.Items, node)
+        }
     }
-    return nodes, nil
+
+    return controlPlaneNodes, nil
 }
 
 func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.ObjectKey) (*corev1.ConfigMap, error) {
diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go
index 2b44f0d90549..5f44f7e131c9 100644
--- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go
@@ -980,6 +980,9 @@ func fakeNode(name string, options ...fakeNodeOption) *corev1.Node {
     p := &corev1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
+            Labels: map[string]string{
+                labelNodeRoleControlPlane: "",
+            },
         },
     }
     for _, opt := range options {
diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go
index d1035302e5c9..704e91096629 100644
--- a/controlplane/kubeadm/internal/workload_cluster_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_test.go
@@ -38,6 +38,78 @@ import (
     "sigs.k8s.io/cluster-api/util/yaml"
 )
 
+func TestGetControlPlaneNodes(t *testing.T) {
+    tests := []struct {
+        name          string
+        nodes         []corev1.Node
+        expectedNodes []string
+    }{
+        {
+            name: "Return control plane nodes",
+            nodes: []corev1.Node{
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "control-plane-node-with-old-label",
+                        Labels: map[string]string{
+                            labelNodeRoleOldControlPlane: "",
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "control-plane-node-with-both-labels",
+                        Labels: map[string]string{
+                            labelNodeRoleOldControlPlane: "",
+                            labelNodeRoleControlPlane:    "",
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "control-plane-node-with-new-label",
+                        Labels: map[string]string{
+                            labelNodeRoleControlPlane: "",
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:   "worker-node",
+                        Labels: map[string]string{},
+                    },
+                },
+            },
+            expectedNodes: []string{
+                "control-plane-node-with-both-labels",
+                "control-plane-node-with-old-label",
+                "control-plane-node-with-new-label",
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            g := NewWithT(t)
+            objs := []client.Object{}
+            for i := range tt.nodes {
+                objs = append(objs, &tt.nodes[i])
+            }
+            fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()
+
+            w := &Workload{
+                Client: fakeClient,
+            }
+            nodes, err := w.getControlPlaneNodes(ctx)
+            g.Expect(err).ToNot(HaveOccurred())
+            var actualNodes []string
+            for _, n := range nodes.Items {
+                actualNodes = append(actualNodes, n.Name)
+            }
+            g.Expect(actualNodes).To(Equal(tt.expectedNodes))
+        })
+    }
+}
+
 func TestUpdateKubeProxyImageInfo(t *testing.T) {
     tests := []struct {
         name string
diff --git a/docs/book/src/user/troubleshooting.md b/docs/book/src/user/troubleshooting.md
index 69f9e0013dc0..b3be3f88a122 100644
--- a/docs/book/src/user/troubleshooting.md
+++ b/docs/book/src/user/troubleshooting.md
@@ -35,7 +35,10 @@ kubectl label nodes node-role.kubernetes.io/worker=""
 For convenience, here is an example one-liner to do this post installation
 
 ```
+# Kubernetes 1.19 (kubeadm 1.19 sets only the node-role.kubernetes.io/master label)
 kubectl get nodes --no-headers -l '!node-role.kubernetes.io/master' -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | xargs -I{} kubectl label node {} node-role.kubernetes.io/worker=''
+# Kubernetes >= 1.20 (kubeadm >= 1.20 sets the node-role.kubernetes.io/control-plane label)
+kubectl get nodes --no-headers -l '!node-role.kubernetes.io/control-plane' -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | xargs -I{} kubectl label node {} node-role.kubernetes.io/worker=''
 ```
 
 ## Cluster API with Docker
diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go
index 704d3c0e0c8f..405a6d57edfb 100644
--- a/test/framework/deployment_helpers.go
+++ b/test/framework/deployment_helpers.go
@@ -34,6 +34,7 @@ import (
     "k8s.io/api/policy/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
+    utilversion "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/client-go/kubernetes"
     "k8s.io/utils/pointer"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -42,6 +43,11 @@ import (
     "sigs.k8s.io/cluster-api/test/framework/internal/log"
 )
 
+const (
+    nodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
+    nodeRoleControlPlane    = "node-role.kubernetes.io/control-plane"
+)
+
 // WaitForDeploymentsAvailableInput is the input for WaitForDeploymentsAvailable.
 type WaitForDeploymentsAvailableInput struct {
     Getter     Getter
@@ -306,10 +312,23 @@ func DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput)
         },
     }
     if input.ControlPlane != nil {
-        workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
+        serverVersion, err := workloadClient.ServerVersion()
+        Expect(err).ToNot(HaveOccurred())
+
+        // Use the control-plane label for Kubernetes version >= v1.20.0.
+        if utilversion.MustParseGeneric(serverVersion.String()).AtLeast(utilversion.MustParseGeneric("v1.20.0")) {
+            workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleControlPlane: ""}
+        } else {
+            workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleOldControlPlane: ""}
+        }
+
         workloadDeployment.Spec.Template.Spec.Tolerations = []corev1.Toleration{
             {
-                Key:    "node-role.kubernetes.io/master",
+                Key:    nodeRoleOldControlPlane,
+                Effect: "NoSchedule",
+            },
+            {
+                Key:    nodeRoleControlPlane,
                 Effect: "NoSchedule",
             },
         }
diff --git a/test/infrastructure/docker/config/manager/manager.yaml b/test/infrastructure/docker/config/manager/manager.yaml
index 4454c900483b..fa2488f99735 100644
--- a/test/infrastructure/docker/config/manager/manager.yaml
+++ b/test/infrastructure/docker/config/manager/manager.yaml
@@ -44,6 +44,8 @@ spec:
       tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
       volumes:
      - name: dockersock
        hostPath: