From f0fbf7a87a430825b95d2003694ab1c15d7cf8ec Mon Sep 17 00:00:00 2001 From: cedric lamoriniere Date: Thu, 24 Oct 2019 11:43:47 +0200 Subject: [PATCH] =?UTF-8?q?[cluster-autoscaler]=20Allow=20=E2=80=9Ccustom?= =?UTF-8?q?=E2=80=9D=20DaemonSet=20pods?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create `utils/pod` package: pod kind detection functions. * Update DaemonSet Pod detection logic: introduce the annotation `cluster-autoscaler.kubernetes.io/daemonset-pod` to declare a Pod as a DaemonSet Pod. * Update `simulator` package to use the new `utils/pod` package function. * Cleanup `getRequiredPodsForNode()` function. Signed-off-by: cedric lamoriniere --- cluster-autoscaler/simulator/cluster.go | 17 +- cluster-autoscaler/simulator/cluster_test.go | 6 +- cluster-autoscaler/simulator/drain.go | 2 - cluster-autoscaler/simulator/nodes.go | 55 ++--- cluster-autoscaler/simulator/nodes_test.go | 119 ++++++++++- cluster-autoscaler/utils/drain/drain.go | 40 ++-- cluster-autoscaler/utils/drain/drain_test.go | 24 ++- cluster-autoscaler/utils/pod/pod.go | 62 ++++++ cluster-autoscaler/utils/pod/pod_test.go | 199 +++++++++++++++++++ 9 files changed, 445 insertions(+), 79 deletions(-) create mode 100644 cluster-autoscaler/utils/pod/pod.go create mode 100644 cluster-autoscaler/utils/pod/pod_test.go diff --git a/cluster-autoscaler/simulator/cluster.go b/cluster-autoscaler/simulator/cluster.go index 0b9b290439c..0e82e29584e 100644 --- a/cluster-autoscaler/simulator/cluster.go +++ b/cluster-autoscaler/simulator/cluster.go @@ -22,18 +22,18 @@ import ( "math/rand" "time" - "k8s.io/autoscaler/cluster-autoscaler/utils/drain" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/glogx" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" + pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" scheduler_util 
"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" "k8s.io/autoscaler/cluster-autoscaler/utils/tpu" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" apiv1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/klog" @@ -201,11 +201,11 @@ func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulernodeinf podsRequest := resource.MustParse("0") for _, pod := range nodeInfo.Pods() { // factor daemonset pods out of the utilization calculations - if skipDaemonSetPods && isDaemonSet(pod) { + if skipDaemonSetPods && pod_util.IsDaemonSetPod(pod) { continue } // factor mirror pods out of the utilization calculations - if skipMirrorPods && drain.IsMirrorPod(pod) { + if skipMirrorPods && pod_util.IsMirrorPod(pod) { continue } for _, container := range pod.Spec.Containers { @@ -318,12 +318,3 @@ func shuffleNodes(nodes []*apiv1.Node) []*apiv1.Node { } return result } - -func isDaemonSet(pod *apiv1.Pod) bool { - for _, ownerReference := range pod.ObjectMeta.OwnerReferences { - if ownerReference.Kind == "DaemonSet" { - return true - } - } - return false -} diff --git a/cluster-autoscaler/simulator/cluster_test.go b/cluster-autoscaler/simulator/cluster_test.go index 34a6697fb57..985f2a0a232 100644 --- a/cluster-autoscaler/simulator/cluster_test.go +++ b/cluster-autoscaler/simulator/cluster_test.go @@ -51,7 +51,11 @@ func TestUtilization(t *testing.T) { daemonSetPod3 := BuildTestPod("p3", 100, 200000) daemonSetPod3.OwnerReferences = GenerateOwnerReferences("ds", "DaemonSet", "apps/v1", "") - nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, pod2, daemonSetPod3) + daemonSetPod4 := BuildTestPod("p4", 100, 200000) + daemonSetPod4.OwnerReferences = GenerateOwnerReferences("ds", "CustomDaemonSet", "crd/v1", "") + daemonSetPod4.Annotations = 
map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"} + + nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, pod2, daemonSetPod3, daemonSetPod4) utilInfo, err = CalculateUtilization(node, nodeInfo, true, false, gpuLabel) assert.NoError(t, err) assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) diff --git a/cluster-autoscaler/simulator/drain.go b/cluster-autoscaler/simulator/drain.go index 6a3e6518fb2..8257611a237 100644 --- a/cluster-autoscaler/simulator/drain.go +++ b/cluster-autoscaler/simulator/drain.go @@ -39,7 +39,6 @@ func FastGetPodsToMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithSystem pods, err := drain.GetPodsForDeletionOnNodeDrain( nodeInfo.Pods(), pdbs, - false, skipNodesWithSystemPods, skipNodesWithLocalStorage, false, @@ -67,7 +66,6 @@ func DetailedGetPodsForMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithS pods, err := drain.GetPodsForDeletionOnNodeDrain( nodeInfo.Pods(), pdbs, - false, skipNodesWithSystemPods, skipNodesWithLocalStorage, true, diff --git a/cluster-autoscaler/simulator/nodes.go b/cluster-autoscaler/simulator/nodes.go index e5eab30dd6a..3f668045ee4 100644 --- a/cluster-autoscaler/simulator/nodes.go +++ b/cluster-autoscaler/simulator/nodes.go @@ -17,13 +17,11 @@ limitations under the License. package simulator import ( - "time" - apiv1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1beta1" - "k8s.io/autoscaler/cluster-autoscaler/utils/drain" - "k8s.io/autoscaler/cluster-autoscaler/utils/errors" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" ) // getRequiredPodsForNode returns a list of pods that would appear on the node if the @@ -31,36 +29,8 @@ import ( // drain command to get the list. 
func getRequiredPodsForNode(nodename string, podsForNodes map[string][]*apiv1.Pod) ([]*apiv1.Pod, errors.AutoscalerError) { allPods := podsForNodes[nodename] - podsToRemoveList, err := drain.GetPodsForDeletionOnNodeDrain( - allPods, - []*policyv1.PodDisruptionBudget{}, // PDBs are irrelevant when considering new node. - true, // Force all removals. - false, - false, - false, // Setting this to true requires listers to be not-null. - nil, - 0, - time.Now()) - if err != nil { - return []*apiv1.Pod{}, errors.ToAutoscalerError(errors.InternalError, err) - } - - podsToRemoveMap := make(map[string]struct{}) - for _, pod := range podsToRemoveList { - podsToRemoveMap[pod.SelfLink] = struct{}{} - } - - podsOnNewNode := make([]*apiv1.Pod, 0) - for _, pod := range allPods { - if pod.DeletionTimestamp != nil { - continue - } - if _, found := podsToRemoveMap[pod.SelfLink]; !found { - podsOnNewNode = append(podsOnNewNode, pod) - } - } - return podsOnNewNode, nil + return filterRequiredPodsForNode(allPods), nil } // BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created. 
@@ -75,3 +45,20 @@ func BuildNodeInfoForNode(node *apiv1.Node, podsForNodes map[string][]*apiv1.Pod } return result, nil } + +func filterRequiredPodsForNode(allPods []*apiv1.Pod) []*apiv1.Pod { + var selectedPods []*apiv1.Pod + + for id, pod := range allPods { + // Ignore pod in deletion phase + if pod.DeletionTimestamp != nil { + continue + } + + if pod_util.IsMirrorPod(pod) || pod_util.IsDaemonSetPod(pod) { + selectedPods = append(selectedPods, allPods[id]) + } + } + + return selectedPods +} diff --git a/cluster-autoscaler/simulator/nodes_test.go b/cluster-autoscaler/simulator/nodes_test.go index 78a6e90460b..488852c887a 100644 --- a/cluster-autoscaler/simulator/nodes_test.go +++ b/cluster-autoscaler/simulator/nodes_test.go @@ -18,12 +18,17 @@ package simulator import ( "testing" + "time" + + "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/types" - "github.com/stretchr/testify/assert" + pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" + . "k8s.io/autoscaler/cluster-autoscaler/utils/test" ) func TestRequiredPodsForNode(t *testing.T) { @@ -73,3 +78,115 @@ func TestRequiredPodsForNode(t *testing.T) { assert.Equal(t, 1, len(pods)) assert.Equal(t, "pod2", pods[0].Name) } + +func Test_filterRequiredPodsForNode(t *testing.T) { + nodeName := "node1" + pod1 := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "pod1", + SelfLink: "pod1", + }, + Spec: apiv1.PodSpec{ + NodeName: nodeName, + }, + } + // Manifest pod. 
+ mirrorPod := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mirrorPod", + Namespace: "kube-system", + SelfLink: "mirrorPod", + Annotations: map[string]string{ + types.ConfigMirrorAnnotationKey: "something", + }, + }, + Spec: apiv1.PodSpec{ + NodeName: nodeName, + }, + } + now := metav1.NewTime(time.Now()) + podDeleted := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podDeleted", + SelfLink: "podDeleted", + Annotations: map[string]string{ + types.ConfigMirrorAnnotationKey: "something", + }, + DeletionTimestamp: &now, + }, + Spec: apiv1.PodSpec{ + NodeName: nodeName, + }, + } + + podDaemonset := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podDaemonset", + SelfLink: "podDaemonset", + OwnerReferences: GenerateOwnerReferences("ds", "DaemonSet", "apps/v1", ""), + Annotations: map[string]string{ + types.ConfigSourceAnnotationKey: types.FileSource, + }, + }, + Spec: apiv1.PodSpec{ + NodeName: nodeName, + }, + } + + podDaemonsetAnnotation := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podDaemonset2", + SelfLink: "podDaemonset2", + OwnerReferences: GenerateOwnerReferences("ds2", "CustomDaemonset", "crd/v1", ""), + Annotations: map[string]string{ + pod_util.DaemonSetPodAnnotationKey: "true", + }, + }, + Spec: apiv1.PodSpec{ + NodeName: nodeName, + }, + } + + tests := []struct { + name string + inputPods []*apiv1.Pod + want []*apiv1.Pod + }{ + { + name: "nil input pod list", + inputPods: nil, + want: []*apiv1.Pod{}, + }, + { + name: "should return only mirrorPod", + inputPods: []*apiv1.Pod{pod1, mirrorPod}, + want: []*apiv1.Pod{mirrorPod}, + }, + { + name: "should ignore podDeleted", + inputPods: []*apiv1.Pod{pod1, mirrorPod, podDeleted}, + want: []*apiv1.Pod{mirrorPod}, + }, + { + name: "should return daemonset pod", + inputPods: []*apiv1.Pod{pod1, podDaemonset}, + want: []*apiv1.Pod{podDaemonset}, + }, + { + name: "should return daemonset pods with", + inputPods: 
[]*apiv1.Pod{pod1, podDaemonset, podDaemonsetAnnotation}, + want: []*apiv1.Pod{podDaemonset, podDaemonsetAnnotation}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := filterRequiredPodsForNode(tt.inputPods); !apiequality.Semantic.DeepEqual(got, tt.want) { + t.Errorf("filterRequiredPodsForNode() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cluster-autoscaler/utils/drain/drain.go b/cluster-autoscaler/utils/drain/drain.go index fa56ffeadce..3c3471b28c3 100644 --- a/cluster-autoscaler/utils/drain/drain.go +++ b/cluster-autoscaler/utils/drain/drain.go @@ -22,10 +22,12 @@ import ( apiv1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" - "k8s.io/kubernetes/pkg/kubelet/types" + pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" ) const ( @@ -44,7 +46,6 @@ const ( func GetPodsForDeletionOnNodeDrain( podList []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget, - deleteAll bool, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool, checkReferences bool, // Setting this to true requires client to be not-null. @@ -62,7 +63,7 @@ func GetPodsForDeletionOnNodeDrain( } for _, pod := range podList { - if IsMirrorPod(pod) { + if pod_util.IsMirrorPod(pod) { continue } @@ -107,24 +108,17 @@ func GetPodsForDeletionOnNodeDrain( } else { replicated = true } - } else if refKind == "DaemonSet" { - if checkReferences { - ds, err := listers.DaemonSetLister().DaemonSets(controllerNamespace).Get(controllerRef.Name) - - // Assume the only reason for an error is because the DaemonSet is - // gone/missing, not for any other cause. 
TODO(mml): something more - // sophisticated than this - if err == nil && ds != nil { - // Otherwise, treat daemonset-managed pods as unmanaged since - // DaemonSet Controller currently ignores the unschedulable bit. - // FIXME(mml): Add link to the issue concerning a proper way to drain - // daemonset pods, probably using taints. - daemonsetPod = true - } else { + } else if pod_util.IsDaemonSetPod(pod) { + daemonsetPod = true + // don't have listener for other DaemonSet kind + // TODO: we should use a generic client for checking the reference. + if checkReferences && refKind == "DaemonSet" { + _, err := listers.DaemonSetLister().DaemonSets(controllerNamespace).Get(controllerRef.Name) + if apierrors.IsNotFound(err) { return []*apiv1.Pod{}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err) + } else if err != nil { + return []*apiv1.Pod{}, fmt.Errorf("error when trying to get daemonset for %s/%s , err: %v", pod.Namespace, pod.Name, err) } - } else { - daemonsetPod = true } } else if refKind == "Job" { if checkReferences { @@ -180,7 +174,7 @@ func GetPodsForDeletionOnNodeDrain( continue } - if !deleteAll && !safeToEvict && !terminal { + if !safeToEvict && !terminal { if !replicated { return []*apiv1.Pod{}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name) } @@ -210,12 +204,6 @@ func ControllerRef(pod *apiv1.Pod) *metav1.OwnerReference { return metav1.GetControllerOf(pod) } -// IsMirrorPod checks whether the pod is a mirror pod. -func IsMirrorPod(pod *apiv1.Pod) bool { - _, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey] - return found -} - // isPodTerminal checks whether the pod is in a terminal state. 
func isPodTerminal(pod *apiv1.Pod) bool { // pod will never be restarted diff --git a/cluster-autoscaler/utils/drain/drain_test.go b/cluster-autoscaler/utils/drain/drain_test.go index 05fcb142646..cafdf54993b 100644 --- a/cluster-autoscaler/utils/drain/drain_test.go +++ b/cluster-autoscaler/utils/drain/drain_test.go @@ -103,6 +103,20 @@ func TestDrain(t *testing.T) { }, } + cdsPod := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + OwnerReferences: GenerateOwnerReferences(ds.Name, "CustomDaemonSet", "crd/v1", ""), + Annotations: map[string]string{ + "cluster-autoscaler.kubernetes.io/daemonset-pod": "true", + }, + }, + Spec: apiv1.PodSpec{ + NodeName: "node", + }, + } + job := batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job", @@ -372,6 +386,13 @@ func TestDrain(t *testing.T) { expectFatal: false, expectPods: []*apiv1.Pod{}, }, + { + description: "DS-managed pod by a custom Daemonset", + pods: []*apiv1.Pod{cdsPod}, + pdbs: []*policyv1.PodDisruptionBudget{}, + expectFatal: false, + expectPods: []*apiv1.Pod{}, + }, { description: "Job-managed pod", pods: []*apiv1.Pod{jobPod}, @@ -540,8 +561,7 @@ func TestDrain(t *testing.T) { registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, dsLister, rcLister, jobLister, rsLister, ssLister) - pods, err := GetPodsForDeletionOnNodeDrain(test.pods, test.pdbs, - false, true, true, true, registry, 0, time.Now()) + pods, err := GetPodsForDeletionOnNodeDrain(test.pods, test.pdbs, true, true, true, registry, 0, time.Now()) if test.expectFatal { if err == nil { diff --git a/cluster-autoscaler/utils/pod/pod.go b/cluster-autoscaler/utils/pod/pod.go new file mode 100644 index 00000000000..fbad0c2ade3 --- /dev/null +++ b/cluster-autoscaler/utils/pod/pod.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+	"k8s.io/kubernetes/pkg/kubelet/types"
+
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// DaemonSetPodAnnotationKey is the annotation used to inform the cluster-autoscaler that a Pod must be considered as a DaemonSet's Pod.
+	DaemonSetPodAnnotationKey = "cluster-autoscaler.kubernetes.io/daemonset-pod"
+)
+
+// IsDaemonSetPod returns true if the Pod should be considered as a Pod managed by a DaemonSet.
+func IsDaemonSetPod(pod *apiv1.Pod) bool {
+	controllerRef := metav1.GetControllerOf(pod)
+	if controllerRef != nil && controllerRef.Kind == "DaemonSet" {
+		return true
+	}
+
+	if val, ok := pod.Annotations[DaemonSetPodAnnotationKey]; ok && val == "true" {
+		return true
+	}
+
+	return false
+}
+
+// IsMirrorPod checks whether the pod is a mirror pod.
+func IsMirrorPod(pod *apiv1.Pod) bool {
+	if pod.ObjectMeta.Annotations == nil {
+		return false
+	}
+	_, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
+	return found
+}
+
+// IsStaticPod returns true if the pod is a static pod.
+func IsStaticPod(pod *apiv1.Pod) bool {
+	if pod.Annotations != nil {
+		if source, ok := pod.Annotations[types.ConfigSourceAnnotationKey]; ok == true {
+			return source != types.ApiserverSource
+		}
+	}
+	return false
+}
diff --git a/cluster-autoscaler/utils/pod/pod_test.go b/cluster-autoscaler/utils/pod/pod_test.go
new file mode 100644
index 00000000000..088c85a1e4a
--- /dev/null
+++ b/cluster-autoscaler/utils/pod/pod_test.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "testing" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/kubelet/types" +) + +func TestIsDaemonSetPod(t *testing.T) { + tests := []struct { + name string + pod *apiv1.Pod + want bool + }{ + { + name: "Pod with empty OwnerRef", + pod: &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + }, + want: false, + }, + { + name: "Pod with empty OwnerRef.Kind == DaemonSet", + pod: &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + Controller: newBool(true), + Kind: "DaemonSet", + }, + }, + }, + }, + want: true, + }, + { + name: "Pod with annotation but not with `DaemonSetPodAnnotationKey`", + pod: &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Annotations: make(map[string]string), + }, + }, + want: false, + }, + { + name: "Pod with `DaemonSetPodAnnotationKey` annotation but wrong value", + pod: &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Annotations: map[string]string{ + DaemonSetPodAnnotationKey: "bad value", + }, + }, + }, + want: false, + }, + { + name: "Pod with `DaemonSetPodAnnotationKey:true` annotation", + pod: &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Annotations: map[string]string{ + DaemonSetPodAnnotationKey: "true", + }, + }, + }, 
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsDaemonSetPod(tt.pod); got != tt.want {
+				t.Errorf("IsDaemonSetPod() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func newBool(b bool) *bool {
+	return &b
+}
+
+func TestIsMirrorPod(t *testing.T) {
+	tests := []struct {
+		name string
+		pod  *apiv1.Pod
+		want bool
+	}{
+		{
+			name: "pod with ConfigMirrorAnnotationKey",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "bar",
+					Annotations: map[string]string{
+						types.ConfigMirrorAnnotationKey: "",
+					},
+				},
+			},
+			want: true,
+		},
+		{
+			name: "pod without ConfigMirrorAnnotationKey",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "bar",
+					Annotations: map[string]string{
+						"some-other-annotation": "some-value",
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "pod with nil Annotations map",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "bar",
+				},
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsMirrorPod(tt.pod); got != tt.want {
+				t.Errorf("IsMirrorPod() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestIsStaticPod(t *testing.T) {
+	tests := []struct {
+		name string
+		pod  *apiv1.Pod
+		want bool
+	}{
+		{
+			name: "not a static pod",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "bar",
+				},
+			},
+			want: false,
+		},
+		{
+			name: "is a static pod",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "bar",
+					Annotations: map[string]string{
+						types.ConfigSourceAnnotationKey: types.FileSource,
+					},
+				},
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsStaticPod(tt.pod); got != tt.want {
+				t.Errorf("IsStaticPod() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}