diff --git a/pkg/manager/leaderelection.go b/pkg/manager/leaderelection.go
index a717ee895..5548c9e6f 100644
--- a/pkg/manager/leaderelection.go
+++ b/pkg/manager/leaderelection.go
@@ -28,7 +28,7 @@ func (k *KubeMacPoolManager) waitToStartLeading(poolManger *poolmanager.PoolMana
 		return errors.Wrap(err, "failed to start pool manager routines")
 	}
 
-	err = k.UpdateLeaderLabel()
+	err = updateLeaderLabel(k.runtimeManager.GetClient(), k.podName, k.podNamespace)
 	if err != nil {
 		return errors.Wrap(err, "failed marking pod as leader")
 	}
@@ -40,68 +40,68 @@ func (k *KubeMacPoolManager) waitToStartLeading(poolManger *poolmanager.PoolMana
 	return nil
 }
 
-// Adds the leader label to elected pod and removes it from all the other pods, if exists
-func (k *KubeMacPoolManager) UpdateLeaderLabel() error {
-	logger := logf.Log.WithName("UpdateLeaderLabel")
+// By setting this status to true in all pods, we declare the kubemacpool as ready and allow the webhooks to start running.
+func (k *KubeMacPoolManager) setLeadershipConditions(status corev1.ConditionStatus) error {
 	podList := corev1.PodList{}
 	err := k.runtimeManager.GetClient().List(context.TODO(), &podList, &client.ListOptions{Namespace: k.podNamespace})
 	if err != nil {
 		return errors.Wrap(err, "failed to list kubemacpool manager pods")
 	}
-
 	for _, pod := range podList.Items {
 		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
 			podKey := types.NamespacedName{Namespace: k.podNamespace, Name: pod.Name}
 			err := k.runtimeManager.GetClient().Get(context.TODO(), podKey, &pod)
 			if err != nil {
-				return errors.Wrap(err, "failed to get kubemacpool manager pod")
+				return errors.Wrap(err, "failed to get kubemacpool manager pods")
 			}
 
-			_, exist := pod.Labels[names.LEADER_LABEL]
-			if pod.Name == k.podName && !exist {
-				logger.V(1).Info("add the label to the elected leader", "Pod Name", pod.Name)
-				pod.Labels[names.LEADER_LABEL] = "true"
-			} else if exist {
-				logger.V(1).Info("deleting leader label from old leader", "Pod Name", pod.Name)
-				delete(pod.Labels, names.LEADER_LABEL)
-			} else {
-				return nil
-			}
+			pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{Type: names.LEADER_READY_CONDITION_TYPE, Status: status, LastProbeTime: metav1.Time{}})
 
-			return k.runtimeManager.GetClient().Status().Update(context.TODO(), &pod)
+			err = k.runtimeManager.GetClient().Status().Update(context.TODO(), &pod)
+			return err
 		})
-
 		if err != nil {
-			return errors.Wrap(err, fmt.Sprintf("failed to updating kubemacpool leader label in pod %s", pod.Name))
+			return errors.Wrap(err, "failed to update Leadership readiness gate status to kubemacpool manager pods")
 		}
 	}
-
 	return nil
 }
 
-// By setting this status to true in all pods, we declare the kubemacpool as ready and allow the webhooks to start running.
-func (k *KubeMacPoolManager) setLeadershipConditions(status corev1.ConditionStatus) error {
+// Adds the leader label to the elected pod and removes it from all the other pods, if it exists
+func updateLeaderLabel(kubeClient client.Client, leaderPodName, managerNamespace string) error {
+	logger := logf.Log.WithName("UpdateLeaderLabel")
 	podList := corev1.PodList{}
-	err := k.runtimeManager.GetClient().List(context.TODO(), &podList, &client.ListOptions{Namespace: k.podNamespace})
+	err := kubeClient.List(context.TODO(), &podList, &client.ListOptions{Namespace: managerNamespace})
 	if err != nil {
 		return errors.Wrap(err, "failed to list kubemacpool manager pods")
 	}
+
 	for _, pod := range podList.Items {
 		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
-			podKey := types.NamespacedName{Namespace: k.podNamespace, Name: pod.Name}
-			err := k.runtimeManager.GetClient().Get(context.TODO(), podKey, &pod)
+			podKey := types.NamespacedName{Namespace: managerNamespace, Name: pod.Name}
+			err := kubeClient.Get(context.TODO(), podKey, &pod)
 			if err != nil {
-				return errors.Wrap(err, "failed to get kubemacpool manager pods")
+				return errors.Wrap(err, "failed to get kubemacpool manager pod")
 			}
 
-			pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{Type: names.LEADER_READY_CONDITION_TYPE, Status: status, LastProbeTime: metav1.Time{}})
+			if pod.Name == leaderPodName {
+				logger.Info("add the label to the elected leader", "Pod Name", pod.Name)
+				if len(pod.Labels) == 0 {
+					pod.Labels = make(map[string]string)
+				}
+				pod.Labels[names.LEADER_LABEL] = "true"
+			} else {
+				logger.Info("deleting leader label if exists", "Pod Name", pod.Name)
+				delete(pod.Labels, names.LEADER_LABEL)
+			}
 
-			err = k.runtimeManager.GetClient().Status().Update(context.TODO(), &pod)
-			return err
+			return kubeClient.Status().Update(context.TODO(), &pod)
 		})
+
 		if err != nil {
-			return errors.Wrap(err, "failed to update Leadership readiness gate status to kubemacpool manager pods")
+			return errors.Wrap(err, fmt.Sprintf("failed updating kubemacpool leader label in pod %s", pod.Name))
 		}
 	}
+
 	return nil
 }
diff --git a/pkg/manager/manager_suite_test.go b/pkg/manager/manager_suite_test.go
new file mode 100644
index 000000000..8be0893af
--- /dev/null
+++ b/pkg/manager/manager_suite_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2019 The KubeMacPool Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestPoolManager(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Manager Suite")
+}
diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go
new file mode 100644
index 000000000..d18742487
--- /dev/null
+++ b/pkg/manager/manager_test.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The KubeMacPool Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"github.com/onsi/ginkgo/extensions/table"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	"github.com/k8snetworkplumbingwg/kubemacpool/pkg/names"
+)
+
+var _ = Describe("leader election", func() {
+	leaderPodName := "leaderPod"
+	losingPodName := "losingPod"
+	leaderLabelValue := "true"
+
+	leaderPod := v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: leaderPodName, Namespace: names.MANAGER_NAMESPACE}}
+	loserPod := v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: losingPodName, Namespace: names.MANAGER_NAMESPACE}}
+
+	createEnvironment := func(fakeObjectsForClient ...runtime.Object) client.Client {
+		fakeClient := fake.NewFakeClient(fakeObjectsForClient...)
+
+		return fakeClient
+	}
+
+	Describe("Internal Functions", func() {
+		Context("When leader Pod is passed for leader label update", func() {
+			table.DescribeTable("Should update the leader label in all pods", func(leaderPodFormerLabels, loserPodFormerLabels string) {
+				By("Adding the initial label state of the pods prior to winning the election")
+				initiatePodLabels(&leaderPod, leaderPodFormerLabels, leaderLabelValue)
+				initiatePodLabels(&loserPod, loserPodFormerLabels, leaderLabelValue)
+
+				By("Initiating the Environment")
+				kubeClient := createEnvironment(&leaderPod, &loserPod)
+
+				By("running label update method")
+				err := updateLeaderLabel(kubeClient, leaderPodName, names.MANAGER_NAMESPACE)
+				Expect(err).ToNot(HaveOccurred(), "should successfully update kubemacpool leader labels")
+
+				By("checking the leader pod has the leader label")
+				checkLeaderPod := v1.Pod{}
+				err = kubeClient.Get(context.TODO(), types.NamespacedName{Namespace: names.MANAGER_NAMESPACE, Name: leaderPodName}, &checkLeaderPod)
+				Expect(err).ToNot(HaveOccurred(), "should successfully get the kubemacpool leader pod")
+				Expect(checkLeaderPod.Labels).To(HaveLen(1), "leader pod should have only 1 label")
+				Expect(checkLeaderPod.Labels[names.LEADER_LABEL]).To(Equal(leaderLabelValue), "leader pod should have the leader label value")
+
+				By("checking the non-leader pod has no leader label")
+				checkLoserPod := v1.Pod{}
+				err = kubeClient.Get(context.TODO(), types.NamespacedName{Namespace: names.MANAGER_NAMESPACE, Name: losingPodName}, &checkLoserPod)
+				Expect(err).ToNot(HaveOccurred(), "should successfully get the kubemacpool non-leader pod")
+				Expect(checkLoserPod.Labels).To(HaveLen(0), "non-leader pod should not have any labels")
+			},
+				table.Entry("all pods don't have a former leader label", "", ""),
+				table.Entry("leader pod already has leader label from former election", names.LEADER_LABEL, ""),
+				table.Entry("loser pod already has leader label from former election", "", names.LEADER_LABEL),
+				table.Entry("all pods have a former leader label", names.LEADER_LABEL, names.LEADER_LABEL),
+			)
+		})
+	})
+})
+
+func initiatePodLabels(pod *v1.Pod, label string, labelValue string) {
+	if len(label) != 0 {
+		pod.Labels = make(map[string]string)
+		pod.Labels[label] = labelValue
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b13d98cbc..b3ac0929d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -501,6 +501,7 @@ sigs.k8s.io/controller-runtime/pkg/cache/internal
 sigs.k8s.io/controller-runtime/pkg/client
 sigs.k8s.io/controller-runtime/pkg/client/apiutil
 sigs.k8s.io/controller-runtime/pkg/client/config
+sigs.k8s.io/controller-runtime/pkg/client/fake
 sigs.k8s.io/controller-runtime/pkg/controller
 sigs.k8s.io/controller-runtime/pkg/envtest
 sigs.k8s.io/controller-runtime/pkg/event
@@ -509,6 +510,7 @@ sigs.k8s.io/controller-runtime/pkg/healthz
 sigs.k8s.io/controller-runtime/pkg/internal/controller
 sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
 sigs.k8s.io/controller-runtime/pkg/internal/log
+sigs.k8s.io/controller-runtime/pkg/internal/objectutil
 sigs.k8s.io/controller-runtime/pkg/internal/recorder
 sigs.k8s.io/controller-runtime/pkg/internal/testing/integration
 sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/addr
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
new file mode 100644
index 000000000..be688d5e8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
@@ -0,0 +1,405 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilrand "k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/testing"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
+)
+
+type versionedTracker struct {
+	testing.ObjectTracker
+}
+
+type fakeClient struct {
+	tracker versionedTracker
+	scheme  *runtime.Scheme
+}
+
+var _ client.Client = &fakeClient{}
+
+const (
+	maxNameLength          = 63
+	randomLength           = 5
+	maxGeneratedNameLength = maxNameLength - randomLength
+)
+
+// NewFakeClient creates a new fake client for testing.
+// You can choose to initialize it with a slice of runtime.Object.
+// Deprecated: use NewFakeClientWithScheme. You should always be
+// passing an explicit Scheme.
+func NewFakeClient(initObjs ...runtime.Object) client.Client {
+	return NewFakeClientWithScheme(scheme.Scheme, initObjs...)
+}
+
+// NewFakeClientWithScheme creates a new fake client with the given scheme
+// for testing.
+// You can choose to initialize it with a slice of runtime.Object.
+func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client {
+	tracker := testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder())
+	for _, obj := range initObjs {
+		err := tracker.Add(obj)
+		if err != nil {
+			panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err))
+		}
+	}
+	return &fakeClient{
+		tracker: versionedTracker{tracker},
+		scheme:  clientScheme,
+	}
+}
+
+func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	if accessor.GetName() == "" {
+		return apierrors.NewInvalid(
+			obj.GetObjectKind().GroupVersionKind().GroupKind(),
+			accessor.GetName(),
+			field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
+	}
+	if accessor.GetResourceVersion() != "" {
+		return apierrors.NewBadRequest("resourceVersion can not be set for Create requests")
+	}
+	accessor.SetResourceVersion("1")
+	return t.ObjectTracker.Create(gvr, obj, ns)
+}
+
+func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return fmt.Errorf("failed to get accessor for object: %v", err)
+	}
+	if accessor.GetName() == "" {
+		return apierrors.NewInvalid(
+			obj.GetObjectKind().GroupVersionKind().GroupKind(),
+			accessor.GetName(),
+			field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
+	}
+	oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName())
+	if err != nil {
+		return err
+	}
+	oldAccessor, err := meta.Accessor(oldObject)
+	if err != nil {
+		return err
+	}
+	if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() {
+		return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified"))
+	}
+	if oldAccessor.GetResourceVersion() == "" {
+		oldAccessor.SetResourceVersion("0")
+	}
+	intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64)
+	if err != nil {
+		return fmt.Errorf("can not convert resourceVersion %q to int: %v", oldAccessor.GetResourceVersion(), err)
+	}
+	intResourceVersion++
+	accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10))
+	return t.ObjectTracker.Update(gvr, obj, ns)
+}
+
+func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
+	gvr, err := getGVRFromObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	o, err := c.tracker.Get(gvr, key.Namespace, key.Name)
+	if err != nil {
+		return err
+	}
+
+	gvk, err := apiutil.GVKForObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	ta, err := meta.TypeAccessor(o)
+	if err != nil {
+		return err
+	}
+	ta.SetKind(gvk.Kind)
+	ta.SetAPIVersion(gvk.GroupVersion().String())
+
+	j, err := json.Marshal(o)
+	if err != nil {
+		return err
+	}
+	decoder := scheme.Codecs.UniversalDecoder()
+	_, _, err = decoder.Decode(j, nil, obj)
+	return err
+}
+
+func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...client.ListOption) error {
+	gvk, err := apiutil.GVKForObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+
+	OriginalKind := gvk.Kind
+
+	if !strings.HasSuffix(gvk.Kind, "List") {
+		return fmt.Errorf("non-list type %T (kind %q) passed as output", obj, gvk)
+	}
+	// we need the non-list GVK, so chop off the "List" from the end of the kind
+	gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
+
+	listOpts := client.ListOptions{}
+	listOpts.ApplyOptions(opts)
+
+	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+	o, err := c.tracker.List(gvr, gvk, listOpts.Namespace)
+	if err != nil {
+		return err
+	}
+
+	ta, err := meta.TypeAccessor(o)
+	if err != nil {
+		return err
+	}
+	ta.SetKind(OriginalKind)
+	ta.SetAPIVersion(gvk.GroupVersion().String())
+
+	j, err := json.Marshal(o)
+	if err != nil {
+		return err
+	}
+	decoder := scheme.Codecs.UniversalDecoder()
+	_, _, err = decoder.Decode(j, nil, obj)
+	if err != nil {
+		return err
+	}
+
+	if listOpts.LabelSelector != nil {
+		objs, err := meta.ExtractList(obj)
+		if err != nil {
+			return err
+		}
+		filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector)
+		if err != nil {
+			return err
+		}
+		err = meta.SetList(obj, filteredObjs)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error {
+	createOptions := &client.CreateOptions{}
+	createOptions.ApplyOptions(opts)
+
+	for _, dryRunOpt := range createOptions.DryRun {
+		if dryRunOpt == metav1.DryRunAll {
+			return nil
+		}
+	}
+
+	gvr, err := getGVRFromObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+
+	if accessor.GetName() == "" && accessor.GetGenerateName() != "" {
+		base := accessor.GetGenerateName()
+		if len(base) > maxGeneratedNameLength {
+			base = base[:maxGeneratedNameLength]
+		}
+		accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength)))
+	}
+
+	return c.tracker.Create(gvr, obj, accessor.GetNamespace())
+}
+
+func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error {
+	gvr, err := getGVRFromObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	delOptions := client.DeleteOptions{}
+	delOptions.ApplyOptions(opts)
+
+	//TODO: implement propagation
+	return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
+}
+
+func (c *fakeClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error {
+	gvk, err := apiutil.GVKForObject(obj, scheme.Scheme)
+	if err != nil {
+		return err
+	}
+
+	dcOptions := client.DeleteAllOfOptions{}
+	dcOptions.ApplyOptions(opts)
+
+	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+	o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace)
+	if err != nil {
+		return err
+	}
+
+	objs, err := meta.ExtractList(o)
+	if err != nil {
+		return err
+	}
+	filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector)
+	if err != nil {
+		return err
+	}
+	for _, o := range filteredObjs {
+		accessor, err := meta.Accessor(o)
+		if err != nil {
+			return err
+		}
+		err = c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {
+	updateOptions := &client.UpdateOptions{}
+	updateOptions.ApplyOptions(opts)
+
+	for _, dryRunOpt := range updateOptions.DryRun {
+		if dryRunOpt == metav1.DryRunAll {
+			return nil
+		}
+	}
+
+	gvr, err := getGVRFromObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	return c.tracker.Update(gvr, obj, accessor.GetNamespace())
+}
+
+func (c *fakeClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error {
+	patchOptions := &client.PatchOptions{}
+	patchOptions.ApplyOptions(opts)
+
+	for _, dryRunOpt := range patchOptions.DryRun {
+		if dryRunOpt == metav1.DryRunAll {
+			return nil
+		}
+	}
+
+	gvr, err := getGVRFromObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	data, err := patch.Data(obj)
+	if err != nil {
+		return err
+	}
+
+	reaction := testing.ObjectReaction(c.tracker)
+	handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data))
+	if err != nil {
+		return err
+	}
+	if !handled {
+		panic("tracker could not handle patch method")
+	}
+
+	gvk, err := apiutil.GVKForObject(obj, c.scheme)
+	if err != nil {
+		return err
+	}
+	ta, err := meta.TypeAccessor(o)
+	if err != nil {
+		return err
+	}
+	ta.SetKind(gvk.Kind)
+	ta.SetAPIVersion(gvk.GroupVersion().String())
+
+	j, err := json.Marshal(o)
+	if err != nil {
+		return err
+	}
+	decoder := scheme.Codecs.UniversalDecoder()
+	_, _, err = decoder.Decode(j, nil, obj)
+	return err
+}
+
+func (c *fakeClient) Status() client.StatusWriter {
+	return &fakeStatusWriter{client: c}
+}
+
+func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) {
+	gvk, err := apiutil.GVKForObject(obj, scheme)
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+	return gvr, nil
+}
+
+type fakeStatusWriter struct {
+	client *fakeClient
+}
+
+func (sw *fakeStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {
+	// TODO(droot): This results in full update of the obj (spec + status). Need
+	// a way to update status field only.
+	return sw.client.Update(ctx, obj, opts...)
+}
+
+func (sw *fakeStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error {
+	// TODO(droot): This results in full update of the obj (spec + status). Need
+	// a way to update status field only.
+	return sw.client.Patch(ctx, obj, patch, opts...)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go
new file mode 100644
index 000000000..a45d70332
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package fake provides a fake client for testing.
+
+Deprecated: please use pkg/envtest for testing. This package will be dropped
+before the v1.0.0 release.
+
+An fake client is backed by its simple object store indexed by GroupVersionResource.
+You can create a fake client with optional objects.
+
+	client := NewFakeClient(initObjs...) // initObjs is a slice of runtime.Object
+
+You can invoke the methods defined in the Client interface.
+
+When it doubt, it's almost always better not to use this package and instead use
+envtest.Environment with a real client and API server.
+*/
+package fake
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go
new file mode 100644
index 000000000..8513846e2
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objectutil
+
+import (
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// FilterWithLabels returns a copy of the items in objs matching labelSel
+func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) {
+	outItems := make([]runtime.Object, 0, len(objs))
+	for _, obj := range objs {
+		meta, err := apimeta.Accessor(obj)
+		if err != nil {
+			return nil, err
+		}
+		if labelSel != nil {
+			lbls := labels.Set(meta.GetLabels())
+			if !labelSel.Matches(lbls) {
+				continue
+			}
+		}
+		outItems = append(outItems, obj.DeepCopyObject())
+	}
+	return outItems, nil
+}
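Side note, not part of the diff: the new manager_test.go builds its client through fake.NewFakeClient, which the vendored package above marks as deprecated in favor of NewFakeClientWithScheme with an explicit scheme. A minimal sketch of how the test's createEnvironment helper could be written that way; the helper name createEnvironmentWithScheme is hypothetical, and scheme.Scheme is the client-go default scheme, which already registers core/v1 Pods.

package manager

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// createEnvironmentWithScheme mirrors the test's createEnvironment helper but
// passes the scheme explicitly instead of relying on the deprecated default.
func createEnvironmentWithScheme(fakeObjectsForClient ...runtime.Object) client.Client {
	return fake.NewFakeClientWithScheme(scheme.Scheme, fakeObjectsForClient...)
}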