diff --git a/cmd/ipam-controller/app/app.go b/cmd/ipam-controller/app/app.go index b659627..832a542 100644 --- a/cmd/ipam-controller/app/app.go +++ b/cmd/ipam-controller/app/app.go @@ -21,7 +21,6 @@ import ( "github.com/go-logr/logr" "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -40,12 +39,12 @@ import ( // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-controller/app/options" "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/allocator" - configmapctrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/controllers/configmap" nodectrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/controllers/node" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/selector" + poolctrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/controllers/pool" "github.com/Mellanox/nvidia-k8s-ipam/pkg/version" ) @@ -99,7 +98,7 @@ func RunController(ctx context.Context, config *rest.Config, opts *options.Optio logger.Info("start IPAM controller", "version", version.GetVersionString(), "config", opts.ConfigMapName, - "configNamespace", opts.ConfigMapNamespace) + "configNamespace", opts.ConfigMapNamespace, "poolsNamespace", opts.PoolsNamespace) scheme := runtime.NewScheme() @@ -108,13 +107,17 @@ func RunController(ctx context.Context, config *rest.Config, opts *options.Optio return err } + if err := ipamv1alpha1.AddToScheme(scheme); err != nil { + logger.Error(err, "failed to register ipamv1alpha1 scheme") + return err + } + mgr, err := ctrl.NewManager(config, ctrl.Options{ Scheme: scheme, NewCache: cache.BuilderWithOptions(cache.Options{ - SelectorsByObject: cache.SelectorsByObject{&corev1.ConfigMap{}: cache.ObjectSelector{ + SelectorsByObject: cache.SelectorsByObject{&ipamv1alpha1.IPPool{}: cache.ObjectSelector{ Field: fields.AndSelectors( - fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", opts.ConfigMapName)), - fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace=%s", opts.ConfigMapNamespace))), + fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace=%s", opts.PoolsNamespace))), }}, }), MetricsBindAddress: opts.MetricsAddr, @@ -132,30 +135,27 @@ func RunController(ctx context.Context, config *rest.Config, opts *options.Optio } netAllocator := allocator.New() - nodeSelector := selector.New() - configEventCH := make(chan event.GenericEvent, 1) + nodeEventCH := make(chan event.GenericEvent, 1) if err = (&nodectrl.NodeReconciler{ - Allocator: netAllocator, - Selector: nodeSelector, - ConfigEventCh: configEventCH, - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Allocator: netAllocator, + NodeEventCh: nodeEventCH, + PoolsNamespace: opts.PoolsNamespace, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { logger.Error(err, "unable to create controller", "controller", "Node") return err } - if err = (&configmapctrl.ConfigMapReconciler{ - Allocator: netAllocator, - Selector: nodeSelector, - ConfigEventCh: configEventCH, - ConfigMapName: opts.ConfigMapName, - ConfigMapNamespace: opts.ConfigMapNamespace, - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + if err = (&poolctrl.PoolReconciler{ + Allocator: netAllocator, + NodeEventCh: nodeEventCH, + PoolsNamespace: 
opts.PoolsNamespace, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - logger.Error(err, "unable to create controller", "controller", "ConfigMap") + logger.Error(err, "unable to create controller", "controller", "IPPool") return err } diff --git a/cmd/ipam-controller/app/app_suite_test.go b/cmd/ipam-controller/app/app_suite_test.go index c65b1bb..1cb15f5 100644 --- a/cmd/ipam-controller/app/app_suite_test.go +++ b/cmd/ipam-controller/app/app_suite_test.go @@ -21,9 +21,12 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" ) const ( @@ -46,17 +49,25 @@ func TestApp(t *testing.T) { var _ = BeforeSuite(func() { By("bootstrapping test environment") - testEnv = &envtest.Environment{} + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{"../../../deploy/crds"}, + CRDInstallOptions: envtest.CRDInstallOptions{ + ErrorIfPathMissing: true, + }, + } ctx, cFunc = context.WithCancel(context.Background()) var err error + err = ipamv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // cfg is defined in this file globally. cfg, err = testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - k8sClient, err = client.New(cfg, client.Options{}) + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) diff --git a/cmd/ipam-controller/app/app_test.go b/cmd/ipam-controller/app/app_test.go index ab9b56a..32faee3 100644 --- a/cmd/ipam-controller/app/app_test.go +++ b/cmd/ipam-controller/app/app_test.go @@ -15,7 +15,7 @@ package app_test import ( "context" - "fmt" + "reflect" "time" . 
"github.com/onsi/ginkgo/v2" @@ -23,15 +23,13 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-controller/app" "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-controller/app/options" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/config" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" ) const ( @@ -40,48 +38,6 @@ const ( pool3Name = "pool3" ) -func updateConfigMap(data string) { - d := map[string]string{config.ConfigMapKey: data} - err := k8sClient.Create(ctx, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: TestConfigMapName, Namespace: TestNamespace}, - Data: d, - }) - if err == nil { - return - } - if apiErrors.IsAlreadyExists(err) { - configMap := &corev1.ConfigMap{} - Expect(k8sClient.Get( - ctx, types.NamespacedName{Name: TestConfigMapName, Namespace: TestNamespace}, configMap)).NotTo(HaveOccurred()) - configMap.Data = d - Expect(k8sClient.Update( - ctx, configMap)).NotTo(HaveOccurred()) - } else { - Expect(err).NotTo(HaveOccurred()) - } -} - -var validConfig = fmt.Sprintf(` - { - "pools": { - "%s": { "subnet": "192.168.0.0/16", "perNodeBlockSize": 10 , "gateway": "192.168.0.1"}, - "%s": { "subnet": "172.16.0.0/16", "perNodeBlockSize": 50 , "gateway": "172.16.0.1"} - } - } -`, pool1Name, pool2Name) - -// ranges for two nodes only can be allocated -var validConfig2 = fmt.Sprintf(` - { - "pools": { - "%s": { "subnet": "172.17.0.0/24", "perNodeBlockSize": 100 , "gateway": "172.17.0.1"} - }, - "nodeSelector": {"foo": "bar"} - } -`, pool3Name) - -var invalidConfig = `{{{` - func createNode(name string) *corev1.Node { node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}} Expect(k8sClient.Create(ctx, node)).NotTo(HaveOccurred()) @@ -99,13 +55,39 @@ func updateNode(node *corev1.Node) *corev1.Node { return node } -func getRangeFromNode(nodeName string) map[string]*pool.IPPool { - node := getNode(nodeName) - poolCfg, err := pool.NewConfigReader(node) - if err != nil { - return nil +func getRangeForNode(nodeName string, poolName string) *ipamv1alpha1.Allocation { + allocations := getAllocationsFromIPPools(poolName) + + for _, a := range allocations { + alloc := a + if a.NodeName == nodeName { + return &alloc + } + } + return nil +} + +func getAllocationsFromIPPools(poolName string) []ipamv1alpha1.Allocation { + pool := &ipamv1alpha1.IPPool{} + Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: TestNamespace, Name: poolName}, pool)).NotTo(HaveOccurred()) + return pool.Status.Allocations +} + +func checkAllocationExists(allocations []ipamv1alpha1.Allocation, allocation ipamv1alpha1.Allocation) bool { + for _, a := range allocations { + if reflect.DeepEqual(a, allocation) { + return true + } } - return poolCfg.GetPools() + return false +} + +func updatePoolSpec(poolName string, spec ipamv1alpha1.IPPoolSpec) { + pool := &ipamv1alpha1.IPPool{} + Expect(k8sClient.Get( + ctx, types.NamespacedName{Name: poolName, Namespace: TestNamespace}, pool)).NotTo(HaveOccurred()) + pool.Spec = spec + Expect(k8sClient.Update(ctx, pool)).NotTo(HaveOccurred()) } // WaitAndCheckForStability wait for condition and then check it is stable for 1 second @@ -124,39 +106,63 @@ var _ = Describe("App", func() { cfg1pools := []string{pool1Name, pool2Name} - By("Create valid cfg1") - updateConfigMap(validConfig) - - By("Set 
annotation with valid ranges for node1") - node1 := createNode(testNode1) - node1InitialRanges := map[string]*pool.IPPool{pool1Name: { - Name: pool1Name, - Subnet: "192.168.0.0/16", - StartIP: "192.168.0.11", - EndIP: "192.168.0.20", - Gateway: "192.168.0.1", + By("Create valid pools") + pool1Spec := ipamv1alpha1.IPPoolSpec{ + Subnet: "192.168.0.0/16", + Gateway: "192.168.0.1", + PerNodeBlockSize: 10, + } + pool1 := &ipamv1alpha1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: pool1Name, + Namespace: TestNamespace, + }, + Spec: pool1Spec, + } + Expect(k8sClient.Create(ctx, pool1)).NotTo(HaveOccurred()) + + pool2 := &ipamv1alpha1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: pool2Name, + Namespace: TestNamespace, + }, + Spec: ipamv1alpha1.IPPoolSpec{ + Subnet: "172.16.0.0/16", + Gateway: "172.16.0.1", + PerNodeBlockSize: 50, + }, + } + Expect(k8sClient.Create(ctx, pool2)).NotTo(HaveOccurred()) + + By("Update Pools Status with valid ranges for node1 and invalide for node2 (wrong IP count)") + createNode(testNode1) + createNode(testNode2) + node1InitialRanges := map[string]ipamv1alpha1.Allocation{pool1Name: { + NodeName: testNode1, + StartIP: "192.168.0.11", + EndIP: "192.168.0.20", }, pool2Name: { - Name: pool2Name, - Subnet: "172.16.0.0/16", - StartIP: "172.16.0.1", - EndIP: "172.16.0.50", - Gateway: "172.16.0.1", - }} - Expect(pool.SetIPBlockAnnotation(node1, node1InitialRanges)).NotTo(HaveOccurred()) - Expect(updateNode(node1)) - - By("Set annotation with invalid ranges for node2") - // create annotation for node2 with invalid config (wrong IP count) - node2 := createNode(testNode2) - node2InitialRanges := map[string]*pool.IPPool{pool1Name: { - Name: pool1Name, - Subnet: "192.168.0.0/16", - StartIP: "192.168.0.11", - EndIP: "192.168.0.14", - Gateway: "192.168.0.1", + NodeName: testNode1, + StartIP: "172.16.0.1", + EndIP: "172.16.0.50", }} - Expect(pool.SetIPBlockAnnotation(node2, node2InitialRanges)).NotTo(HaveOccurred()) - Expect(updateNode(node2)) + pool1.Status = ipamv1alpha1.IPPoolStatus{ + Allocations: []ipamv1alpha1.Allocation{ + node1InitialRanges[pool1Name], + { + NodeName: testNode2, + StartIP: "192.168.0.11", + EndIP: "192.168.0.14", + }, + }, + } + pool2.Status = ipamv1alpha1.IPPoolStatus{ + Allocations: []ipamv1alpha1.Allocation{ + node1InitialRanges[pool2Name], + }, + } + Expect(k8sClient.Status().Update(ctx, pool1)).NotTo(HaveOccurred()) + Expect(k8sClient.Status().Update(ctx, pool2)).NotTo(HaveOccurred()) By("Start controller") @@ -170,71 +176,101 @@ var _ = Describe("App", func() { ConfigMapNamespace: TestNamespace, MetricsAddr: "0", // disable ProbeAddr: "0", // disable + PoolsNamespace: TestNamespace, })).NotTo(HaveOccurred()) close(controllerStopped) }() By("Create node3 without annotation") - createNode(testNode3) By("Check how controller handled the state") WaitAndCheckForStability(func(g Gomega) { uniqStartEndIPs := map[string]struct{}{} - node1Ranges := getRangeFromNode(testNode1) - node2Ranges := getRangeFromNode(testNode2) - node3Ranges := getRangeFromNode(testNode3) - for _, r := range []map[string]*pool.IPPool{node1Ranges, node2Ranges, node3Ranges} { - g.Expect(r).To(HaveLen(len(cfg1pools))) - for _, p := range cfg1pools { - if r[p] != nil { - uniqStartEndIPs[r[p].StartIP] = struct{}{} - uniqStartEndIPs[r[p].EndIP] = struct{}{} - g.Expect(r[p].Gateway).NotTo(BeEmpty()) - g.Expect(r[p].Subnet).NotTo(BeEmpty()) - } + pool1Allocations := getAllocationsFromIPPools(pool1Name) + pool2Allocations := getAllocationsFromIPPools(pool2Name) + for _, r := range 
[][]ipamv1alpha1.Allocation{pool1Allocations, pool2Allocations} { + g.Expect(r).To(HaveLen(3)) // 3 allocations, 1 for each node + for _, a := range r { + uniqStartEndIPs[a.StartIP] = struct{}{} + uniqStartEndIPs[a.EndIP] = struct{}{} } } // we should have unique start/end IPs for each node for each pool g.Expect(uniqStartEndIPs).To(HaveLen(len(cfg1pools) * 3 * 2)) // node1 should have restored ranges - g.Expect(node1Ranges).To(Equal(node1InitialRanges)) + g.Expect(checkAllocationExists(pool1Allocations, node1InitialRanges[pool1Name])).To(BeTrue()) + g.Expect(checkAllocationExists(pool2Allocations, node1InitialRanges[pool2Name])).To(BeTrue()) }, 15, 2) - By("Set invalid config for controller") - updateConfigMap(invalidConfig) - time.Sleep(time.Second) - - By("Set valid cfg2, which ignores all nodes") - - updateConfigMap(validConfig2) - - By("Wait for controller to remove annotations") - WaitAndCheckForStability(func(g Gomega) { - g.Expect(getRangeFromNode(testNode1)).To(BeNil()) - g.Expect(getRangeFromNode(testNode2)).To(BeNil()) - g.Expect(getRangeFromNode(testNode3)).To(BeNil()) - }, 15, 2) - - By("Update nodes to match selector in cfg2") + By("Set invalid config for pool1") + invalidPool1Spec := ipamv1alpha1.IPPoolSpec{ + Subnet: "192.168.0.0/16", + Gateway: "10.10.0.1", + PerNodeBlockSize: 10, + } + updatePoolSpec(pool1Name, invalidPool1Spec) + Eventually(func(g Gomega) bool { + pool := &ipamv1alpha1.IPPool{} + Expect(k8sClient.Get( + ctx, types.NamespacedName{Name: pool1Name, Namespace: TestNamespace}, pool)).NotTo(HaveOccurred()) + return len(pool.Status.Allocations) == 0 + }, 30, 5).Should(BeTrue()) + + By("Create Pool3, with selector which ignores all nodes") + // ranges for two nodes only can be allocated + pool3Spec := ipamv1alpha1.IPPoolSpec{ + Subnet: "172.17.0.0/24", + Gateway: "172.17.0.1", + PerNodeBlockSize: 100, + NodeSelector: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + MatchFields: nil, + }, + }, + }, + } + pool3 := &ipamv1alpha1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: pool3Name, + Namespace: TestNamespace, + }, + Spec: pool3Spec, + } + Expect(k8sClient.Create(ctx, pool3)).NotTo(HaveOccurred()) + + Consistently(func(g Gomega) bool { + pool := &ipamv1alpha1.IPPool{} + Expect(k8sClient.Get( + ctx, types.NamespacedName{Name: pool3Name, Namespace: TestNamespace}, pool)).NotTo(HaveOccurred()) + return len(pool.Status.Allocations) == 0 + }, 30, 5).Should(BeTrue()) + + By("Update nodes to match selector in pool3") // node1 should have range - node1 = getNode(testNode1) + node1 := getNode(testNode1) node1.Labels = map[string]string{"foo": "bar"} updateNode(node1) WaitAndCheckForStability(func(g Gomega) { - g.Expect(getRangeFromNode(testNode1)).NotTo(BeNil()) + g.Expect(getRangeForNode(testNode1, pool3Name)).NotTo(BeNil()) }, 15, 2) // node2 should have range - node2 = getNode(testNode2) + node2 := getNode(testNode2) node2.Labels = map[string]string{"foo": "bar"} - var node2Ranges map[string]*pool.IPPool updateNode(node2) WaitAndCheckForStability(func(g Gomega) { - node2Ranges = getRangeFromNode(testNode2) - g.Expect(node2Ranges).NotTo(BeNil()) + g.Expect(getRangeForNode(testNode2, pool3Name)).NotTo(BeNil()) }, 15, 2) // node3 should have no range, because no free ranges available @@ -242,18 +278,21 @@ var _ = Describe("App", func() { node3.Labels = map[string]string{"foo": "bar"} 
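
The `corev1.NodeSelector` used for pool3 above replaces the old flat `nodeSelector` label map from the ConfigMap format. Matching is now delegated to `k8s.io/component-helpers` (the new `go.mod` dependency; see `v1helper.MatchNodeSelectorTerms` in `pkg/ipam-controller/controllers/pool/pool.go` further down in this patch). A minimal, self-contained sketch of that matching, reusing the `foo=bar` selector from this test:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1helper "k8s.io/component-helpers/scheduling/corev1"
)

func main() {
	// Equivalent of the old ConfigMap entry `"nodeSelector": {"foo": "bar"}`.
	selector := &corev1.NodeSelector{
		NodeSelectorTerms: []corev1.NodeSelectorTerm{{
			MatchExpressions: []corev1.NodeSelectorRequirement{{
				Key:      "foo",
				Operator: corev1.NodeSelectorOpIn,
				Values:   []string{"bar"},
			}},
		}},
	}
	labeled := &corev1.Node{ObjectMeta: metav1.ObjectMeta{
		Name: "node1", Labels: map[string]string{"foo": "bar"},
	}}
	unlabeled := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node3"}}

	// A node matches if any single term matches all of its requirements.
	match, err := v1helper.MatchNodeSelectorTerms(labeled, selector)
	fmt.Println(match, err) // true <nil>
	match, err = v1helper.MatchNodeSelectorTerms(unlabeled, selector)
	fmt.Println(match, err) // false <nil>
}
```
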
updateNode(node3) WaitAndCheckForStability(func(g Gomega) { - g.Expect(getRangeFromNode(testNode3)).To(BeNil()) + g.Expect(getRangeForNode(testNode3, pool3Name)).To(BeNil()) }, 15, 5) + node2Range := getRangeForNode(testNode2, pool3Name) // remove label from node2, node3 should have a range now node2 = getNode(testNode2) node2.Labels = nil updateNode(node2) WaitAndCheckForStability(func(g Gomega) { - node3Ranges := getRangeFromNode(testNode3) - g.Expect(node3Ranges).NotTo(BeNil()) + node3Range := getRangeForNode(testNode3, pool3Name) + g.Expect(node3Range).NotTo(BeNil()) // should reuse ranges from node2 - g.Expect(node3Ranges).To(Equal(node2Ranges)) + matchRange := node3Range.StartIP == node2Range.StartIP && + node3Range.EndIP == node2Range.EndIP + g.Expect(matchRange).To(BeTrue()) }, 15, 2) By("Stop controller") diff --git a/cmd/ipam-controller/app/options/options.go b/cmd/ipam-controller/app/options/options.go index 1e323a0..db8962b 100644 --- a/cmd/ipam-controller/app/options/options.go +++ b/cmd/ipam-controller/app/options/options.go @@ -32,6 +32,7 @@ func New() *Options { LeaderElectionNamespace: "kube-system", ConfigMapName: "nvidia-k8s-ipam-config", ConfigMapNamespace: "kube-system", + PoolsNamespace: "kube-system", } } @@ -44,6 +45,7 @@ type Options struct { LeaderElectionNamespace string ConfigMapName string ConfigMapNamespace string + PoolsNamespace string } // AddNamedFlagSets register flags for common options in NamedFlagSets @@ -70,6 +72,8 @@ func (o *Options) AddNamedFlagSets(sharedFS *cliflag.NamedFlagSets) { o.ConfigMapName, "The name of the ConfigMap which holds controller configuration") controllerFS.StringVar(&o.ConfigMapNamespace, "config-namespace", o.ConfigMapNamespace, "The name of the namespace where ConfigMap with controller configuration exist") + controllerFS.StringVar(&o.PoolsNamespace, "ippools-namespace", + o.PoolsNamespace, "The name of the namespace to watch for IPPools CRs") } // Validate registered options diff --git a/deploy/nv-ipam.yaml b/deploy/nv-ipam.yaml index ae029d4..493a261 100644 --- a/deploy/nv-ipam.yaml +++ b/deploy/nv-ipam.yaml @@ -135,6 +135,22 @@ rules: - get - list - watch + - apiGroups: + - nv-ipam.nvidia.com + resources: + - ippools + verbs: + - get + - list + - watch + - apiGroups: + - nv-ipam.nvidia.com + resources: + - ippools/status + verbs: + - get + - update + - patch - apiGroups: - coordination.k8s.io resources: @@ -244,6 +260,7 @@ spec: - --config-namespace=$(POD_NAMESPACE) - --leader-elect=true - --leader-elect-namespace=$(POD_NAMESPACE) + - --ippools-namespace=$(POD_NAMESPACE) env: - name: POD_NAMESPACE valueFrom: diff --git a/go.mod b/go.mod index 7767783..5aafe3d 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( k8s.io/apimachinery v0.26.4 k8s.io/client-go v0.26.4 k8s.io/component-base v0.26.4 + k8s.io/component-helpers v0.26.4 k8s.io/klog/v2 v2.90.1 sigs.k8s.io/controller-runtime v0.14.6 ) diff --git a/go.sum b/go.sum index 0406159..569787f 100644 --- a/go.sum +++ b/go.sum @@ -663,6 +663,8 @@ k8s.io/client-go v0.26.4 h1:/7P/IbGBuT73A+G97trf44NTPSNqvuBREpOfdLbHvD4= k8s.io/client-go v0.26.4/go.mod h1:6qOItWm3EwxJdl/8p5t7FWtWUOwyMdA8N9ekbW4idpI= k8s.io/component-base v0.26.4 h1:Bg2xzyXNKL3eAuiTEu3XE198d6z22ENgFgGQv2GGOUk= k8s.io/component-base v0.26.4/go.mod h1:lTuWL1Xz/a4e80gmIC3YZG2JCO4xNwtKWHJWeJmsq20= +k8s.io/component-helpers v0.26.4 h1:qbZrh8QmfL+Yn7lWEI/BPrvITGgkBy33djP5Tzsu2hA= +k8s.io/component-helpers v0.26.4/go.mod h1:2Siz5eWmaKu0khASXMTCfJuASZAbCPX9mtjlCe5IWRs= k8s.io/klog/v2 v2.90.1 
h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= diff --git a/pkg/ipam-controller/allocator/allocator.go b/pkg/ipam-controller/allocator/allocator.go index 97ea52a..f5155df 100644 --- a/pkg/ipam-controller/allocator/allocator.go +++ b/pkg/ipam-controller/allocator/allocator.go @@ -15,7 +15,6 @@ package allocator import ( "context" - "encoding/json" "errors" "fmt" "math" @@ -29,6 +28,8 @@ import ( "github.com/Mellanox/nvidia-k8s-ipam/pkg/ip" "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" + + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" ) var ErrNoFreeRanges = errors.New("no free IP ranges available") @@ -141,6 +142,11 @@ func (pa *poolAllocator) Load(ctx context.Context, allocData nodeAllocationInfo) log.Info("range check failed", "reason", err.Error()) return err } + a, ok := pa.allocations[allocData.Node] + if ok && allocData.allocatedRange.StartIP.Equal(a.StartIP) { + // Node Allocation already exists and is the same + return nil + } allocations := pa.getAllocationsAsSlice() for _, a := range allocations { // range size is always the same, then an overlap means the blocks are necessarily equal. @@ -200,6 +206,7 @@ type AllocationConfig struct { Subnet *net.IPNet Gateway net.IP PerNodeBlockSize int + NodeSelector *corev1.NodeSelector } func (pc *AllocationConfig) Equal(other *AllocationConfig) bool { @@ -214,82 +221,70 @@ func New() *Allocator { type Allocator struct { lock sync.Mutex allocators map[string]*poolAllocator - configured bool } -// IsConfigured returns true if allocator is configured -func (a *Allocator) IsConfigured() bool { +// IsPoolLoaded returns true if allocator is configured +func (a *Allocator) IsPoolLoaded(poolName string) bool { a.lock.Lock() defer a.lock.Unlock() - return a.configured + _, exist := a.allocators[poolName] + return exist } // Configure update allocator configuration -func (a *Allocator) Configure(ctx context.Context, configs []AllocationConfig) { +func (a *Allocator) Configure(ctx context.Context, config AllocationConfig) { a.lock.Lock() defer a.lock.Unlock() - a.configure(ctx, configs) + a.configureSinglePool(ctx, config) } // ConfigureAndLoadAllocations configures allocator and load data from the node objects -func (a *Allocator) ConfigureAndLoadAllocations(ctx context.Context, configs []AllocationConfig, nodes []corev1.Node) { +func (a *Allocator) ConfigureAndLoadAllocations(ctx context.Context, config AllocationConfig, p *ipamv1alpha1.IPPool) { log := logr.FromContextOrDiscard(ctx) a.lock.Lock() defer a.lock.Unlock() - a.configure(ctx, configs) - for i := range nodes { - node := nodes[i] - nodeLog := log.WithValues("node", node.Name) - poolCfg, err := pool.NewConfigReader(&node) + a.configureSinglePool(ctx, config) + poolData := a.allocators[config.PoolName] + for i := range p.Status.Allocations { + alloc := p.Status.Allocations[i] + p := &pool.IPPool{ + Name: p.Name, + Subnet: p.Spec.Subnet, + Gateway: p.Spec.Gateway, + StartIP: alloc.StartIP, + EndIP: alloc.EndIP, + } + allocInfo, err := ipPoolConfigToNodeAllocationInfo(alloc.NodeName, p) + logErr := func(err error) { + log.Info("ignore allocation info from node", "node", alloc.NodeName, + "pool", p.Name, "reason", err.Error()) + } if err != nil { - nodeLog.Info("skip loading data from the node", "reason", err.Error()) + logErr(err) continue } - // load allocators only for know pools (pools which 
are defined in the config) - for poolName, poolData := range a.allocators { - nodeIPPoolConfig := poolCfg.GetPoolByName(poolName) - allocInfo, err := ipPoolConfigToNodeAllocationInfo(node.Name, nodeIPPoolConfig) - logErr := func(err error) { - nodeLog.Info("ignore allocation info from node", - "pool", poolName, "reason", err.Error()) - } - if err != nil { - logErr(err) - continue - } - - if err := poolData.Load(ctx, allocInfo); err != nil { - logErr(err) - continue - } - a.allocators[poolName] = poolData + if err := poolData.Load(ctx, allocInfo); err != nil { + logErr(err) + continue } } - a.configured = true } -// Allocate allocates ranges for node from all pools -func (a *Allocator) Allocate(ctx context.Context, nodeName string) (map[string]*pool.IPPool, error) { - log := logr.FromContextOrDiscard(ctx).WithValues("node", nodeName) +// AllocateFromPool allocates ranges for node from specific pool +func (a *Allocator) AllocateFromPool(ctx context.Context, poolName string, nodeName string) (*pool.IPPool, error) { a.lock.Lock() defer a.lock.Unlock() - nodeAllocations := make(map[string]*pool.IPPool, len(a.allocators)) - for poolName, allocator := range a.allocators { - allocation, err := allocator.Allocate(ctx, nodeName) - if err != nil { - a.deallocate(ctx, nodeName) - return nil, err - } - nodeAllocations[poolName] = nodeAllocationInfoToIPPoolConfig(poolName, allocation) + allocator, ok := a.allocators[poolName] + if !ok { + return nil, fmt.Errorf("fail to allocate, pool %s does not exists", poolName) } - - if log.V(1).Enabled() { - //nolint:errchkjson - dump, _ := json.Marshal(nodeAllocations) - log.V(1).Info("allocated ranges", "ranges", dump) + allocation, err := allocator.Allocate(ctx, nodeName) + if err != nil { + a.deallocate(ctx, nodeName) + return nil, err } - return nodeAllocations, nil + return nodeAllocationInfoToIPPoolConfig(poolName, allocation), nil } func (a *Allocator) deallocate(ctx context.Context, nodeName string) { @@ -305,32 +300,36 @@ func (a *Allocator) Deallocate(ctx context.Context, nodeName string) { a.deallocate(ctx, nodeName) } -func (a *Allocator) configure(ctx context.Context, configs []AllocationConfig) { - log := logr.FromContextOrDiscard(ctx) - for _, cfg := range configs { - poolLog := log.WithValues("pool", cfg.PoolName, - "gateway", cfg.Gateway.String(), "subnet", cfg.Subnet.String(), "perNodeBlockSize", cfg.PerNodeBlockSize) - pAlloc, exist := a.allocators[cfg.PoolName] - if exist { - poolLog.Info("update IP pool allocator config") - pAlloc.Configure(ctx, cfg) - } else { - poolLog.Info("initialize IP pool allocator") - a.allocators[cfg.PoolName] = newPoolAllocator(cfg) - } +// Deallocate release all ranges allocated for node +func (a *Allocator) DeallocateFromPool(ctx context.Context, poolName string, nodeName string) { + a.lock.Lock() + defer a.lock.Unlock() + pAlloc, exist := a.allocators[poolName] + if exist { + pAlloc.Deallocate(ctx, nodeName) } - // remove outdated pools from controller state - for poolName := range a.allocators { - found := false - for _, cfg := range configs { - if poolName == cfg.PoolName { - found = true - break - } - } - if !found { - delete(a.allocators, poolName) - } +} + +// Remove Pool from Allocator +func (a *Allocator) RemovePool(ctx context.Context, poolName string) { + log := logr.FromContextOrDiscard(ctx) + a.lock.Lock() + defer a.lock.Unlock() + log.Info("remove IP Pool", "pool", poolName) + delete(a.allocators, poolName) +} + +func (a *Allocator) configureSinglePool(ctx context.Context, cfg AllocationConfig) { + log 
:= logr.FromContextOrDiscard(ctx) + poolLog := log.WithValues("pool", cfg.PoolName, + "gateway", cfg.Gateway.String(), "subnet", cfg.Subnet.String(), "perNodeBlockSize", cfg.PerNodeBlockSize) + pAlloc, exist := a.allocators[cfg.PoolName] + if exist { + poolLog.Info("update IP pool allocator config") + pAlloc.Configure(ctx, cfg) + } else { + poolLog.Info("initialize IP pool allocator") + a.allocators[cfg.PoolName] = newPoolAllocator(cfg) } } diff --git a/pkg/ipam-controller/allocator/allocator_test.go b/pkg/ipam-controller/allocator/allocator_test.go index eae1dc1..5cfb17b 100644 --- a/pkg/ipam-controller/allocator/allocator_test.go +++ b/pkg/ipam-controller/allocator/allocator_test.go @@ -20,8 +20,9 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/allocator" "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" ) @@ -66,11 +67,11 @@ var _ = Describe("Allocator", func() { ctx = context.Background() }) - It("Allocated/Deallocate without config", func() { + It("Allocated/Deallocate not existing pool", func() { a := allocator.New() - alloc, err := a.Allocate(ctx, testNodeName1) - Expect(alloc).To(BeEmpty()) - Expect(err).To(BeNil()) + alloc, err := a.AllocateFromPool(ctx, "ghost", testNodeName1) + Expect(alloc).To(BeNil()) + Expect(err).To(HaveOccurred()) a.Deallocate(ctx, testNodeName1) }) @@ -78,53 +79,92 @@ var _ = Describe("Allocator", func() { pool1 := getPool1Config() pool2 := getPool2Config() a := allocator.New() - a.Configure(ctx, []allocator.AllocationConfig{pool1, pool2}) - node1Alloc, err := a.Allocate(ctx, testNodeName1) + a.Configure(ctx, pool1) + a.Configure(ctx, pool2) + node1AllocPool1, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName1) + Expect(err).ToNot(HaveOccurred()) + node1AllocPool2, err := a.AllocateFromPool(ctx, testPoolName2, testNodeName1) + Expect(err).ToNot(HaveOccurred()) + Expect(node1AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.1")) + Expect(node1AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.15")) + Expect(node1AllocPool2.StartIP).To(BeEquivalentTo("172.16.0.1")) + Expect(node1AllocPool2.EndIP).To(BeEquivalentTo("172.16.0.10")) + + node1AllocSecondCall, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName1) + Expect(err).NotTo(HaveOccurred()) + Expect(node1AllocSecondCall).To(Equal(node1AllocPool1)) + + node1AllocSecondCall, err = a.AllocateFromPool(ctx, testPoolName2, testNodeName1) Expect(err).NotTo(HaveOccurred()) - Expect(node1Alloc[testPoolName1].StartIP).To(BeEquivalentTo("192.168.0.1")) - Expect(node1Alloc[testPoolName1].EndIP).To(BeEquivalentTo("192.168.0.15")) - Expect(node1Alloc[testPoolName2].StartIP).To(BeEquivalentTo("172.16.0.1")) - Expect(node1Alloc[testPoolName2].EndIP).To(BeEquivalentTo("172.16.0.10")) + Expect(node1AllocSecondCall).To(Equal(node1AllocPool2)) - node1AllocSecondCall, err := a.Allocate(ctx, testNodeName1) + node2AllocPool1, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName2) + Expect(err).NotTo(HaveOccurred()) + node2AllocPool2, err := a.AllocateFromPool(ctx, testPoolName2, testNodeName2) + Expect(err).NotTo(HaveOccurred()) + Expect(node2AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.16")) + Expect(node2AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.30")) + Expect(node2AllocPool2.StartIP).To(BeEquivalentTo("172.16.0.11")) + Expect(node2AllocPool2.EndIP).To(BeEquivalentTo("172.16.0.20")) + + 
node3AllocPool1, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName3) + Expect(err).NotTo(HaveOccurred()) + node3AllocPool2, err := a.AllocateFromPool(ctx, testPoolName2, testNodeName3) Expect(err).NotTo(HaveOccurred()) - Expect(node1AllocSecondCall).To(Equal(node1Alloc)) + Expect(node3AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.31")) + Expect(node3AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.45")) + Expect(node3AllocPool2.StartIP).To(BeEquivalentTo("172.16.0.21")) + Expect(node3AllocPool2.EndIP).To(BeEquivalentTo("172.16.0.30")) - node2Alloc, err := a.Allocate(ctx, testNodeName2) + node4AllocPool1, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName4) Expect(err).NotTo(HaveOccurred()) - Expect(node2Alloc[testPoolName1].StartIP).To(BeEquivalentTo("192.168.0.16")) - Expect(node2Alloc[testPoolName1].EndIP).To(BeEquivalentTo("192.168.0.30")) - Expect(node2Alloc[testPoolName2].StartIP).To(BeEquivalentTo("172.16.0.11")) - Expect(node2Alloc[testPoolName2].EndIP).To(BeEquivalentTo("172.16.0.20")) - - node3Alloc, err := a.Allocate(ctx, testNodeName3) - Expect(node3Alloc[testPoolName1].StartIP).To(BeEquivalentTo("192.168.0.31")) - Expect(node3Alloc[testPoolName1].EndIP).To(BeEquivalentTo("192.168.0.45")) - Expect(node3Alloc[testPoolName2].StartIP).To(BeEquivalentTo("172.16.0.21")) - Expect(node3Alloc[testPoolName2].EndIP).To(BeEquivalentTo("172.16.0.30")) - - node4Alloc, err := a.Allocate(ctx, testNodeName4) - Expect(node4Alloc[testPoolName1].StartIP).To(BeEquivalentTo("192.168.0.46")) - Expect(node4Alloc[testPoolName1].EndIP).To(BeEquivalentTo("192.168.0.60")) - Expect(node4Alloc[testPoolName2].StartIP).To(BeEquivalentTo("172.16.0.31")) - Expect(node4Alloc[testPoolName2].EndIP).To(BeEquivalentTo("172.16.0.40")) + node4AllocPool2, err := a.AllocateFromPool(ctx, testPoolName2, testNodeName4) + Expect(err).NotTo(HaveOccurred()) + Expect(node4AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.46")) + Expect(node4AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.60")) + Expect(node4AllocPool2.StartIP).To(BeEquivalentTo("172.16.0.31")) + Expect(node4AllocPool2.EndIP).To(BeEquivalentTo("172.16.0.40")) // deallocate for node3 and node1 a.Deallocate(ctx, testNodeName1) a.Deallocate(ctx, testNodeName3) // allocate again, testNodeName3 should have IPs from index 0, testNodeName3 IPs from index 2 - node3Alloc, err = a.Allocate(ctx, testNodeName3) - Expect(node3Alloc[testPoolName1].StartIP).To(BeEquivalentTo("192.168.0.1")) - Expect(node3Alloc[testPoolName1].EndIP).To(BeEquivalentTo("192.168.0.15")) - Expect(node3Alloc[testPoolName2].StartIP).To(BeEquivalentTo("172.16.0.1")) - Expect(node3Alloc[testPoolName2].EndIP).To(BeEquivalentTo("172.16.0.10")) - - node1Alloc, err = a.Allocate(ctx, testNodeName1) - Expect(node1Alloc[testPoolName1].StartIP).To(BeEquivalentTo("192.168.0.31")) - Expect(node1Alloc[testPoolName1].EndIP).To(BeEquivalentTo("192.168.0.45")) - Expect(node1Alloc[testPoolName2].StartIP).To(BeEquivalentTo("172.16.0.21")) - Expect(node1Alloc[testPoolName2].EndIP).To(BeEquivalentTo("172.16.0.30")) + node3AllocPool1, err = a.AllocateFromPool(ctx, testPoolName1, testNodeName3) + Expect(err).NotTo(HaveOccurred()) + node3AllocPool2, err = a.AllocateFromPool(ctx, testPoolName2, testNodeName3) + Expect(err).NotTo(HaveOccurred()) + Expect(node3AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.1")) + Expect(node3AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.15")) + Expect(node3AllocPool2.StartIP).To(BeEquivalentTo("172.16.0.1")) + Expect(node3AllocPool2.EndIP).To(BeEquivalentTo("172.16.0.10")) 
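
The literal IPs asserted throughout this test follow from the allocator's layout: blocks of `PerNodeBlockSize` addresses are handed out back to back, starting at the first IP after the network address, and a freed block index is reused by the next allocation (which is why node3 receives block 0 after node1 is deallocated). The pool configs come from `getPool1Config`/`getPool2Config`, which are not part of this diff; from the assertions they appear to be 192.168.0.0/24 with a block size of 15 and 172.16.0.0/24 with a block size of 10. A sketch of the arithmetic under that assumption:

```go
package main

import (
	"fmt"
	"net"
)

// blockForNode returns the start/end IP of the i-th (0-based) per-node block
// in subnet, assuming blocks are laid out back to back beginning at the first
// IP after the network address — matching the ranges asserted in these tests.
func blockForNode(subnet *net.IPNet, blockSize, i int) (net.IP, net.IP) {
	base := subnet.IP.To4()
	offset := func(n int) net.IP {
		v := uint32(base[0])<<24 | uint32(base[1])<<16 | uint32(base[2])<<8 | uint32(base[3])
		v += uint32(n)
		return net.IPv4(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)).To4()
	}
	start := offset(i*blockSize + 1) // skip the network address itself
	end := offset((i + 1) * blockSize)
	return start, end
}

func main() {
	_, pool1, _ := net.ParseCIDR("192.168.0.0/24")
	for i := 0; i < 3; i++ {
		s, e := blockForNode(pool1, 15, i)
		fmt.Printf("block %d: %s - %s\n", i, s, e)
	}
	// block 0: 192.168.0.1 - 192.168.0.15
	// block 1: 192.168.0.16 - 192.168.0.30
	// block 2: 192.168.0.31 - 192.168.0.45
}
```
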
+ + node1AllocPool1, err = a.AllocateFromPool(ctx, testPoolName1, testNodeName1) + Expect(err).ToNot(HaveOccurred()) + node1AllocPool2, err = a.AllocateFromPool(ctx, testPoolName2, testNodeName1) + Expect(err).ToNot(HaveOccurred()) + Expect(node1AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.31")) + Expect(node1AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.45")) + Expect(node1AllocPool2.StartIP).To(BeEquivalentTo("172.16.0.21")) + Expect(node1AllocPool2.EndIP).To(BeEquivalentTo("172.16.0.30")) + }) + + It("Deallocate from pool", func() { + pool1 := getPool1Config() + a := allocator.New() + a.Configure(ctx, pool1) + node1AllocPool1, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName1) + Expect(err).ToNot(HaveOccurred()) + Expect(node1AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.1")) + Expect(node1AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.15")) + + a.DeallocateFromPool(ctx, testPoolName1, testNodeName1) + + //Allocate to Node2, should get first range + node2AllocPool1, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName2) + Expect(err).NotTo(HaveOccurred()) + Expect(node2AllocPool1.StartIP).To(BeEquivalentTo("192.168.0.1")) + Expect(node2AllocPool1.EndIP).To(BeEquivalentTo("192.168.0.15")) }) It("No free ranges", func() { @@ -132,51 +172,63 @@ var _ = Describe("Allocator", func() { // pool is /24, must fail on the second allocation pool1.PerNodeBlockSize = 200 a := allocator.New() - a.Configure(ctx, []allocator.AllocationConfig{pool1}) - node1Alloc, err := a.Allocate(ctx, testNodeName1) + a.Configure(ctx, pool1) + node1Alloc, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName1) Expect(err).NotTo(HaveOccurred()) - Expect(node1Alloc).To(HaveLen(1)) + Expect(node1Alloc).NotTo(BeNil()) - _, err = a.Allocate(ctx, testNodeName2) + _, err = a.AllocateFromPool(ctx, testPoolName1, testNodeName2) Expect(errors.Is(err, allocator.ErrNoFreeRanges)).To(BeTrue()) }) + It("return NoFreeRanges in case if IP is too large", func() { _, subnet, _ := net.ParseCIDR("255.255.255.0/24") + testPool := "pool" a := allocator.New() - a.Configure(ctx, []allocator.AllocationConfig{{ - PoolName: "pool", + a.Configure(ctx, allocator.AllocationConfig{ + PoolName: testPool, Subnet: subnet, Gateway: net.ParseIP("255.255.255.1"), - PerNodeBlockSize: 200}}) - _, err := a.Allocate(ctx, testNodeName1) + PerNodeBlockSize: 200}) + _, err := a.AllocateFromPool(ctx, testPool, testNodeName1) Expect(err).NotTo(HaveOccurred()) - _, err = a.Allocate(ctx, testNodeName2) + _, err = a.AllocateFromPool(ctx, testPool, testNodeName2) Expect(errors.Is(err, allocator.ErrNoFreeRanges)).To(BeTrue()) }) It("Configure - reset allocations", func() { a := allocator.New() origConfig := getPool1Config() - a.Configure(ctx, []allocator.AllocationConfig{origConfig}) - _, err := a.Allocate(ctx, testNodeName1) + a.Configure(ctx, origConfig) + _, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName1) Expect(err).NotTo(HaveOccurred()) - node2Alloc, err := a.Allocate(ctx, testNodeName2) + node2Alloc, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName2) Expect(err).NotTo(HaveOccurred()) // update config with same configuration, should not reset allocations - a.Configure(ctx, []allocator.AllocationConfig{origConfig}) - node2AllocSecondCall, err := a.Allocate(ctx, testNodeName2) + a.Configure(ctx, origConfig) + node2AllocSecondCall, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName2) Expect(err).NotTo(HaveOccurred()) - 
Expect(node2AllocSecondCall[testPoolName1].StartIP).To(Equal(node2Alloc[testPoolName1].StartIP)) + Expect(node2AllocSecondCall.StartIP).To(Equal(node2Alloc.StartIP)) // reset config newCfg := origConfig newCfg.Gateway = net.ParseIP("192.168.0.2") - a.Configure(ctx, []allocator.AllocationConfig{newCfg}) - node2AllocThirdCall, err := a.Allocate(ctx, testNodeName2) + a.Configure(ctx, newCfg) + node2AllocThirdCall, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName2) Expect(err).NotTo(HaveOccurred()) // allocation begins from the start of the subnet - Expect(node2AllocThirdCall[testPoolName1].StartIP).NotTo(Equal(node2Alloc[testPoolName1].StartIP)) + Expect(node2AllocThirdCall.StartIP).NotTo(Equal(node2Alloc.StartIP)) + }) + + It("Remove Pool", func() { + pool1 := getPool1Config() + a := allocator.New() + Expect(a.IsPoolLoaded(testPoolName1)).To(BeFalse()) + a.Configure(ctx, pool1) + Expect(a.IsPoolLoaded(testPoolName1)).To(BeTrue()) + a.RemovePool(ctx, testPoolName1) + Expect(a.IsPoolLoaded(testPoolName1)).To(BeFalse()) }) It("ConfigureAndLoadAllocations - Data load test", func() { @@ -302,32 +354,34 @@ var _ = Describe("Allocator", func() { } for _, test := range testCases { a := allocator.New() - Expect(a.IsConfigured()).To(BeFalse()) - pool1 := getPool1Config() + Expect(a.IsPoolLoaded(testPoolName1)).To(BeFalse()) - defNode := corev1.Node{} - defNode.SetName(testPoolName1) - defNodeAlloc := map[string]*pool.IPPool{ - testPoolName1: {Name: testPoolName1, - Subnet: "192.168.0.0/24", - StartIP: "192.168.0.1", - EndIP: "192.168.0.15", - Gateway: "192.168.0.1", - }, + pool1 := getPool1Config() + poolCR := &ipamv1alpha1.IPPool{ + ObjectMeta: v1.ObjectMeta{Name: testPoolName1}, + Spec: ipamv1alpha1.IPPoolSpec{Subnet: pool1.Subnet.String(), PerNodeBlockSize: pool1.PerNodeBlockSize, + Gateway: pool1.Gateway.String()}, + Status: ipamv1alpha1.IPPoolStatus{ + Allocations: []ipamv1alpha1.Allocation{ + { + NodeName: testPoolName1, + StartIP: "192.168.0.1", + EndIP: "192.168.0.15", + }, + { + NodeName: testNodeName2, + StartIP: test.in.StartIP, + EndIP: test.in.EndIP, + }, + }}, } - Expect(pool.SetIPBlockAnnotation(&defNode, defNodeAlloc)).NotTo(HaveOccurred()) + a.ConfigureAndLoadAllocations(ctx, pool1, poolCR) + Expect(a.IsPoolLoaded(testPoolName1)).To(BeTrue()) - testNodeAlloc := map[string]*pool.IPPool{ - testPoolName1: test.in, - } - testNode := corev1.Node{} - testNode.SetName(testNodeName2) - Expect(pool.SetIPBlockAnnotation(&testNode, testNodeAlloc)).NotTo(HaveOccurred()) + testNodeAlloc := test.in - a.ConfigureAndLoadAllocations(ctx, []allocator.AllocationConfig{pool1}, []corev1.Node{defNode, {}, testNode}) - Expect(a.IsConfigured()).To(BeTrue()) - node1AllocFromAllocator, err := a.Allocate(ctx, testNodeName2) + node1AllocFromAllocator, err := a.AllocateFromPool(ctx, testPoolName1, testNodeName2) Expect(err).NotTo(HaveOccurred()) if test.loaded { Expect(node1AllocFromAllocator).To(Equal(testNodeAlloc)) diff --git a/pkg/ipam-controller/config/config.go b/pkg/ipam-controller/config/config.go index 1f7f9f0..61e4556 100644 --- a/pkg/ipam-controller/config/config.go +++ b/pkg/ipam-controller/config/config.go @@ -58,34 +58,39 @@ func (c *Config) Validate() error { return errList.ToAggregate() } for poolName, pool := range c.Pools { - if err := cniUtils.ValidateNetworkName(poolName); err != nil { - return fmt.Errorf("invalid IP pool name %s, should be compatible with CNI network name", poolName) - } - _, network, err := net.ParseCIDR(pool.Subnet) - if err != nil { - return fmt.Errorf("IP pool 
%s contains invalid subnet: %v", poolName, err) - } + return ValidatePool(poolName, pool.Subnet, pool.Gateway, pool.PerNodeBlockSize) + } + return nil +} - if pool.PerNodeBlockSize < 2 { - return fmt.Errorf("perNodeBlockSize should be at least 2") - } +func ValidatePool(name string, subnet string, gateway string, blockSize int) error { + if err := cniUtils.ValidateNetworkName(name); err != nil { + return fmt.Errorf("invalid IP pool name %s, should be compatible with CNI network name", name) + } + _, network, err := net.ParseCIDR(subnet) + if err != nil { + return fmt.Errorf("IP pool %s contains invalid subnet: %v", name, err) + } - setBits, bitsTotal := network.Mask.Size() - // possibleIPs = net size - network address - broadcast - possibleIPs := int(math.Pow(2, float64(bitsTotal-setBits))) - 2 - if possibleIPs < pool.PerNodeBlockSize { - // config is not valid even if only one node exist in the cluster - return fmt.Errorf("IP pool subnet contains less available IPs then " + - "requested by perNodeBlockSize parameter") - } - parsedGW := net.ParseIP(pool.Gateway) - if len(parsedGW) == 0 { - return fmt.Errorf("IP pool contains invalid gateway configuration: invalid IP") - } - if !network.Contains(parsedGW) { - return fmt.Errorf("IP pool contains invalid gateway configuration: " + - "gateway is outside of the subnet") - } + if blockSize < 2 { + return fmt.Errorf("perNodeBlockSize should be at least 2") + } + + setBits, bitsTotal := network.Mask.Size() + // possibleIPs = net size - network address - broadcast + possibleIPs := int(math.Pow(2, float64(bitsTotal-setBits))) - 2 + if possibleIPs < blockSize { + // config is not valid even if only one node exist in the cluster + return fmt.Errorf("IP pool subnet contains less available IPs then " + + "requested by perNodeBlockSize parameter") + } + parsedGW := net.ParseIP(gateway) + if len(parsedGW) == 0 { + return fmt.Errorf("IP pool contains invalid gateway configuration: invalid IP") + } + if !network.Contains(parsedGW) { + return fmt.Errorf("IP pool contains invalid gateway configuration: " + + "gateway is outside of the subnet") } return nil } diff --git a/pkg/ipam-controller/controllers/configmap/configmap.go b/pkg/ipam-controller/controllers/configmap/configmap.go deleted file mode 100644 index 8e150cf..0000000 --- a/pkg/ipam-controller/controllers/configmap/configmap.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "encoding/json" - "fmt" - "net" - - corev1 "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/allocator" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/config" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/selector" -) - -// ConfigMapReconciler reconciles ConfigMap objects -type ConfigMapReconciler struct { - Allocator *allocator.Allocator - Selector *selector.Selector - ConfigMapName string - ConfigMapNamespace string - - ConfigEventCh chan event.GenericEvent - client.Client - Scheme *runtime.Scheme -} - -// Reconcile contains logic to sync Node objects -func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - reqLog := log.FromContext(ctx) - if req.Name != r.ConfigMapName || req.Namespace != r.ConfigMapNamespace { - // this should never happen because of the watcher configuration of the manager from controller-runtime pkg - return ctrl.Result{}, nil - } - - cfg := &corev1.ConfigMap{} - err := r.Client.Get(ctx, req.NamespacedName, cfg) - if err != nil { - if apiErrors.IsNotFound(err) { - reqLog.Info("ConfigMap not found, wait for creation") - return ctrl.Result{}, nil - } - reqLog.Error(err, "failed to get ConfigMap object from the cache") - return ctrl.Result{}, err - } - - confData, exist := cfg.Data[config.ConfigMapKey] - if !exist { - reqLog.Error(nil, fmt.Sprintf("invalid configuration: ConfigMap %s doesn't contain %s key", - r.ConfigMapNamespace, config.ConfigMapKey)) - return ctrl.Result{}, nil - } - - controllerConfig := &config.Config{} - if err := json.Unmarshal([]byte(confData), controllerConfig); err != nil { - reqLog.Error(err, fmt.Sprintf("invalid configuration: ConfigMap %s contains invalid JSON", - config.ConfigMapKey)) - return ctrl.Result{}, nil - } - - if err := controllerConfig.Validate(); err != nil { - reqLog.Error(err, fmt.Sprintf("invalid configuration: ConfigMap %s contains invalid config", - config.ConfigMapKey)) - return ctrl.Result{}, nil - } - - r.Selector.Update(controllerConfig.NodeSelector) - - allocatorPools := make([]allocator.AllocationConfig, 0, len(controllerConfig.Pools)) - for pName, p := range controllerConfig.Pools { - // already validated by Validate function - _, subnet, _ := net.ParseCIDR(p.Subnet) - gateway := net.ParseIP(p.Gateway) - allocatorPools = append(allocatorPools, allocator.AllocationConfig{ - PoolName: pName, - Subnet: subnet, - Gateway: gateway, - PerNodeBlockSize: p.PerNodeBlockSize, - }) - } - - if r.Allocator.IsConfigured() { - r.Allocator.Configure(ctx, allocatorPools) - } else { - nodeList := &corev1.NodeList{} - if err := r.Client.List(ctx, nodeList); err != nil { - return ctrl.Result{}, err - } - r.Allocator.ConfigureAndLoadAllocations(ctx, allocatorPools, nodeList.Items) - } - - // config updated, trigger sync for all nodes - nodeList := &corev1.NodeList{} - if err := r.Client.List(ctx, nodeList); err != nil { - return ctrl.Result{}, err - } - for _, n := range nodeList.Items { - r.ConfigEventCh <- event.GenericEvent{ - Object: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: n.Name}, - }} - } - - return ctrl.Result{}, nil -} - -// SetupWithManager sets up the 
controller with the Manager. -func (r *ConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&corev1.ConfigMap{}). - Complete(r) -} diff --git a/pkg/ipam-controller/controllers/node/node.go b/pkg/ipam-controller/controllers/node/node.go index 26ff442..65c381d 100644 --- a/pkg/ipam-controller/controllers/node/node.go +++ b/pkg/ipam-controller/controllers/node/node.go @@ -15,35 +15,27 @@ package controllers import ( "context" - "errors" - "reflect" - "time" apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" + + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" corev1 "k8s.io/api/core/v1" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/allocator" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/selector" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" ) // NodeReconciler reconciles Node objects type NodeReconciler struct { - Allocator *allocator.Allocator - Selector *selector.Selector - - ConfigEventCh chan event.GenericEvent + Allocator *allocator.Allocator + PoolsNamespace string + NodeEventCh chan event.GenericEvent client.Client Scheme *runtime.Scheme } @@ -51,74 +43,26 @@ type NodeReconciler struct { // Reconcile contains logic to sync Node objects func (r *NodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLog := log.FromContext(ctx) - if !r.Allocator.IsConfigured() { - reqLog.V(1).Info("allocator is not yet configured, requeue") - return ctrl.Result{RequeueAfter: time.Second, Requeue: true}, nil - } - + reqLog.Info("Notification on Node", "name", req.Name) node := &corev1.Node{} err := r.Client.Get(ctx, req.NamespacedName, node) if err != nil { - if apiErrors.IsNotFound(err) { - reqLog.Info("node object removed, deallocate ranges") - r.Allocator.Deallocate(ctx, req.Name) - return ctrl.Result{}, nil + if !apiErrors.IsNotFound(err) { + return ctrl.Result{}, err } - return ctrl.Result{}, err - } - - if !r.Selector.Match(node) { - reqLog.Info("node doesn't match selector, ensure range is not allocated") - r.Allocator.Deallocate(ctx, node.Name) - return r.cleanAnnotation(ctx, node) + reqLog.Info("node object removed, deallocate ranges") + r.Allocator.Deallocate(ctx, req.Name) } - - var existingNodeAlloc map[string]*pool.IPPool - poolCfg, err := pool.NewConfigReader(node) - if err == nil { - existingNodeAlloc = poolCfg.GetPools() - } - - expectedAlloc, err := r.Allocator.Allocate(ctx, node.Name) - if err != nil { - if errors.Is(allocator.ErrNoFreeRanges, err) { - _, err := r.cleanAnnotation(ctx, node) - if err != nil { - return ctrl.Result{}, err - } - // keep retrying to allocated IP - return ctrl.Result{RequeueAfter: time.Second * 5}, nil - } + // node updated, trigger sync for all pools + poolList := &ipamv1alpha1.IPPoolList{} + if err := r.Client.List(ctx, poolList, client.InNamespace(r.PoolsNamespace)); err != nil { return ctrl.Result{}, err } - if reflect.DeepEqual(existingNodeAlloc, expectedAlloc) { - reqLog.Info("node ranges are up-to-date") - return ctrl.Result{}, nil - } - - if err := 
pool.SetIPBlockAnnotation(node, expectedAlloc); err != nil { - return ctrl.Result{}, err - } - - if err := r.Client.Update(ctx, node); err != nil { - reqLog.Info("failed to set annotation on the node object, deallocate ranges and retry", - "reason", err.Error()) - r.Allocator.Deallocate(ctx, node.Name) - return ctrl.Result{}, err - } - reqLog.Info("node object updated") - - return ctrl.Result{}, nil -} - -// remove annotation from the node object in the API -func (r *NodeReconciler) cleanAnnotation(ctx context.Context, node *corev1.Node) (ctrl.Result, error) { - if !pool.IPBlockAnnotationExists(node) { - return ctrl.Result{}, nil - } - pool.RemoveIPBlockAnnotation(node) - if err := r.Client.Update(ctx, node); err != nil { - return ctrl.Result{}, err + for _, p := range poolList.Items { + r.NodeEventCh <- event.GenericEvent{ + Object: &ipamv1alpha1.IPPool{ + ObjectMeta: metav1.ObjectMeta{Namespace: r.PoolsNamespace, Name: p.Name}, + }} } return ctrl.Result{}, nil } @@ -127,13 +71,5 @@ func (r *NodeReconciler) cleanAnnotation(ctx context.Context, node *corev1.Node) func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&corev1.Node{}). - // catch notifications received through chan from ConfigMap controller - Watches(&source.Channel{Source: r.ConfigEventCh}, handler.Funcs{ - GenericFunc: func(e event.GenericEvent, q workqueue.RateLimitingInterface) { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: e.Object.GetNamespace(), - Name: e.Object.GetName(), - }}) - }}). Complete(r) } diff --git a/pkg/ipam-controller/controllers/pool/pool.go b/pkg/ipam-controller/controllers/pool/pool.go new file mode 100644 index 0000000..12c4d96 --- /dev/null +++ b/pkg/ipam-controller/controllers/pool/pool.go @@ -0,0 +1,177 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "errors" + "net" + "sort" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + v1helper "k8s.io/component-helpers/scheduling/corev1" + + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/allocator" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/config" +) + +// PoolReconciler reconciles Pool objects +type PoolReconciler struct { + Allocator *allocator.Allocator + PoolsNamespace string + NodeEventCh chan event.GenericEvent + client.Client + Scheme *runtime.Scheme + recorder record.EventRecorder +} + +// Reconcile contains logic to sync Node objects +func (r *PoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqLog := log.FromContext(ctx) + if req.Namespace != r.PoolsNamespace { + // this should never happen because of the watcher configuration of the manager from controller-runtime pkg + return ctrl.Result{}, nil + } + + pool := &ipamv1alpha1.IPPool{} + err := r.Client.Get(ctx, req.NamespacedName, pool) + if err != nil { + if apiErrors.IsNotFound(err) { + reqLog.Info("Pool not found, removing from Allocator") + r.Allocator.RemovePool(ctx, req.Name) + return ctrl.Result{}, nil + } + reqLog.Error(err, "failed to get Pool object from the cache") + return ctrl.Result{}, err + } + reqLog.Info("Notification on Pool", "name", pool.Name) + + err = config.ValidatePool(pool.Name, pool.Spec.Subnet, pool.Spec.Gateway, pool.Spec.PerNodeBlockSize) + if err != nil { + return r.handleInvalidSpec(ctx, err, pool) + } + + // already validated by Validate function + _, subnet, _ := net.ParseCIDR(pool.Spec.Subnet) + gateway := net.ParseIP(pool.Spec.Gateway) + allocatorConfig := allocator.AllocationConfig{ + PoolName: pool.Name, + Subnet: subnet, + Gateway: gateway, + PerNodeBlockSize: pool.Spec.PerNodeBlockSize, + NodeSelector: pool.Spec.NodeSelector, + } + if !r.Allocator.IsPoolLoaded(pool.Name) { + r.Allocator.ConfigureAndLoadAllocations(ctx, allocatorConfig, pool) + } else { + r.Allocator.Configure(ctx, allocatorConfig) + } + + nodeList := &corev1.NodeList{} + if err := r.Client.List(ctx, nodeList); err != nil { + reqLog.Error(err, "failed to list Nodes") + return ctrl.Result{}, err + } + selectedNodes := make([]*corev1.Node, 0) + for i := range nodeList.Items { + node := nodeList.Items[i] + if pool.Spec.NodeSelector != nil { + match, err := v1helper.MatchNodeSelectorTerms(&node, pool.Spec.NodeSelector) + if err != nil { + reqLog.Error(err, "failed to match Node", "node", node.Name) + continue + } + if match { + selectedNodes = append(selectedNodes, &node) + } else { + reqLog.Info("node doesn't match selector, ensure range is not allocated", "node", node.Name) + r.Allocator.DeallocateFromPool(ctx, pool.Name, node.Name) + } + } else { + selectedNodes = append(selectedNodes, &node) + } + } + sort.Slice(selectedNodes, func(i, j int) bool { + return selectedNodes[i].Name < selectedNodes[j].Name + }) + + allocations := make([]ipamv1alpha1.Allocation, 
0) + for i := range selectedNodes { + node := selectedNodes[i] + a, err := r.Allocator.AllocateFromPool(ctx, pool.Name, node.Name) + if err != nil { + if errors.Is(allocator.ErrNoFreeRanges, err) { + reqLog.Error(err, "failed to allocate IPs", "node", node.Name) + // keep retrying to allocated IP + return ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + return ctrl.Result{}, err + } + alloc := ipamv1alpha1.Allocation{ + NodeName: node.Name, + StartIP: a.StartIP, + EndIP: a.EndIP, + } + allocations = append(allocations, alloc) + } + pool.Status.Allocations = allocations + if err := r.Status().Update(ctx, pool); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +func (r *PoolReconciler) handleInvalidSpec(ctx context.Context, + err error, pool *ipamv1alpha1.IPPool) (reconcile.Result, error) { + reqLog := log.FromContext(ctx) + reqLog.Error(err, "invalid Pool Spec, clearing Status") + pool.Status.Allocations = make([]ipamv1alpha1.Allocation, 0) + if err2 := r.Status().Update(ctx, pool); err2 != nil { + return ctrl.Result{}, err2 + } + r.recorder.Event(pool, "Warning", "InvalidSpec", err.Error()) + return ctrl.Result{Requeue: false}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PoolReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.recorder = mgr.GetEventRecorderFor("IPPoolController") + return ctrl.NewControllerManagedBy(mgr). + For(&ipamv1alpha1.IPPool{}). + // catch notifications received through chan from Node controller + Watches(&source.Channel{Source: r.NodeEventCh}, handler.Funcs{ + GenericFunc: func(e event.GenericEvent, q workqueue.RateLimitingInterface) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: e.Object.GetNamespace(), + Name: e.Object.GetName(), + }}) + }}). + Complete(r) +} diff --git a/pkg/ipam-controller/selector/selector.go b/pkg/ipam-controller/selector/selector.go deleted file mode 100644 index 589496e..0000000 --- a/pkg/ipam-controller/selector/selector.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package selector - -import ( - "sync" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" -) - -// New creates a new selector instance -func New() *Selector { - return &Selector{} -} - -// Selector holds labels selector -type Selector struct { - lock sync.RWMutex - selector map[string]string -} - -// Match check if selector match the node -func (s *Selector) Match(node *corev1.Node) bool { - s.lock.RLock() - defer s.lock.RUnlock() - if len(s.selector) == 0 { - return true - } - return labels.Set(s.selector).AsSelector().Matches(labels.Set(node.GetLabels())) -} - -// Update label selector -func (s *Selector) Update(newSelector map[string]string) { - s.lock.Lock() - defer s.lock.Unlock() - s.selector = newSelector -} diff --git a/pkg/ipam-controller/selector/selector_suite_test.go b/pkg/ipam-controller/selector/selector_suite_test.go deleted file mode 100644 index f81f3a2..0000000 --- a/pkg/ipam-controller/selector/selector_suite_test.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package selector_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestSelector(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Selector Suite") -} diff --git a/pkg/ipam-controller/selector/selector_test.go b/pkg/ipam-controller/selector/selector_test.go deleted file mode 100644 index 7b21bbc..0000000 --- a/pkg/ipam-controller/selector/selector_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package selector_test - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/selector" -) - -var _ = Describe("Selector", func() { - It("Empty selector - match all", func() { - s := selector.New() - Expect(s.Match(&corev1.Node{})).To(BeTrue()) - }) - It("Match", func() { - s := selector.New() - labels := map[string]string{"foo": "bar"} - s.Update(labels) - node := &corev1.Node{} - node.SetLabels(labels) - Expect(s.Match(node)).To(BeTrue()) - Expect(s.Match(&corev1.Node{})).To(BeFalse()) - s.Update(map[string]string{"foobar": "foobar"}) - Expect(s.Match(node)).To(BeFalse()) - }) -})
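
For orientation: the `ipamv1alpha1` package this patch imports everywhere is added elsewhere (`api/v1alpha1`, plus the CRD manifest under `deploy/crds` that the envtest suite now installs) and is not part of this diff. Its shape can be reconstructed from how it is used above; a sketch under that assumption (JSON tags and the omitted kubebuilder/validation markers are guesses):

```go
// Package v1alpha1 — shape inferred from usage in this patch; the real
// definitions live in api/v1alpha1 and may carry additional markers.
package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// IPPoolSpec describes one pool: a subnet split into equal per-node blocks.
type IPPoolSpec struct {
	Subnet           string               `json:"subnet"`
	PerNodeBlockSize int                  `json:"perNodeBlockSize"`
	Gateway          string               `json:"gateway"`
	NodeSelector     *corev1.NodeSelector `json:"nodeSelector,omitempty"`
}

// Allocation is one node's IP block, written by the controller into Status.
type Allocation struct {
	NodeName string `json:"nodeName"`
	StartIP  string `json:"startIP"`
	EndIP    string `json:"endIP"`
}

// IPPoolStatus holds the per-node allocations computed by the controller.
type IPPoolStatus struct {
	Allocations []Allocation `json:"allocations"`
}

// IPPool is the namespaced CR (group nv-ipam.nvidia.com, per the RBAC rules
// above) that replaces the ConfigMap-based controller configuration.
type IPPool struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              IPPoolSpec   `json:"spec"`
	Status            IPPoolStatus `json:"status,omitempty"`
}
```

Note how the patch inverts the watch direction: the Node controller now fans out `event.GenericEvent`s for every IPPool over `NodeEventCh`, which the IPPool controller consumes via `source.Channel`, whereas previously the ConfigMap controller notified the Node controller over `ConfigEventCh`. The new `ippools/status` RBAC rule exists because the controller persists allocations with `r.Status().Update`, and the cache selector in `app.go` restricts the manager to IPPools in the namespace given by `--ippools-namespace`.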