From 4eb203a6dc2f87fbee8bf6269e963b6361101d7d Mon Sep 17 00:00:00 2001 From: Kubernetes Prow Robot Date: Tue, 18 Aug 2020 20:25:11 -0700 Subject: [PATCH 1/4] Merge pull request #3312 from detiber/unstructured [cluster-autoscaler][clusterapi] Remove internal types in favor of unstructured --- .../clusterapi/clusterapi_controller.go | 515 ++++++------- .../clusterapi/clusterapi_controller_test.go | 728 ++++++++++++------ .../clusterapi/clusterapi_converters.go | 196 ----- .../clusterapi_machinedeployment.go | 152 ---- .../clusterapi/clusterapi_machineset.go | 138 ---- .../clusterapi/clusterapi_machineset_test.go | 150 ---- .../clusterapi/clusterapi_nodegroup.go | 46 +- .../clusterapi/clusterapi_nodegroup_test.go | 357 ++++----- .../clusterapi/clusterapi_provider.go | 39 +- .../clusterapi/clusterapi_provider_test.go | 7 +- .../clusterapi/clusterapi_scalableresource.go | 74 -- .../clusterapi/clusterapi_unstructured.go | 169 ++++ .../clusterapi_unstructured_test.go | 186 +++++ .../clusterapi/clusterapi_utils.go | 35 +- .../clusterapi/clusterapi_utils_test.go | 338 ++++---- .../cloudprovider/clusterapi/machine_types.go | 92 --- .../clusterapi/machinedeployment_types.go | 59 -- .../clusterapi/machineset_types.go | 81 -- .../clusterapi/zz_generated.deepcopy.go | 360 --------- .../client-go/discovery/cached/memory/BUILD | 47 ++ .../discovery/cached/memory/memcache.go | 243 ++++++ .../vendor/k8s.io/client-go/restmapper/BUILD | 57 ++ .../restmapper/category_expansion.go | 119 +++ .../k8s.io/client-go/restmapper/discovery.go | 338 ++++++++ .../k8s.io/client-go/restmapper/shortcut.go | 172 +++++ .../vendor/k8s.io/client-go/scale/fake/BUILD | 31 + .../k8s.io/client-go/scale/fake/client.go | 81 ++ cluster-autoscaler/vendor/modules.txt | 3 + 28 files changed, 2538 insertions(+), 2275 deletions(-) delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go 
delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset_test.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/machine_types.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go delete mode 100644 cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/restmapper/category_expansion.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/client.go diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go index 6430b485c731..f495826e7072 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go @@ -17,7 +17,6 @@ limitations under the License. 
package clusterapi import ( - "context" "fmt" "os" "strings" @@ -27,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" @@ -34,9 +34,9 @@ import ( "k8s.io/client-go/informers" kubeinformers "k8s.io/client-go/informers" kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" klog "k8s.io/klog/v2" - "k8s.io/utils/pointer" ) const ( @@ -49,6 +49,9 @@ const ( resourceNameMachineSet = "machinesets" resourceNameMachineDeployment = "machinedeployments" failedMachinePrefix = "failed-machine-" + machineDeploymentKind = "MachineDeployment" + machineSetKind = "MachineSet" + machineKind = "Machine" ) // machineController watches for Nodes, Machines, MachineSets and @@ -56,21 +59,21 @@ const ( // cluster. Additionally, it adds indices to the node informers to // satisfy lookup by node.Spec.ProviderID. 
type machineController struct { - kubeInformerFactory kubeinformers.SharedInformerFactory - machineInformerFactory dynamicinformer.DynamicSharedInformerFactory - machineDeploymentInformer informers.GenericInformer - machineInformer informers.GenericInformer - machineSetInformer informers.GenericInformer - nodeInformer cache.SharedIndexInformer - dynamicclient dynamic.Interface - machineSetResource *schema.GroupVersionResource - machineResource *schema.GroupVersionResource - machineDeploymentResource *schema.GroupVersionResource - accessLock sync.Mutex + workloadInformerFactory kubeinformers.SharedInformerFactory + managementInformerFactory dynamicinformer.DynamicSharedInformerFactory + machineDeploymentInformer informers.GenericInformer + machineInformer informers.GenericInformer + machineSetInformer informers.GenericInformer + nodeInformer cache.SharedIndexInformer + managementClient dynamic.Interface + managementScaleClient scale.ScalesGetter + machineSetResource schema.GroupVersionResource + machineResource schema.GroupVersionResource + machineDeploymentResource schema.GroupVersionResource + machineDeploymentsAvailable bool + accessLock sync.Mutex } -type machineSetFilterFunc func(machineSet *MachineSet) error - func indexMachineByProviderID(obj interface{}) ([]string, error) { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -98,31 +101,20 @@ func indexNodeByProviderID(obj interface{}) ([]string, error) { return []string{}, nil } -func (c *machineController) findMachine(id string) (*Machine, error) { - item, exists, err := c.machineInformer.Informer().GetStore().GetByKey(id) - if err != nil { - return nil, err - } - - if !exists { - return nil, nil - } - - u, ok := item.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("internal error; unexpected type: %T", item) - } +func (c *machineController) findMachine(id string) (*unstructured.Unstructured, error) { + return findResourceByKey(c.machineInformer.Informer().GetStore(), id) +} - machine := 
newMachineFromUnstructured(u.DeepCopy()) - if machine == nil { - return nil, nil - } +func (c *machineController) findMachineSet(id string) (*unstructured.Unstructured, error) { + return findResourceByKey(c.machineSetInformer.Informer().GetStore(), id) +} - return machine, nil +func (c *machineController) findMachineDeployment(id string) (*unstructured.Unstructured, error) { + return findResourceByKey(c.machineDeploymentInformer.Informer().GetStore(), id) } -func (c *machineController) findMachineDeployment(id string) (*MachineDeployment, error) { - item, exists, err := c.machineDeploymentInformer.Informer().GetStore().GetByKey(id) +func findResourceByKey(store cache.Store, key string) (*unstructured.Unstructured, error) { + item, exists, err := store.GetByKey(key) if err != nil { return nil, err } @@ -136,62 +128,45 @@ func (c *machineController) findMachineDeployment(id string) (*MachineDeployment return nil, fmt.Errorf("internal error; unexpected type: %T", item) } - machineDeployment := newMachineDeploymentFromUnstructured(u.DeepCopy()) - if machineDeployment == nil { - return nil, nil - } - - return machineDeployment, nil + return u.DeepCopy(), nil } // findMachineOwner returns the machine set owner for machine, or nil // if there is no owner. A DeepCopy() of the object is returned on // success. 
-func (c *machineController) findMachineOwner(machine *Machine) (*MachineSet, error) { +func (c *machineController) findMachineOwner(machine *unstructured.Unstructured) (*unstructured.Unstructured, error) { machineOwnerRef := machineOwnerRef(machine) if machineOwnerRef == nil { return nil, nil } - store := c.machineSetInformer.Informer().GetStore() - item, exists, err := store.GetByKey(fmt.Sprintf("%s/%s", machine.Namespace, machineOwnerRef.Name)) - if err != nil { - return nil, err - } - if !exists { - return nil, nil - } - - u, ok := item.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("internal error; unexpected type: %T", item) - } - - u = u.DeepCopy() - machineSet := newMachineSetFromUnstructured(u) - if machineSet == nil { - return nil, nil - } + return c.findMachineSet(fmt.Sprintf("%s/%s", machine.GetNamespace(), machineOwnerRef.Name)) +} - if !machineIsOwnedByMachineSet(machine, machineSet) { +// findMachineSetOwner returns the owner for the machineSet, or nil +// if there is no owner. A DeepCopy() of the object is returned on +// success. +func (c *machineController) findMachineSetOwner(machineSet *unstructured.Unstructured) (*unstructured.Unstructured, error) { + machineSetOwnerRef := machineSetOwnerRef(machineSet) + if machineSetOwnerRef == nil { return nil, nil } - return machineSet, nil + return c.findMachineDeployment(fmt.Sprintf("%s/%s", machineSet.GetNamespace(), machineSetOwnerRef.Name)) } // run starts shared informers and waits for the informer cache to // synchronize. 
func (c *machineController) run(stopCh <-chan struct{}) error { - c.kubeInformerFactory.Start(stopCh) - c.machineInformerFactory.Start(stopCh) + c.workloadInformerFactory.Start(stopCh) + c.managementInformerFactory.Start(stopCh) syncFuncs := []cache.InformerSynced{ c.nodeInformer.HasSynced, c.machineInformer.Informer().HasSynced, c.machineSetInformer.Informer().HasSynced, } - if c.machineDeploymentResource != nil { + if c.machineDeploymentsAvailable { syncFuncs = append(syncFuncs, c.machineDeploymentInformer.Informer().HasSynced) } @@ -203,9 +178,43 @@ func (c *machineController) run(stopCh <-chan struct{}) error { return nil } +func (c *machineController) findScalableResourceByProviderID(providerID normalizedProviderID) (*unstructured.Unstructured, error) { + machine, err := c.findMachineByProviderID(providerID) + if err != nil { + return nil, err + } + + if machine == nil { + return nil, nil + } + + machineSet, err := c.findMachineOwner(machine) + if err != nil { + return nil, err + } + + if machineSet == nil { + return nil, nil + } + + if c.machineDeploymentsAvailable { + machineDeployment, err := c.findMachineSetOwner(machineSet) + if err != nil { + return nil, err + } + + // If a matching machineDeployment was found return it + if machineDeployment != nil { + return machineDeployment, nil + } + } + + return machineSet, nil +} + // findMachineByProviderID finds machine matching providerID. A // DeepCopy() of the object is returned on success. 
-func (c *machineController) findMachineByProviderID(providerID normalizedProviderID) (*Machine, error) { +func (c *machineController) findMachineByProviderID(providerID normalizedProviderID) (*unstructured.Unstructured, error) { objs, err := c.machineInformer.Informer().GetIndexer().ByIndex(machineProviderIDIndex, string(providerID)) if err != nil { return nil, err @@ -219,20 +228,11 @@ func (c *machineController) findMachineByProviderID(providerID normalizedProvide if !ok { return nil, fmt.Errorf("internal error; unexpected type %T", objs[0]) } - machine := newMachineFromUnstructured(u.DeepCopy()) - if machine != nil { - return machine, nil - } + return u.DeepCopy(), nil } if isFailedMachineProviderID(providerID) { - machine, err := c.findMachine(machineKeyFromFailedProviderID(providerID)) - if err != nil { - return nil, err - } - if machine != nil { - return machine.DeepCopy(), nil - } + return c.findMachine(machineKeyFromFailedProviderID(providerID)) } // If the machine object has no providerID--maybe actuator @@ -280,29 +280,6 @@ func (c *machineController) findNodeByNodeName(name string) (*corev1.Node, error return node.DeepCopy(), nil } -// machinesInMachineSet returns all the machines that belong to -// machineSet. For each machine in the set a DeepCopy() of the object -// is returned. -func (c *machineController) machinesInMachineSet(machineSet *MachineSet) ([]*Machine, error) { - machines, err := c.listMachines(machineSet.Namespace, labels.SelectorFromSet(machineSet.Labels)) - if err != nil { - return nil, err - } - if machines == nil { - return nil, nil - } - - var result []*Machine - - for _, machine := range machines { - if machineIsOwnedByMachineSet(machine, machineSet) { - result = append(result, machine) - } - } - - return result, nil -} - // getCAPIGroup returns a string that specifies the group for the API. // It will return either the value from the // CAPI_GROUP environment variable, or the default value i.e cluster.x-k8s.io. 
@@ -319,57 +296,58 @@ func getCAPIGroup() string { // Machines and MachineSet as they are added, updated and deleted on // the cluster. func newMachineController( - dynamicclient dynamic.Interface, - kubeclient kubeclient.Interface, - discoveryclient discovery.DiscoveryInterface, + managementClient dynamic.Interface, + workloadClient kubeclient.Interface, + managementDiscoveryClient discovery.DiscoveryInterface, + managementScaleClient scale.ScalesGetter, ) (*machineController, error) { - kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeclient, 0) - informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicclient, 0, metav1.NamespaceAll, nil) + workloadInformerFactory := kubeinformers.NewSharedInformerFactory(workloadClient, 0) + managementInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(managementClient, 0, metav1.NamespaceAll, nil) CAPIGroup := getCAPIGroup() - CAPIVersion, err := getAPIGroupPreferredVersion(discoveryclient, CAPIGroup) + CAPIVersion, err := getAPIGroupPreferredVersion(managementDiscoveryClient, CAPIGroup) if err != nil { return nil, fmt.Errorf("could not find preferred version for CAPI group %q: %v", CAPIGroup, err) } klog.Infof("Using version %q for API group %q", CAPIVersion, CAPIGroup) - var gvrMachineDeployment *schema.GroupVersionResource + var gvrMachineDeployment schema.GroupVersionResource var machineDeploymentInformer informers.GenericInformer - machineDeployment, err := groupVersionHasResource(discoveryclient, + machineDeploymentAvailable, err := groupVersionHasResource(managementDiscoveryClient, fmt.Sprintf("%s/%s", CAPIGroup, CAPIVersion), resourceNameMachineDeployment) if err != nil { return nil, fmt.Errorf("failed to validate if resource %q is available for group %q: %v", resourceNameMachineDeployment, fmt.Sprintf("%s/%s", CAPIGroup, CAPIVersion), err) } - if machineDeployment { - gvrMachineDeployment = &schema.GroupVersionResource{ + if machineDeploymentAvailable { 
+ gvrMachineDeployment = schema.GroupVersionResource{ Group: CAPIGroup, Version: CAPIVersion, Resource: resourceNameMachineDeployment, } - machineDeploymentInformer = informerFactory.ForResource(*gvrMachineDeployment) + machineDeploymentInformer = managementInformerFactory.ForResource(gvrMachineDeployment) machineDeploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) } - gvrMachineSet := &schema.GroupVersionResource{ + gvrMachineSet := schema.GroupVersionResource{ Group: CAPIGroup, Version: CAPIVersion, Resource: resourceNameMachineSet, } - machineSetInformer := informerFactory.ForResource(*gvrMachineSet) + machineSetInformer := managementInformerFactory.ForResource(gvrMachineSet) machineSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) - gvrMachine := &schema.GroupVersionResource{ + gvrMachine := schema.GroupVersionResource{ Group: CAPIGroup, Version: CAPIVersion, Resource: resourceNameMachine, } - machineInformer := informerFactory.ForResource(*gvrMachine) + machineInformer := managementInformerFactory.ForResource(gvrMachine) machineInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) - nodeInformer := kubeInformerFactory.Core().V1().Nodes().Informer() + nodeInformer := workloadInformerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{}) if err := machineInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ @@ -385,16 +363,18 @@ func newMachineController( } return &machineController{ - kubeInformerFactory: kubeInformerFactory, - machineInformerFactory: informerFactory, - machineDeploymentInformer: machineDeploymentInformer, - machineInformer: machineInformer, - machineSetInformer: machineSetInformer, - nodeInformer: nodeInformer, - dynamicclient: dynamicclient, - machineSetResource: gvrMachineSet, - machineResource: gvrMachine, - machineDeploymentResource: gvrMachineDeployment, + workloadInformerFactory: workloadInformerFactory, + 
managementInformerFactory: managementInformerFactory, + machineDeploymentInformer: machineDeploymentInformer, + machineInformer: machineInformer, + machineSetInformer: machineSetInformer, + nodeInformer: nodeInformer, + managementClient: managementClient, + managementScaleClient: managementScaleClient, + machineSetResource: gvrMachineSet, + machineResource: gvrMachine, + machineDeploymentResource: gvrMachineDeployment, + machineDeploymentsAvailable: machineDeploymentAvailable, }, nil } @@ -428,179 +408,125 @@ func getAPIGroupPreferredVersion(client discovery.DiscoveryInterface, APIGroup s return "", fmt.Errorf("failed to find API group %q", APIGroup) } -func (c *machineController) machineSetProviderIDs(machineSet *MachineSet) ([]string, error) { - machines, err := c.machinesInMachineSet(machineSet) +func (c *machineController) scalableResourceProviderIDs(scalableResource *unstructured.Unstructured) ([]string, error) { + machines, err := c.listMachinesForScalableResource(scalableResource) if err != nil { return nil, fmt.Errorf("error listing machines: %v", err) } var providerIDs []string for _, machine := range machines { - if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { - klog.Warningf("Machine %q has no providerID", machine.Name) + providerID, found, err := unstructured.NestedString(machine.Object, "spec", "providerID") + if err != nil { + return nil, err } - if machine.Spec.ProviderID != nil && *machine.Spec.ProviderID != "" { - providerIDs = append(providerIDs, *machine.Spec.ProviderID) - continue + if found { + if providerID != "" { + providerIDs = append(providerIDs, providerID) + continue + } } - if machine.Status.FailureMessage != nil { - klog.V(4).Infof("Status.FailureMessage of machine %q is %q", machine.Name, *machine.Status.FailureMessage) + klog.Warningf("Machine %q has no providerID", machine.GetName()) + + failureMessage, found, err := unstructured.NestedString(machine.Object, "status", "failureMessage") + if err != nil { + 
return nil, err + } + + if found { + klog.V(4).Infof("Status.FailureMessage of machine %q is %q", machine.GetName(), failureMessage) // Provide a fake ID to allow the autoscaler to track machines that will never // become nodes and mark the nodegroup unhealthy after maxNodeProvisionTime. // Fake ID needs to be recognised later and converted into a machine key. // Use an underscore as a separator between namespace and name as it is not a // valid character within a namespace name. - providerIDs = append(providerIDs, fmt.Sprintf("%s%s_%s", failedMachinePrefix, machine.Namespace, machine.Name)) + providerIDs = append(providerIDs, fmt.Sprintf("%s%s_%s", failedMachinePrefix, machine.GetNamespace(), machine.GetName())) continue } - if machine.Status.NodeRef == nil { - klog.V(4).Infof("Status.NodeRef of machine %q is currently nil", machine.Name) - continue + _, found, err = unstructured.NestedFieldCopy(machine.Object, "status", "nodeRef") + if err != nil { + return nil, err } - if machine.Status.NodeRef.Kind != "Node" { - klog.Errorf("Status.NodeRef of machine %q does not reference a node (rather %q)", machine.Name, machine.Status.NodeRef.Kind) + if !found { + klog.V(4).Infof("Status.NodeRef of machine %q is currently nil", machine.GetName()) continue } - node, err := c.findNodeByNodeName(machine.Status.NodeRef.Name) + nodeRefKind, found, err := unstructured.NestedString(machine.Object, "status", "nodeRef", "kind") if err != nil { - return nil, fmt.Errorf("unknown node %q", machine.Status.NodeRef.Name) + return nil, err } - if node != nil { - providerIDs = append(providerIDs, node.Spec.ProviderID) + if found && nodeRefKind != "Node" { + klog.Errorf("Status.NodeRef of machine %q does not reference a node (rather %q)", machine.GetName(), nodeRefKind) + continue } - } - klog.V(4).Infof("nodegroup %s has nodes %v", machineSet.Name, providerIDs) - return providerIDs, nil -} - -func (c *machineController) filterAllMachineSets(f machineSetFilterFunc) error { - return 
c.filterMachineSets(metav1.NamespaceAll, f) -} - -func (c *machineController) filterMachineSets(namespace string, f machineSetFilterFunc) error { - machineSets, err := c.listMachineSets(namespace, labels.Everything()) - if err != nil { - return nil - } - for _, machineSet := range machineSets { - if err := f(machineSet); err != nil { - return err + nodeRefName, found, err := unstructured.NestedString(machine.Object, "status", "nodeRef", "name") + if err != nil { + return nil, err } - } - return nil -} -func (c *machineController) machineSetNodeGroups() ([]*nodegroup, error) { - var nodegroups []*nodegroup + if found { + node, err := c.findNodeByNodeName(nodeRefName) + if err != nil { + return nil, fmt.Errorf("unknown node %q", nodeRefName) + } - if err := c.filterAllMachineSets(func(machineSet *MachineSet) error { - if machineSetHasMachineDeploymentOwnerRef(machineSet) { - return nil - } - ng, err := newNodegroupFromMachineSet(c, machineSet) - if err != nil { - return err - } - if ng.MaxSize()-ng.MinSize() > 0 && pointer.Int32PtrDerefOr(machineSet.Spec.Replicas, 0) > 0 { - nodegroups = append(nodegroups, ng) + if node != nil { + providerIDs = append(providerIDs, node.Spec.ProviderID) + } } - return nil - }); err != nil { - return nil, err } - return nodegroups, nil + klog.V(4).Infof("nodegroup %s has nodes %v", scalableResource.GetName(), providerIDs) + + return providerIDs, nil } -func (c *machineController) machineDeploymentNodeGroups() ([]*nodegroup, error) { - machineDeployments, err := c.listMachineDeployments(metav1.NamespaceAll, labels.Everything()) +func (c *machineController) nodeGroups() ([]*nodegroup, error) { + scalableResources, err := c.listScalableResources() if err != nil { return nil, err } - var nodegroups []*nodegroup + nodegroups := make([]*nodegroup, 0, len(scalableResources)) - for _, md := range machineDeployments { - ng, err := newNodegroupFromMachineDeployment(c, md) + for _, r := range scalableResources { + ng, err := 
newNodegroupFromScalableResource(c, r) if err != nil { return nil, err } - // add nodegroup iff it has the capacity to scale - if ng.MaxSize()-ng.MinSize() > 0 && pointer.Int32PtrDerefOr(md.Spec.Replicas, 0) > 0 { - nodegroups = append(nodegroups, ng) - } - } - - return nodegroups, nil -} -func (c *machineController) nodeGroups() ([]*nodegroup, error) { - machineSets, err := c.machineSetNodeGroups() - if err != nil { - return nil, err - } + // add nodegroup iff it has the capacity to scale + if ng.MaxSize()-ng.MinSize() > 0 { + replicas, found, err := unstructured.NestedInt64(r.Object, "spec", "replicas") + if err != nil { + return nil, err + } - if c.machineDeploymentResource != nil { - machineDeployments, err := c.machineDeploymentNodeGroups() - if err != nil { - return nil, err + if found && replicas > 0 { + nodegroups = append(nodegroups, ng) + } } - machineSets = append(machineSets, machineDeployments...) } - - return machineSets, nil + return nodegroups, nil } func (c *machineController) nodeGroupForNode(node *corev1.Node) (*nodegroup, error) { - machine, err := c.findMachineByProviderID(normalizedProviderString(node.Spec.ProviderID)) - if err != nil { - return nil, err - } - if machine == nil { - return nil, nil - } - - machineSet, err := c.findMachineOwner(machine) + scalableResource, err := c.findScalableResourceByProviderID(normalizedProviderString(node.Spec.ProviderID)) if err != nil { return nil, err } - - if machineSet == nil { + if scalableResource == nil { return nil, nil } - if c.machineDeploymentResource != nil { - if ref := machineSetMachineDeploymentRef(machineSet); ref != nil { - key := fmt.Sprintf("%s/%s", machineSet.Namespace, ref.Name) - machineDeployment, err := c.findMachineDeployment(key) - if err != nil { - return nil, fmt.Errorf("unknown MachineDeployment %q: %v", key, err) - } - if machineDeployment == nil { - return nil, fmt.Errorf("unknown MachineDeployment %q", key) - } - nodegroup, err := newNodegroupFromMachineDeployment(c, 
machineDeployment) - if err != nil { - return nil, fmt.Errorf("failed to build nodegroup for node %q: %v", node.Name, err) - } - // We don't scale from 0 so nodes must belong - // to a nodegroup that has a scale size of at - // least 1. - if nodegroup.MaxSize()-nodegroup.MinSize() < 1 { - return nil, nil - } - return nodegroup, nil - } - } - - nodegroup, err := newNodegroupFromMachineSet(c, machineSet) + nodegroup, err := newNodegroupFromScalableResource(c, scalableResource) if err != nil { return nil, fmt.Errorf("failed to build nodegroup for node %q: %v", node.Name, err) } @@ -611,7 +537,7 @@ func (c *machineController) nodeGroupForNode(node *corev1.Node) (*nodegroup, err return nil, nil } - klog.V(4).Infof("node %q is in nodegroup %q", node.Name, machineSet.Name) + klog.V(4).Infof("node %q is in nodegroup %q", node.Name, nodegroup.Id()) return nodegroup, nil } @@ -639,80 +565,75 @@ func (c *machineController) findNodeByProviderID(providerID normalizedProviderID return node.DeepCopy(), nil } -func (c *machineController) getMachine(namespace, name string, options metav1.GetOptions) (*Machine, error) { - u, err := c.dynamicclient.Resource(*c.machineResource).Namespace(namespace).Get(context.TODO(), name, options) - if err != nil { - return nil, err - } - return newMachineFromUnstructured(u.DeepCopy()), nil -} - -func (c *machineController) getMachineSet(namespace, name string, options metav1.GetOptions) (*MachineSet, error) { - u, err := c.dynamicclient.Resource(*c.machineSetResource).Namespace(namespace).Get(context.TODO(), name, options) - if err != nil { - return nil, err - } - return newMachineSetFromUnstructured(u.DeepCopy()), nil -} - -func (c *machineController) getMachineDeployment(namespace, name string, options metav1.GetOptions) (*MachineDeployment, error) { - u, err := c.dynamicclient.Resource(*c.machineDeploymentResource).Namespace(namespace).Get(context.TODO(), name, options) - if err != nil { - return nil, err - } - return 
newMachineDeploymentFromUnstructured(u.DeepCopy()), nil -} +func (c *machineController) listMachinesForScalableResource(r *unstructured.Unstructured) ([]*unstructured.Unstructured, error) { + switch r.GetKind() { + case machineSetKind, machineDeploymentKind: + unstructuredSelector, found, err := unstructured.NestedMap(r.Object, "spec", "selector") + if err != nil { + return nil, err + } -func (c *machineController) listMachines(namespace string, selector labels.Selector) ([]*Machine, error) { - objs, err := c.machineInformer.Lister().ByNamespace(namespace).List(selector) - if err != nil { - return nil, err - } + if !found { + return nil, fmt.Errorf("expected field spec.selector on scalable resource type") + } - var machines []*Machine + labelSelector := &metav1.LabelSelector{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredSelector, labelSelector); err != nil { + return nil, err + } - for _, x := range objs { - u := x.(*unstructured.Unstructured).DeepCopy() - if machine := newMachineFromUnstructured(u); machine != nil { - machines = append(machines, machine) + selector, err := metav1.LabelSelectorAsSelector(labelSelector) + if err != nil { + return nil, err } - } - return machines, nil + return listResources(c.machineInformer.Lister().ByNamespace(r.GetNamespace()), selector) + default: + return nil, fmt.Errorf("unknown scalable resource kind %s", r.GetKind()) + } } -func (c *machineController) listMachineSets(namespace string, selector labels.Selector) ([]*MachineSet, error) { - objs, err := c.machineSetInformer.Lister().ByNamespace(namespace).List(selector) +func (c *machineController) listScalableResources() ([]*unstructured.Unstructured, error) { + scalableResources, err := c.listResources(c.machineSetInformer.Lister()) if err != nil { return nil, err } - var machineSets []*MachineSet - - for _, x := range objs { - u := x.(*unstructured.Unstructured).DeepCopy() - if machineSet := newMachineSetFromUnstructured(u); machineSet != nil 
{ - machineSets = append(machineSets, machineSet) + if c.machineDeploymentsAvailable { + machineDeployments, err := c.listResources(c.machineDeploymentInformer.Lister()) + if err != nil { + return nil, err } + + scalableResources = append(scalableResources, machineDeployments...) } + return scalableResources, nil +} - return machineSets, nil +func (c *machineController) listResources(lister cache.GenericLister) ([]*unstructured.Unstructured, error) { + return listResources(lister.ByNamespace(metav1.NamespaceAll), labels.Everything()) } -func (c *machineController) listMachineDeployments(namespace string, selector labels.Selector) ([]*MachineDeployment, error) { - objs, err := c.machineDeploymentInformer.Lister().ByNamespace(namespace).List(selector) +func listResources(lister cache.GenericNamespaceLister, selector labels.Selector) ([]*unstructured.Unstructured, error) { + objs, err := lister.List(selector) if err != nil { return nil, err } - var machineDeployments []*MachineDeployment - + results := make([]*unstructured.Unstructured, 0, len(objs)) for _, x := range objs { - u := x.(*unstructured.Unstructured).DeepCopy() - if machineDeployment := newMachineDeploymentFromUnstructured(u); machineDeployment != nil { - machineDeployments = append(machineDeployments, machineDeployment) + u, ok := x.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("expected unstructured resource from lister, not %T", x) + } + + // if we are listing MachineSets, do not return MachineSets that are owned by a MachineDeployment + if u.GetKind() == machineSetKind && machineSetHasMachineDeploymentOwnerRef(u) { + continue } + + results = append(results, u.DeepCopy()) } - return machineDeployments, nil + return results, nil } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go index f498ca24cf20..e6af9af0cb3c 100644 --- 
a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go @@ -17,33 +17,39 @@ limitations under the License. package clusterapi import ( + "context" "fmt" + "math/rand" "os" "path" "reflect" "sort" - "strings" "testing" + "time" + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime/schema" fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/dynamic" fakedynamic "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/informers" fakekube "k8s.io/client-go/kubernetes/fake" + fakescale "k8s.io/client-go/scale/fake" clientgotesting "k8s.io/client-go/testing" - "k8s.io/utils/pointer" ) type testControllerShutdownFunc func() type testConfig struct { spec *testSpec - machineDeployment *MachineDeployment - machineSet *MachineSet - machines []*Machine + machineDeployment *unstructured.Unstructured + machineSet *unstructured.Unstructured + machines []*unstructured.Unstructured nodes []*corev1.Node } @@ -70,12 +76,12 @@ func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin } for i := range config.machines { - machineObjects = append(machineObjects, newUnstructuredFromMachine(config.machines[i])) + machineObjects = append(machineObjects, config.machines[i]) } - machineObjects = append(machineObjects, newUnstructuredFromMachineSet(config.machineSet)) + machineObjects = append(machineObjects, config.machineSet) if config.machineDeployment != nil { - machineObjects = append(machineObjects, newUnstructuredFromMachineDeployment(config.machineDeployment)) + machineObjects = append(machineObjects, config.machineDeployment) } } @@ -83,10 +89,10 @@ func 
mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin dynamicClientset := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), machineObjects...) discoveryClient := &fakediscovery.FakeDiscovery{ Fake: &clientgotesting.Fake{ - Resources: []*v1.APIResourceList{ + Resources: []*metav1.APIResourceList{ { GroupVersion: fmt.Sprintf("%s/v1beta1", customCAPIGroup), - APIResources: []v1.APIResource{ + APIResources: []metav1.APIResource{ { Name: resourceNameMachineDeployment, }, @@ -100,7 +106,7 @@ func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin }, { GroupVersion: fmt.Sprintf("%s/v1alpha3", defaultCAPIGroup), - APIResources: []v1.APIResource{ + APIResources: []metav1.APIResource{ { Name: resourceNameMachineDeployment, }, @@ -115,7 +121,98 @@ func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin }, }, } - controller, err := newMachineController(dynamicClientset, kubeclientSet, discoveryClient) + + scaleClient := &fakescale.FakeScaleClient{Fake: clientgotesting.Fake{}} + scaleReactor := func(action clientgotesting.Action) (bool, runtime.Object, error) { + resource := action.GetResource().Resource + if resource != resourceNameMachineSet && resource != resourceNameMachineDeployment { + // Do not attempt to react to resources that are not MachineSet or MachineDeployment + return false, nil, nil + } + + subresource := action.GetSubresource() + if subresource != "scale" { + // Handle a bug in the client-go fakeNamespaceScaleClient, where the action namespace and subresource are + // switched for update actions + if action.GetVerb() == "update" && action.GetNamespace() == "scale" { + subresource = "scale" + } else { + // Do not attempt to respond to anything but scale subresource requests + return false, nil, nil + } + } + + gvr := schema.GroupVersionResource{ + Group: action.GetResource().Group, + Version: "v1alpha3", + Resource: resource, + } + + switch action.GetVerb() { + case "get": + action, 
ok := action.(clientgotesting.GetAction) + if !ok { + return true, nil, fmt.Errorf("failed to convert Action to GetAction: %T", action) + } + + u, err := dynamicClientset.Resource(gvr).Namespace(action.GetNamespace()).Get(context.TODO(), action.GetName(), metav1.GetOptions{}) + if err != nil { + return true, nil, err + } + + replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") + if err != nil { + return true, nil, err + } + + if !found { + replicas = 0 + } + + result := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: u.GetName(), + Namespace: u.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: int32(replicas), + }, + } + + return true, result, nil + case "update": + action, ok := action.(clientgotesting.UpdateAction) + if !ok { + return true, nil, fmt.Errorf("failed to convert Action to UpdateAction: %T", action) + } + + s, ok := action.GetObject().(*autoscalingv1.Scale) + if !ok { + return true, nil, fmt.Errorf("failed to convert Resource to Scale: %T", s) + } + + u, err := dynamicClientset.Resource(gvr).Namespace(s.Namespace).Get(context.TODO(), s.Name, metav1.GetOptions{}) + if err != nil { + return true, nil, fmt.Errorf("failed to fetch underlying %s resource: %s/%s", resource, s.Namespace, s.Name) + } + + if err := unstructured.SetNestedField(u.Object, int64(s.Spec.Replicas), "spec", "replicas"); err != nil { + return true, nil, err + } + + _, err = dynamicClientset.Resource(gvr).Namespace(s.Namespace).Update(context.TODO(), u, metav1.UpdateOptions{}) + if err != nil { + return true, nil, err + } + + return true, s, nil + default: + return true, nil, fmt.Errorf("unknown verb: %v", action.GetVerb()) + } + } + scaleClient.AddReactor("*", "*", scaleReactor) + + controller, err := newMachineController(dynamicClientset, kubeclientSet, discoveryClient, scaleClient) if err != nil { t.Fatal("failed to create test controller") } @@ -130,31 +227,31 @@ func mustCreateTestController(t *testing.T, testConfigs 
...*testConfig) (*machin } } -func createMachineSetTestConfig(namespace string, nodeCount int, annotations map[string]string) *testConfig { - return createTestConfigs(createTestSpecs(namespace, 1, nodeCount, false, annotations)...)[0] +func createMachineSetTestConfig(namespace, namePrefix string, nodeCount int, annotations map[string]string) *testConfig { + return createTestConfigs(createTestSpecs(namespace, namePrefix, 1, nodeCount, false, annotations)...)[0] } -func createMachineSetTestConfigs(namespace string, configCount, nodeCount int, annotations map[string]string) []*testConfig { - return createTestConfigs(createTestSpecs(namespace, configCount, nodeCount, false, annotations)...) +func createMachineSetTestConfigs(namespace, namePrefix string, configCount, nodeCount int, annotations map[string]string) []*testConfig { + return createTestConfigs(createTestSpecs(namespace, namePrefix, configCount, nodeCount, false, annotations)...) } -func createMachineDeploymentTestConfig(namespace string, nodeCount int, annotations map[string]string) *testConfig { - return createTestConfigs(createTestSpecs(namespace, 1, nodeCount, true, annotations)...)[0] +func createMachineDeploymentTestConfig(namespace, namePrefix string, nodeCount int, annotations map[string]string) *testConfig { + return createTestConfigs(createTestSpecs(namespace, namePrefix, 1, nodeCount, true, annotations)...)[0] } -func createMachineDeploymentTestConfigs(namespace string, configCount, nodeCount int, annotations map[string]string) []*testConfig { - return createTestConfigs(createTestSpecs(namespace, configCount, nodeCount, true, annotations)...) +func createMachineDeploymentTestConfigs(namespace, namePrefix string, configCount, nodeCount int, annotations map[string]string) []*testConfig { + return createTestConfigs(createTestSpecs(namespace, namePrefix, configCount, nodeCount, true, annotations)...) 
} -func createTestSpecs(namespace string, scalableResourceCount, nodeCount int, isMachineDeployment bool, annotations map[string]string) []testSpec { +func createTestSpecs(namespace, namePrefix string, scalableResourceCount, nodeCount int, isMachineDeployment bool, annotations map[string]string) []testSpec { var specs []testSpec for i := 0; i < scalableResourceCount; i++ { specs = append(specs, testSpec{ annotations: annotations, - machineDeploymentName: fmt.Sprintf("machinedeployment-%d", i), - machineSetName: fmt.Sprintf("machineset-%d", i), - namespace: strings.ToLower(namespace), + machineDeploymentName: fmt.Sprintf("%s-%d", namePrefix, i), + machineSetName: fmt.Sprintf("%s-%d", namePrefix, i), + namespace: namespace, nodeCount: nodeCount, rootIsMachineDeployment: isMachineDeployment, }) @@ -164,63 +261,85 @@ func createTestSpecs(namespace string, scalableResourceCount, nodeCount int, isM } func createTestConfigs(specs ...testSpec) []*testConfig { - var result []*testConfig + result := make([]*testConfig, 0, len(specs)) for i, spec := range specs { config := &testConfig{ spec: &specs[i], nodes: make([]*corev1.Node, spec.nodeCount), - machines: make([]*Machine, spec.nodeCount), + machines: make([]*unstructured.Unstructured, spec.nodeCount), } - config.machineSet = &MachineSet{ - TypeMeta: v1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/v1alpha3", defaultCAPIGroup), - Kind: "MachineSet", - }, - ObjectMeta: v1.ObjectMeta{ - Name: spec.machineSetName, - Namespace: spec.namespace, - UID: types.UID(spec.machineSetName), + machineSetLabels := map[string]string{ + "machineSetName": spec.machineSetName, + } + + config.machineSet = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": spec.machineSetName, + "namespace": spec.namespace, + "uid": spec.machineSetName, + }, + "spec": map[string]interface{}{ + "replicas": int64(spec.nodeCount), + }, + 
"status": map[string]interface{}{}, }, } + config.machineSet.SetAnnotations(make(map[string]string)) + if !spec.rootIsMachineDeployment { - config.machineSet.ObjectMeta.Annotations = spec.annotations - config.machineSet.Spec.Replicas = int32ptr(int32(spec.nodeCount)) + config.machineSet.SetAnnotations(spec.annotations) } else { - config.machineDeployment = &MachineDeployment{ - TypeMeta: v1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/v1alpha3", defaultCAPIGroup), - Kind: "MachineDeployment", - }, - ObjectMeta: v1.ObjectMeta{ - Name: spec.machineDeploymentName, - Namespace: spec.namespace, - UID: types.UID(spec.machineDeploymentName), - Annotations: spec.annotations, - }, - Spec: MachineDeploymentSpec{ - Replicas: int32ptr(int32(spec.nodeCount)), + machineSetLabels["machineDeploymentName"] = spec.machineDeploymentName + + machineDeploymentLabels := map[string]string{ + "machineDeploymentName": spec.machineDeploymentName, + } + + config.machineDeployment = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": spec.machineDeploymentName, + "namespace": spec.namespace, + "uid": spec.machineDeploymentName, + }, + "spec": map[string]interface{}{ + "replicas": int64(spec.nodeCount), + }, + "status": map[string]interface{}{}, }, } + config.machineDeployment.SetAnnotations(spec.annotations) + config.machineDeployment.SetLabels(machineDeploymentLabels) + unstructured.SetNestedStringMap(config.machineDeployment.Object, machineDeploymentLabels, "spec", "selector", "matchLabels") - config.machineSet.OwnerReferences = make([]v1.OwnerReference, 1) - config.machineSet.OwnerReferences[0] = v1.OwnerReference{ - Name: config.machineDeployment.Name, - Kind: config.machineDeployment.Kind, - UID: config.machineDeployment.UID, + ownerRefs := []metav1.OwnerReference{ + { + Name: config.machineDeployment.GetName(), + Kind: config.machineDeployment.GetKind(), + 
UID: config.machineDeployment.GetUID(), + }, } + config.machineSet.SetOwnerReferences(ownerRefs) } + config.machineSet.SetLabels(machineSetLabels) + unstructured.SetNestedStringMap(config.machineSet.Object, machineSetLabels, "spec", "selector", "matchLabels") - machineOwner := v1.OwnerReference{ - Name: config.machineSet.Name, - Kind: config.machineSet.Kind, - UID: config.machineSet.UID, + machineOwner := metav1.OwnerReference{ + Name: config.machineSet.GetName(), + Kind: config.machineSet.GetKind(), + UID: config.machineSet.GetUID(), } for j := 0; j < spec.nodeCount; j++ { - config.nodes[j], config.machines[j] = makeLinkedNodeAndMachine(j, spec.namespace, machineOwner) + config.nodes[j], config.machines[j] = makeLinkedNodeAndMachine(j, spec.namespace, machineOwner, machineSetLabels) } result = append(result, config) @@ -232,12 +351,12 @@ func createTestConfigs(specs ...testSpec) []*testConfig { // makeLinkedNodeAndMachine creates a node and machine. The machine // has its NodeRef set to the new node and the new machine's owner // reference is set to owner. 
-func makeLinkedNodeAndMachine(i int, namespace string, owner v1.OwnerReference) (*corev1.Node, *Machine) { +func makeLinkedNodeAndMachine(i int, namespace string, owner metav1.OwnerReference, machineLabels map[string]string) (*corev1.Node, *unstructured.Unstructured) { node := &corev1.Node{ - TypeMeta: v1.TypeMeta{ + TypeMeta: metav1.TypeMeta{ Kind: "Node", }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s-node-%d", namespace, owner.Name, i), Annotations: map[string]string{ machineAnnotationKey: fmt.Sprintf("%s/%s-%s-machine-%d", namespace, namespace, owner.Name, i), @@ -248,56 +367,50 @@ func makeLinkedNodeAndMachine(i int, namespace string, owner v1.OwnerReference) }, } - machine := &Machine{ - TypeMeta: v1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/v1alpha3", defaultCAPIGroup), - Kind: "Machine", - }, - ObjectMeta: v1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-machine-%d", namespace, owner.Name, i), - Namespace: namespace, - OwnerReferences: []v1.OwnerReference{{ - Name: owner.Name, - Kind: owner.Kind, - UID: owner.UID, - }}, - }, - Spec: MachineSpec{ - ProviderID: pointer.StringPtr(fmt.Sprintf("test:////%s-%s-nodeid-%d", namespace, owner.Name, i)), - }, - Status: MachineStatus{ - NodeRef: &corev1.ObjectReference{ - Kind: node.Kind, - Name: node.Name, + machine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": fmt.Sprintf("%s-%s-machine-%d", namespace, owner.Name, i), + "namespace": namespace, + }, + "spec": map[string]interface{}{ + "providerID": fmt.Sprintf("test:////%s-%s-nodeid-%d", namespace, owner.Name, i), + }, + "status": map[string]interface{}{ + "nodeRef": map[string]interface{}{ + "kind": node.Kind, + "name": node.Name, + }, }, }, } + machine.SetOwnerReferences([]metav1.OwnerReference{owner}) + machine.SetLabels(machineLabels) return node, machine } -func int32ptr(v int32) *int32 { - 
return &v -} - func addTestConfigs(t *testing.T, controller *machineController, testConfigs ...*testConfig) error { t.Helper() for _, config := range testConfigs { if config.machineDeployment != nil { - - if err := controller.machineDeploymentInformer.Informer().GetStore().Add(newUnstructuredFromMachineDeployment(config.machineDeployment)); err != nil { + if err := createResource(controller.managementClient, controller.machineDeploymentInformer, controller.machineDeploymentResource, config.machineDeployment); err != nil { return err } } - if err := controller.machineSetInformer.Informer().GetStore().Add(newUnstructuredFromMachineSet(config.machineSet)); err != nil { + if err := createResource(controller.managementClient, controller.machineSetInformer, controller.machineSetResource, config.machineSet); err != nil { return err } + for i := range config.machines { - if err := controller.machineInformer.Informer().GetStore().Add(newUnstructuredFromMachine(config.machines[i])); err != nil { + if err := createResource(controller.managementClient, controller.machineInformer, controller.machineResource, config.machines[i]); err != nil { return err } } + for i := range config.nodes { if err := controller.nodeInformer.GetStore().Add(config.nodes[i]); err != nil { return err @@ -307,6 +420,45 @@ func addTestConfigs(t *testing.T, controller *machineController, testConfigs ... 
return nil } +func selectorFromScalableResource(u *unstructured.Unstructured) (labels.Selector, error) { + unstructuredSelector, found, err := unstructured.NestedMap(u.Object, "spec", "selector") + if err != nil { + return nil, err + } + + if !found { + return nil, fmt.Errorf("expected field spec.selector on scalable resource type") + } + + labelSelector := &metav1.LabelSelector{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredSelector, labelSelector); err != nil { + return nil, err + } + + return metav1.LabelSelectorAsSelector(labelSelector) +} + +func createResource(client dynamic.Interface, informer informers.GenericInformer, gvr schema.GroupVersionResource, resource *unstructured.Unstructured) error { + if _, err := client.Resource(gvr).Namespace(resource.GetNamespace()).Create(context.TODO(), resource.DeepCopy(), metav1.CreateOptions{}); err != nil { + return err + } + return informer.Informer().GetStore().Add(resource.DeepCopy()) +} + +func updateResource(client dynamic.Interface, informer informers.GenericInformer, gvr schema.GroupVersionResource, resource *unstructured.Unstructured) error { + if _, err := client.Resource(gvr).Namespace(resource.GetNamespace()).Update(context.TODO(), resource.DeepCopy(), metav1.UpdateOptions{}); err != nil { + return err + } + return informer.Informer().GetStore().Update(resource.DeepCopy()) +} + +func deleteResource(client dynamic.Interface, informer informers.GenericInformer, gvr schema.GroupVersionResource, resource *unstructured.Unstructured) error { + if err := client.Resource(gvr).Namespace(resource.GetNamespace()).Delete(context.TODO(), resource.GetName(), metav1.DeleteOptions{}); err != nil { + return err + } + return informer.Informer().GetStore().Delete(resource) +} + func deleteTestConfigs(t *testing.T, controller *machineController, testConfigs ...*testConfig) error { t.Helper() @@ -333,7 +485,7 @@ func deleteTestConfigs(t *testing.T, controller *machineController, testConfigs 
return nil } -func TestControllerFindMachineByID(t *testing.T) { +func TestControllerFindMachine(t *testing.T) { type testCase struct { description string name string @@ -369,26 +521,26 @@ func TestControllerFindMachineByID(t *testing.T) { } if tc.lookupSucceeds && machine != nil { - if machine.Name != tc.name { - t.Errorf("expected %q, got %q", tc.name, machine.Name) + if machine.GetName() != tc.name { + t.Errorf("expected %q, got %q", tc.name, machine.GetName()) } - if machine.Namespace != tc.namespace { - t.Errorf("expected %q, got %q", tc.namespace, machine.Namespace) + if machine.GetNamespace() != tc.namespace { + t.Errorf("expected %q, got %q", tc.namespace, machine.GetNamespace()) } } } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) if tc.name == "" { - tc.name = testConfig.machines[0].Name + tc.name = testConfig.machines[0].GetName() } if tc.namespace == "" { - tc.namespace = testConfig.machines[0].Namespace + tc.namespace = testConfig.machines[0].GetNamespace() } test(t, tc, testConfig) }) @@ -396,7 +548,7 @@ func TestControllerFindMachineByID(t *testing.T) { } func TestControllerFindMachineOwner(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -412,13 +564,17 @@ func TestControllerFindMachineOwner(t *testing.T) { if testResult1 == nil { t.Fatal("expected non-nil result") } - if testConfig.spec.machineSetName != testResult1.Name { - t.Errorf("expected %q, got %q", testConfig.spec.machineSetName, testResult1.Name) + if testConfig.spec.machineSetName != 
testResult1.GetName() { + t.Errorf("expected %q, got %q", testConfig.spec.machineSetName, testResult1.GetName()) } - // Test #2: Lookup fails as the machine UUID != machineset UUID + // Test #2: Lookup fails as the machine ownerref Name != machineset Name testMachine2 := testConfig.machines[0].DeepCopy() - testMachine2.OwnerReferences[0].UID = "does-not-match-machineset" + ownerRefs := testMachine2.GetOwnerReferences() + ownerRefs[0].Name = "does-not-match-machineset" + + testMachine2.SetOwnerReferences(ownerRefs) + testResult2, err := controller.findMachineOwner(testMachine2) if err != nil { t.Fatalf("unexpected error, got %v", err) @@ -428,7 +584,7 @@ func TestControllerFindMachineOwner(t *testing.T) { } // Test #3: Delete the MachineSet and lookup should fail - if err := controller.machineSetInformer.Informer().GetStore().Delete(testResult1); err != nil { + if err := deleteResource(controller.managementClient, controller.machineSetInformer, controller.machineSetResource, testConfig.machineSet); err != nil { t.Fatalf("unexpected error, got %v", err) } testResult3, err := controller.findMachineOwner(testConfig.machines[0].DeepCopy()) @@ -441,7 +597,7 @@ func TestControllerFindMachineOwner(t *testing.T) { } func TestControllerFindMachineByProviderID(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -473,7 +629,7 @@ func TestControllerFindMachineByProviderID(t *testing.T) { } // Test #2: Verify machine returned by fake provider ID is correct machine - fakeProviderID := fmt.Sprintf("%s$s/%s", testConfig.machines[0].Namespace, testConfig.machines[0].Name) + fakeProviderID := fmt.Sprintf("%s$s/%s", testConfig.machines[0].GetNamespace(), testConfig.machines[0].GetName()) machine, err = 
controller.findMachineByProviderID(normalizedProviderID(fakeProviderID)) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -485,7 +641,9 @@ func TestControllerFindMachineByProviderID(t *testing.T) { // Test #3: Verify machine is not found if it has a // non-existent or different provider ID. machine = testConfig.machines[0].DeepCopy() - machine.Spec.ProviderID = pointer.StringPtr("does-not-match") + if err := unstructured.SetNestedField(machine.Object, "does-not-match", "spec", "providerID"); err != nil { + t.Fatalf("unexpected error: %v", err) + } if err := controller.machineInformer.Informer().GetStore().Update(machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } @@ -499,7 +657,7 @@ func TestControllerFindMachineByProviderID(t *testing.T) { } func TestControllerFindNodeByNodeName(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -526,102 +684,151 @@ func TestControllerFindNodeByNodeName(t *testing.T) { } } -func TestControllerMachinesInMachineSet(t *testing.T) { - testConfig1 := createMachineSetTestConfig("testConfig1", 5, map[string]string{ - nodeGroupMinSizeAnnotationKey: "1", - nodeGroupMaxSizeAnnotationKey: "10", - }) +func TestControllerListMachinesForScalableResource(t *testing.T) { + test := func(t *testing.T, testConfig1 *testConfig, testConfig2 *testConfig) { + controller, stop := mustCreateTestController(t, testConfig1) + defer stop() - controller, stop := mustCreateTestController(t, testConfig1) - defer stop() + if err := addTestConfigs(t, controller, testConfig2); err != nil { + t.Fatalf("unexpected error: %v", err) + } - // Construct a second set of objects and add the machines, - // nodes and the additional machineset to the existing set of - // test objects in the controller. 
This gives us two - // machinesets, each with their own machines and linked nodes. - testConfig2 := createMachineSetTestConfig("testConfig2", 5, map[string]string{ - nodeGroupMinSizeAnnotationKey: "1", - nodeGroupMaxSizeAnnotationKey: "10", - }) + scalableResource1 := testConfig1.machineSet + if testConfig1.machineDeployment != nil { + scalableResource1 = testConfig1.machineDeployment + } - if err := addTestConfigs(t, controller, testConfig2); err != nil { - t.Fatalf("unexpected error: %v", err) - } + scalableResource2 := testConfig2.machineSet + if testConfig2.machineDeployment != nil { + scalableResource2 = testConfig2.machineDeployment + } - machinesInTestObjs1, err := controller.listMachines(testConfig1.spec.namespace, labels.Everything()) - if err != nil { - t.Fatalf("error listing machines: %v", err) - } + machinesInScalableResource1, err := controller.listMachinesForScalableResource(scalableResource1) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } - machinesInTestObjs2, err := controller.listMachines(testConfig2.spec.namespace, labels.Everything()) - if err != nil { - t.Fatalf("error listing machines: %v", err) - } + machinesInScalableResource2, err := controller.listMachinesForScalableResource(scalableResource2) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } - actual := len(machinesInTestObjs1) + len(machinesInTestObjs2) - expected := len(testConfig1.machines) + len(testConfig2.machines) - if actual != expected { - t.Fatalf("expected %d machines, got %d", expected, actual) - } + actual := len(machinesInScalableResource1) + len(machinesInScalableResource2) + expected := len(testConfig1.machines) + len(testConfig2.machines) + if actual != expected { + t.Fatalf("expected %d machines, got %d", expected, actual) + } - // Sort results as order is not guaranteed. 
- sort.Slice(machinesInTestObjs1, func(i, j int) bool { - return machinesInTestObjs1[i].Name < machinesInTestObjs1[j].Name - }) - sort.Slice(machinesInTestObjs2, func(i, j int) bool { - return machinesInTestObjs2[i].Name < machinesInTestObjs2[j].Name - }) + // Sort results as order is not guaranteed. + sort.Slice(machinesInScalableResource1, func(i, j int) bool { + return machinesInScalableResource1[i].GetName() < machinesInScalableResource1[j].GetName() + }) + sort.Slice(machinesInScalableResource2, func(i, j int) bool { + return machinesInScalableResource2[i].GetName() < machinesInScalableResource2[j].GetName() + }) - for i, m := range machinesInTestObjs1 { - if m.Name != testConfig1.machines[i].Name { - t.Errorf("expected %q, got %q", testConfig1.machines[i].Name, m.Name) + for i, m := range machinesInScalableResource1 { + if m.GetName() != testConfig1.machines[i].GetName() { + t.Errorf("expected %q, got %q", testConfig1.machines[i].GetName(), m.GetName()) + } + if m.GetNamespace() != testConfig1.machines[i].GetNamespace() { + t.Errorf("expected %q, got %q", testConfig1.machines[i].GetNamespace(), m.GetNamespace()) + } } - if m.Namespace != testConfig1.machines[i].Namespace { - t.Errorf("expected %q, got %q", testConfig1.machines[i].Namespace, m.Namespace) + + for i, m := range machinesInScalableResource2 { + if m.GetName() != testConfig2.machines[i].GetName() { + t.Errorf("expected %q, got %q", testConfig2.machines[i].GetName(), m.GetName()) + } + if m.GetNamespace() != testConfig2.machines[i].GetNamespace() { + t.Errorf("expected %q, got %q", testConfig2.machines[i].GetNamespace(), m.GetNamespace()) + } } - } - for i, m := range machinesInTestObjs2 { - if m.Name != testConfig2.machines[i].Name { - t.Errorf("expected %q, got %q", testConfig2.machines[i].Name, m.Name) + // Finally everything in the respective objects should be equal. 
+ if !reflect.DeepEqual(testConfig1.machines, machinesInScalableResource1) { + t.Fatalf("expected %+v, got %+v", testConfig1.machines, machinesInScalableResource2) } - if m.Namespace != testConfig2.machines[i].Namespace { - t.Errorf("expected %q, got %q", testConfig2.machines[i].Namespace, m.Namespace) + if !reflect.DeepEqual(testConfig2.machines, machinesInScalableResource2) { + t.Fatalf("expected %+v, got %+v", testConfig2.machines, machinesInScalableResource2) } } - // Finally everything in the respective objects should be equal. - if !reflect.DeepEqual(testConfig1.machines, machinesInTestObjs1) { - t.Fatalf("expected %+v, got %+v", testConfig1.machines, machinesInTestObjs1) - } - if !reflect.DeepEqual(testConfig2.machines, machinesInTestObjs2) { - t.Fatalf("expected %+v, got %+v", testConfig2.machines, machinesInTestObjs2) - } + t.Run("MachineSet", func(t *testing.T) { + namespace := RandomString(6) + testConfig1 := createMachineSetTestConfig(namespace, RandomString(6), 5, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + // Construct a second set of objects and add the machines, + // nodes and the additional machineset to the existing set of + // test objects in the controller. This gives us two + // machinesets, each with their own machines and linked nodes. 
+ testConfig2 := createMachineSetTestConfig(namespace, RandomString(6), 5, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + test(t, testConfig1, testConfig2) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + namespace := RandomString(6) + testConfig1 := createMachineDeploymentTestConfig(namespace, RandomString(6), 5, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + // Construct a second set of objects and add the machines, + // nodes, machineset, and the additional machineset to the existing set of + // test objects in the controller. This gives us two + // machinedeployments, each with their own machineSet, machines and linked nodes. + testConfig2 := createMachineDeploymentTestConfig(namespace, RandomString(6), 5, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + test(t, testConfig1, testConfig2) + }) } func TestControllerLookupNodeGroupForNonExistentNode(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ - nodeGroupMinSizeAnnotationKey: "1", - nodeGroupMaxSizeAnnotationKey: "10", - }) + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() - controller, stop := mustCreateTestController(t, testConfig) - defer stop() + node := testConfig.nodes[0].DeepCopy() + node.Spec.ProviderID = "does-not-exist" - node := testConfig.nodes[0].DeepCopy() - node.Spec.ProviderID = "does-not-exist" + ng, err := controller.nodeGroupForNode(node) - ng, err := controller.nodeGroupForNode(node) + // Looking up a node that doesn't exist doesn't generate an + // error. But, equally, the ng should actually be nil. + if err != nil { + t.Fatalf("unexpected error: %v", err) + } - // Looking up a node that doesn't exist doesn't generate an - // error. But, equally, the ng should actually be nil. 
- if err != nil { - t.Fatalf("unexpected error: %v", err) + if ng != nil { + t.Fatalf("unexpected nodegroup: %v", ng) + } } - if ng != nil { - t.Fatalf("unexpected nodegroup: %v", ng) - } + t.Run("MachineSet", func(t *testing.T) { + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + test(t, testConfig) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + test(t, testConfig) + }) } func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { @@ -630,8 +837,9 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { defer stop() machine := testConfig.machines[0].DeepCopy() - machine.OwnerReferences = []v1.OwnerReference{} - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + machine.SetOwnerReferences([]metav1.OwnerReference{}) + + if err := updateResource(controller.managementClient, controller.machineInformer, controller.machineResource, machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } @@ -646,7 +854,7 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -654,7 +862,7 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { }) t.Run("MachineDeployment", func(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(testNamespace, 1, map[string]string{ + testConfig 
:= createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -662,6 +870,32 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { }) } +func TestControllerNodeGroupForNodeWithMissingSetMachineOwner(t *testing.T) { + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + machineSet := testConfig.machineSet.DeepCopy() + machineSet.SetOwnerReferences([]metav1.OwnerReference{}) + + if err := updateResource(controller.managementClient, controller.machineSetInformer, controller.machineSetResource, machineSet); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + + ng, err := controller.nodeGroupForNode(testConfig.nodes[0]) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if ng != nil { + t.Fatalf("unexpected nodegroup: %v", ng) + } +} + func TestControllerNodeGroupForNodeWithPositiveScalingBounds(t *testing.T) { test := func(t *testing.T, testConfig *testConfig) { controller, stop := mustCreateTestController(t, testConfig) @@ -679,7 +913,7 @@ func TestControllerNodeGroupForNodeWithPositiveScalingBounds(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "1", }) @@ -687,7 +921,7 @@ func TestControllerNodeGroupForNodeWithPositiveScalingBounds(t *testing.T) { }) t.Run("MachineDeployment", func(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(testNamespace, 1, map[string]string{ + testConfig := 
createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "1", }) @@ -715,18 +949,20 @@ func TestControllerNodeGroups(t *testing.T) { controller, stop := mustCreateTestController(t) defer stop() + namespace := RandomString(6) + // Test #1: zero nodegroups assertNodegroupLen(t, controller, 0) // Test #2: add 5 machineset-based nodegroups - machineSetConfigs := createMachineSetTestConfigs("MachineSet", 5, 1, annotations) + machineSetConfigs := createMachineSetTestConfigs(namespace, RandomString(6), 5, 1, annotations) if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } assertNodegroupLen(t, controller, 5) // Test #2: add 2 machinedeployment-based nodegroups - machineDeploymentConfigs := createMachineDeploymentTestConfigs("MachineDeployment", 2, 1, annotations) + machineDeploymentConfigs := createMachineDeploymentTestConfigs(namespace, RandomString(6), 2, 1, annotations) if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -750,14 +986,14 @@ func TestControllerNodeGroups(t *testing.T) { } // Test #5: machineset with no scaling bounds results in no nodegroups - machineSetConfigs = createMachineSetTestConfigs("MachineSet", 5, 1, annotations) + machineSetConfigs = createMachineSetTestConfigs(namespace, RandomString(6), 5, 1, annotations) if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } assertNodegroupLen(t, controller, 0) // Test #6: machinedeployment with no scaling bounds results in no nodegroups - machineDeploymentConfigs = createMachineDeploymentTestConfigs("MachineDeployment", 2, 1, annotations) + machineDeploymentConfigs = createMachineDeploymentTestConfigs(namespace, RandomString(6), 2, 1, annotations) if err := addTestConfigs(t, controller, 
machineDeploymentConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -769,7 +1005,7 @@ func TestControllerNodeGroups(t *testing.T) { } // Test #7: machineset with bad scaling bounds results in an error and no nodegroups - machineSetConfigs = createMachineSetTestConfigs("MachineSet", 5, 1, annotations) + machineSetConfigs = createMachineSetTestConfigs(namespace, RandomString(6), 5, 1, annotations) if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -778,7 +1014,7 @@ func TestControllerNodeGroups(t *testing.T) { } // Test #8: machinedeployment with bad scaling bounds results in an error and no nodegroups - machineDeploymentConfigs = createMachineDeploymentTestConfigs("MachineDeployment", 2, 1, annotations) + machineDeploymentConfigs = createMachineDeploymentTestConfigs(namespace, RandomString(6), 2, 1, annotations) if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -842,19 +1078,19 @@ func TestControllerNodeGroupsNodeCount(t *testing.T) { t.Run("MachineSet", func(t *testing.T) { for _, tc := range testCases { - test(t, tc, createMachineSetTestConfigs(testNamespace, tc.nodeGroups, tc.nodesPerGroup, annotations)) + test(t, tc, createMachineSetTestConfigs(RandomString(6), RandomString(6), tc.nodeGroups, tc.nodesPerGroup, annotations)) } }) t.Run("MachineDeployment", func(t *testing.T) { for _, tc := range testCases { - test(t, tc, createMachineDeploymentTestConfigs(testNamespace, tc.nodeGroups, tc.nodesPerGroup, annotations)) + test(t, tc, createMachineDeploymentTestConfigs(RandomString(6), RandomString(6), tc.nodeGroups, tc.nodesPerGroup, annotations)) } }) } func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ 
nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -866,8 +1102,9 @@ func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { // want to force findMachineByProviderID() to fallback to // searching using the annotation on the node object. for _, machine := range testConfig.machines { - machine.Spec.ProviderID = nil - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + unstructured.RemoveNestedField(machine.Object, "spec", "providerID") + + if err := updateResource(controller.managementClient, controller.machineInformer, controller.machineResource, machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } } @@ -901,7 +1138,7 @@ func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { } func TestControllerMachineSetNodeNamesWithoutLinkage(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 3, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 3, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -910,15 +1147,13 @@ func TestControllerMachineSetNodeNamesWithoutLinkage(t *testing.T) { defer stop() // Remove all linkage between node and machine. 
- for _, machine := range testConfig.machines { - machine.Spec.ProviderID = nil - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { - t.Fatalf("unexpected error updating machine, got %v", err) - } - } - for _, machine := range testConfig.machines { - machine.Status.NodeRef = nil - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + for i := range testConfig.machines { + machine := testConfig.machines[i].DeepCopy() + + unstructured.RemoveNestedField(machine.Object, "spec", "providerID") + unstructured.RemoveNestedField(machine.Object, "status", "nodeRef") + + if err := updateResource(controller.managementClient, controller.machineInformer, controller.machineResource, machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } } @@ -945,7 +1180,7 @@ func TestControllerMachineSetNodeNamesWithoutLinkage(t *testing.T) { } func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 3, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 3, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -956,9 +1191,12 @@ func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { // Remove Status.NodeRef.Name on all the machines. We want to // force machineSetNodeNames() to only consider the provider // ID for lookups. 
- for _, machine := range testConfig.machines { - machine.Status.NodeRef = nil - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + for i := range testConfig.machines { + machine := testConfig.machines[i].DeepCopy() + + unstructured.RemoveNestedField(machine.Object, "status", "nodeRef") + + if err := updateResource(controller.managementClient, controller.machineInformer, controller.machineResource, machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } } @@ -994,7 +1232,7 @@ func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { } func TestControllerMachineSetNodeNamesUsingStatusNodeRefName(t *testing.T) { - testConfig := createMachineSetTestConfig(testNamespace, 3, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 3, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -1005,9 +1243,12 @@ func TestControllerMachineSetNodeNamesUsingStatusNodeRefName(t *testing.T) { // Remove all the provider ID values on all the machines. We // want to force machineSetNodeNames() to fallback to // searching using Status.NodeRef.Name. 
- for _, machine := range testConfig.machines { - machine.Spec.ProviderID = nil - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + for i := range testConfig.machines { + machine := testConfig.machines[i].DeepCopy() + + unstructured.RemoveNestedField(machine.Object, "spec", "providerID") + + if err := updateResource(controller.managementClient, controller.machineInformer, controller.machineResource, machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } } @@ -1062,7 +1303,7 @@ func TestControllerGetAPIVersionGroup(t *testing.T) { } func TestControllerGetAPIVersionGroupWithMachineDeployments(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(testNamespace, 1, map[string]string{ + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "1", }) @@ -1070,35 +1311,43 @@ func TestControllerGetAPIVersionGroupWithMachineDeployments(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - testConfig.machineDeployment.TypeMeta.APIVersion = fmt.Sprintf("%s/v1beta1", customCAPIGroup) - testConfig.machineSet.TypeMeta.APIVersion = fmt.Sprintf("%s/v1beta1", customCAPIGroup) + testConfig.machineDeployment.SetAPIVersion(fmt.Sprintf("%s/v1beta1", customCAPIGroup)) + testConfig.machineSet.SetAPIVersion(fmt.Sprintf("%s/v1beta1", customCAPIGroup)) + for _, machine := range testConfig.machines { - machine.TypeMeta.APIVersion = fmt.Sprintf("%s/v1beta1", customCAPIGroup) + machine.SetAPIVersion(fmt.Sprintf("%s/v1beta1", customCAPIGroup)) } + controller, stop := mustCreateTestController(t, testConfig) defer stop() - machineDeployments, err := controller.listMachineDeployments(testNamespace, labels.Everything()) + machineDeployments, err := controller.managementClient.Resource(controller.machineDeploymentResource).Namespace(testConfig.spec.namespace). 
+ List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } - if l := len(machineDeployments); l != 1 { + + if l := len(machineDeployments.Items); l != 1 { t.Fatalf("Incorrect number of MachineDeployments, expected 1, got %d", l) } - machineSets, err := controller.listMachineSets(testNamespace, labels.Everything()) + machineSets, err := controller.managementClient.Resource(controller.machineSetResource).Namespace(testConfig.spec.namespace). + List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } - if l := len(machineSets); l != 1 { - t.Fatalf("Incorrect number of MachineSets, expected 1, got %d", l) + + if l := len(machineSets.Items); l != 1 { + t.Fatalf("Incorrect number of MachineDeployments, expected 1, got %d", l) } - machines, err := controller.listMachines(testNamespace, labels.Everything()) + machines, err := controller.managementClient.Resource(controller.machineResource).Namespace(testConfig.spec.namespace). 
+ List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } - if l := len(machines); l != 1 { + + if l := len(machines.Items); l != 1 { t.Fatalf("Incorrect number of Machines, expected 1, got %d", l) } @@ -1136,7 +1385,7 @@ func TestGetAPIGroupPreferredVersion(t *testing.T) { discoveryClient := &fakediscovery.FakeDiscovery{ Fake: &clientgotesting.Fake{ - Resources: []*v1.APIResourceList{ + Resources: []*metav1.APIResourceList{ { GroupVersion: fmt.Sprintf("%s/v1beta1", customCAPIGroup), }, @@ -1192,10 +1441,10 @@ func TestGroupVersionHasResource(t *testing.T) { discoveryClient := &fakediscovery.FakeDiscovery{ Fake: &clientgotesting.Fake{ - Resources: []*v1.APIResourceList{ + Resources: []*metav1.APIResourceList{ { GroupVersion: fmt.Sprintf("%s/v1alpha3", defaultCAPIGroup), - APIResources: []v1.APIResource{ + APIResources: []metav1.APIResource{ { Name: resourceNameMachineDeployment, }, @@ -1276,3 +1525,16 @@ func TestMachineKeyFromFailedProviderID(t *testing.T) { }) } } + +const CharSet = "0123456789abcdefghijklmnopqrstuvwxyz" + +var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + +// RandomString returns a random alphanumeric string. +func RandomString(n int) string { + result := make([]byte, n) + for i := range result { + result[i] = CharSet[rnd.Intn(len(CharSet))] + } + return string(result) +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go deleted file mode 100644 index 0b90285280f8..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterapi - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" -) - -func newMachineDeploymentFromUnstructured(u *unstructured.Unstructured) *MachineDeployment { - machineDeployment := MachineDeployment{ - TypeMeta: metav1.TypeMeta{ - Kind: u.GetKind(), - APIVersion: u.GetAPIVersion(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: u.GetName(), - Namespace: u.GetNamespace(), - UID: u.GetUID(), - Labels: u.GetLabels(), - Annotations: u.GetAnnotations(), - OwnerReferences: u.GetOwnerReferences(), - DeletionTimestamp: u.GetDeletionTimestamp(), - }, - Spec: MachineDeploymentSpec{}, - Status: MachineDeploymentStatus{}, - } - - replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") - if err == nil && found { - machineDeployment.Spec.Replicas = pointer.Int32Ptr(int32(replicas)) - } - - return &machineDeployment -} - -func newMachineSetFromUnstructured(u *unstructured.Unstructured) *MachineSet { - machineSet := MachineSet{ - TypeMeta: metav1.TypeMeta{ - Kind: u.GetKind(), - APIVersion: u.GetAPIVersion(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: u.GetName(), - Namespace: u.GetNamespace(), - UID: u.GetUID(), - Labels: u.GetLabels(), - Annotations: u.GetAnnotations(), - OwnerReferences: u.GetOwnerReferences(), - DeletionTimestamp: u.GetDeletionTimestamp(), - }, - Spec: MachineSetSpec{}, - Status: MachineSetStatus{}, - } - - replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") - if err == nil && found 
{ - machineSet.Spec.Replicas = pointer.Int32Ptr(int32(replicas)) - } - - return &machineSet -} - -func newMachineFromUnstructured(u *unstructured.Unstructured) *Machine { - machine := Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: u.GetKind(), - APIVersion: u.GetAPIVersion(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: u.GetName(), - Namespace: u.GetNamespace(), - UID: u.GetUID(), - Labels: u.GetLabels(), - Annotations: u.GetAnnotations(), - OwnerReferences: u.GetOwnerReferences(), - ClusterName: u.GetClusterName(), - DeletionTimestamp: u.GetDeletionTimestamp(), - }, - Spec: MachineSpec{}, - Status: MachineStatus{}, - } - - if providerID, _, _ := unstructured.NestedString(u.Object, "spec", "providerID"); providerID != "" { - machine.Spec.ProviderID = pointer.StringPtr(providerID) - } - - nodeRef := corev1.ObjectReference{} - - if nodeRefKind, _, _ := unstructured.NestedString(u.Object, "status", "nodeRef", "kind"); nodeRefKind != "" { - nodeRef.Kind = nodeRefKind - } - - if nodeRefName, _, _ := unstructured.NestedString(u.Object, "status", "nodeRef", "name"); nodeRefName != "" { - nodeRef.Name = nodeRefName - } - - if nodeRef.Name != "" || nodeRef.Kind != "" { - machine.Status.NodeRef = &nodeRef - } - - if failureMessage, _, _ := unstructured.NestedString(u.Object, "status", "failureMessage"); failureMessage != "" { - machine.Status.FailureMessage = pointer.StringPtr(failureMessage) - } - - return &machine -} - -func newUnstructuredFromMachineSet(m *MachineSet) *unstructured.Unstructured { - u := unstructured.Unstructured{} - - u.SetAPIVersion(m.APIVersion) - u.SetAnnotations(m.Annotations) - u.SetKind(m.Kind) - u.SetLabels(m.Labels) - u.SetName(m.Name) - u.SetNamespace(m.Namespace) - u.SetOwnerReferences(m.OwnerReferences) - u.SetUID(m.UID) - u.SetDeletionTimestamp(m.DeletionTimestamp) - - if m.Spec.Replicas != nil { - unstructured.SetNestedField(u.Object, int64(*m.Spec.Replicas), "spec", "replicas") - } - - return &u -} - -func 
newUnstructuredFromMachineDeployment(m *MachineDeployment) *unstructured.Unstructured { - u := unstructured.Unstructured{} - - u.SetAPIVersion(m.APIVersion) - u.SetAnnotations(m.Annotations) - u.SetKind(m.Kind) - u.SetLabels(m.Labels) - u.SetName(m.Name) - u.SetNamespace(m.Namespace) - u.SetOwnerReferences(m.OwnerReferences) - u.SetUID(m.UID) - u.SetDeletionTimestamp(m.DeletionTimestamp) - - if m.Spec.Replicas != nil { - unstructured.SetNestedField(u.Object, int64(*m.Spec.Replicas), "spec", "replicas") - } - - return &u -} - -func newUnstructuredFromMachine(m *Machine) *unstructured.Unstructured { - u := unstructured.Unstructured{} - - u.SetAPIVersion(m.APIVersion) - u.SetAnnotations(m.Annotations) - u.SetKind(m.Kind) - u.SetLabels(m.Labels) - u.SetName(m.Name) - u.SetNamespace(m.Namespace) - u.SetOwnerReferences(m.OwnerReferences) - u.SetUID(m.UID) - u.SetDeletionTimestamp(m.DeletionTimestamp) - - if m.Spec.ProviderID != nil && *m.Spec.ProviderID != "" { - unstructured.SetNestedField(u.Object, *m.Spec.ProviderID, "spec", "providerID") - } - - if m.Status.NodeRef != nil { - if m.Status.NodeRef.Kind != "" { - unstructured.SetNestedField(u.Object, m.Status.NodeRef.Kind, "status", "nodeRef", "kind") - } - if m.Status.NodeRef.Name != "" { - unstructured.SetNestedField(u.Object, m.Status.NodeRef.Name, "status", "nodeRef", "name") - } - } - - if m.Status.FailureMessage != nil { - unstructured.SetNestedField(u.Object, *m.Status.FailureMessage, "status", "failureMessage") - } - - return &u -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go deleted file mode 100644 index 7ce675cc801d..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterapi - -import ( - "context" - "fmt" - "path" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - klog "k8s.io/klog/v2" - "k8s.io/utils/pointer" -) - -type machineDeploymentScalableResource struct { - controller *machineController - machineDeployment *MachineDeployment - maxSize int - minSize int -} - -var _ scalableResource = (*machineDeploymentScalableResource)(nil) - -func (r machineDeploymentScalableResource) ID() string { - return path.Join(r.Namespace(), r.Name()) -} - -func (r machineDeploymentScalableResource) MaxSize() int { - return r.maxSize -} - -func (r machineDeploymentScalableResource) MinSize() int { - return r.minSize -} - -func (r machineDeploymentScalableResource) Name() string { - return r.machineDeployment.Name -} - -func (r machineDeploymentScalableResource) Namespace() string { - return r.machineDeployment.Namespace -} - -func (r machineDeploymentScalableResource) Nodes() ([]string, error) { - var result []string - - if err := r.controller.filterAllMachineSets(func(machineSet *MachineSet) error { - if machineSetIsOwnedByMachineDeployment(machineSet, r.machineDeployment) { - providerIDs, err := r.controller.machineSetProviderIDs(machineSet) - if err != nil { - return err - } - result = append(result, providerIDs...) 
- } - return nil - }); err != nil { - return nil, err - } - - return result, nil -} - -func (r machineDeploymentScalableResource) Replicas() (int32, error) { - freshMachineDeployment, err := r.controller.getMachineDeployment(r.machineDeployment.Namespace, r.machineDeployment.Name, metav1.GetOptions{}) - if err != nil { - return 0, err - } - - if freshMachineDeployment == nil { - return 0, fmt.Errorf("unknown machineDeployment %s", r.machineDeployment.Name) - } - - if freshMachineDeployment.Spec.Replicas == nil { - klog.Warningf("MachineDeployment %q has nil spec.replicas. This is unsupported behaviour. Falling back to status.replicas.", r.machineDeployment.Name) - } - // If no value for replicas on the MachineSet spec, fallback to the status - // TODO: Remove this fallback once defaulting is implemented for MachineSet Replicas - return pointer.Int32PtrDerefOr(freshMachineDeployment.Spec.Replicas, freshMachineDeployment.Status.Replicas), nil -} - -func (r machineDeploymentScalableResource) SetSize(nreplicas int32) error { - u, err := r.controller.dynamicclient.Resource(*r.controller.machineDeploymentResource).Namespace(r.machineDeployment.Namespace).Get(context.TODO(), r.machineDeployment.Name, metav1.GetOptions{}) - - if err != nil { - return err - } - - if u == nil { - return fmt.Errorf("unknown machineDeployment %s", r.machineDeployment.Name) - } - - u = u.DeepCopy() - if err := unstructured.SetNestedField(u.Object, int64(nreplicas), "spec", "replicas"); err != nil { - return fmt.Errorf("failed to set replica value: %v", err) - } - - _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineDeploymentResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - return updateErr -} - -func (r machineDeploymentScalableResource) UnmarkMachineForDeletion(machine *Machine) error { - return unmarkMachineForDeletion(r.controller, machine) -} - -func (r machineDeploymentScalableResource) MarkMachineForDeletion(machine *Machine) 
error { - u, err := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(machine.Namespace).Get(context.TODO(), machine.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - u = u.DeepCopy() - - annotations := u.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - annotations[machineDeleteAnnotationKey] = time.Now().String() - u.SetAnnotations(annotations) - - _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - return updateErr -} - -func newMachineDeploymentScalableResource(controller *machineController, machineDeployment *MachineDeployment) (*machineDeploymentScalableResource, error) { - minSize, maxSize, err := parseScalingBounds(machineDeployment.Annotations) - if err != nil { - return nil, fmt.Errorf("error validating min/max annotations: %v", err) - } - - return &machineDeploymentScalableResource{ - controller: controller, - machineDeployment: machineDeployment, - maxSize: maxSize, - minSize: minSize, - }, nil -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go deleted file mode 100644 index 5db264e287b8..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterapi - -import ( - "context" - "fmt" - "path" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - klog "k8s.io/klog/v2" - "k8s.io/utils/pointer" -) - -type machineSetScalableResource struct { - controller *machineController - machineSet *MachineSet - maxSize int - minSize int -} - -var _ scalableResource = (*machineSetScalableResource)(nil) - -func (r machineSetScalableResource) ID() string { - return path.Join(r.Namespace(), r.Name()) -} - -func (r machineSetScalableResource) MaxSize() int { - return r.maxSize -} - -func (r machineSetScalableResource) MinSize() int { - return r.minSize -} - -func (r machineSetScalableResource) Name() string { - return r.machineSet.Name -} - -func (r machineSetScalableResource) Namespace() string { - return r.machineSet.Namespace -} - -func (r machineSetScalableResource) Nodes() ([]string, error) { - return r.controller.machineSetProviderIDs(r.machineSet) -} - -func (r machineSetScalableResource) Replicas() (int32, error) { - freshMachineSet, err := r.controller.getMachineSet(r.machineSet.Namespace, r.machineSet.Name, metav1.GetOptions{}) - if err != nil { - return 0, err - } - - if freshMachineSet == nil { - return 0, fmt.Errorf("unknown machineSet %s", r.machineSet.Name) - } - - if freshMachineSet.Spec.Replicas == nil { - klog.Warningf("MachineSet %q has nil spec.replicas. This is unsupported behaviour. 
Falling back to status.replicas.", r.machineSet.Name) - } - - // If no value for replicas on the MachineSet spec, fallback to the status - // TODO: Remove this fallback once defaulting is implemented for MachineSet Replicas - return pointer.Int32PtrDerefOr(freshMachineSet.Spec.Replicas, freshMachineSet.Status.Replicas), nil -} - -func (r machineSetScalableResource) SetSize(nreplicas int32) error { - u, err := r.controller.dynamicclient.Resource(*r.controller.machineSetResource).Namespace(r.machineSet.Namespace).Get(context.TODO(), r.machineSet.Name, metav1.GetOptions{}) - - if err != nil { - return err - } - - if u == nil { - return fmt.Errorf("unknown machineSet %s", r.machineSet.Name) - } - - u = u.DeepCopy() - if err := unstructured.SetNestedField(u.Object, int64(nreplicas), "spec", "replicas"); err != nil { - return fmt.Errorf("failed to set replica value: %v", err) - } - - _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineSetResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - return updateErr -} - -func (r machineSetScalableResource) MarkMachineForDeletion(machine *Machine) error { - u, err := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(machine.Namespace).Get(context.TODO(), machine.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - u = u.DeepCopy() - - annotations := u.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - annotations[machineDeleteAnnotationKey] = time.Now().String() - u.SetAnnotations(annotations) - - _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - return updateErr -} - -func (r machineSetScalableResource) UnmarkMachineForDeletion(machine *Machine) error { - return unmarkMachineForDeletion(r.controller, machine) -} - -func newMachineSetScalableResource(controller *machineController, 
machineSet *MachineSet) (*machineSetScalableResource, error) { - minSize, maxSize, err := parseScalingBounds(machineSet.Annotations) - if err != nil { - return nil, fmt.Errorf("error validating min/max annotations: %v", err) - } - - return &machineSetScalableResource{ - controller: controller, - machineSet: machineSet, - maxSize: maxSize, - minSize: minSize, - }, nil -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset_test.go deleted file mode 100644 index 92930ee01c6d..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterapi - -import ( - "context" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -func TestSetSize(t *testing.T) { - initialReplicas := int32(1) - updatedReplicas := int32(5) - - testConfig := createMachineSetTestConfig(testNamespace, int(initialReplicas), nil) - controller, stop := mustCreateTestController(t, testConfig) - defer stop() - - sr, err := newMachineSetScalableResource(controller, testConfig.machineSet) - if err != nil { - t.Fatal(err) - } - - err = sr.SetSize(updatedReplicas) - if err != nil { - t.Fatal(err) - } - - // fetch machineSet - u, err := sr.controller.dynamicclient.Resource(*sr.controller.machineSetResource).Namespace(testConfig.machineSet.Namespace). - Get(context.TODO(), testConfig.machineSet.Name, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - - replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") - if err != nil { - t.Fatal(err) - } - if !found { - t.Fatal("spec.replicas not found") - } - - got := int32(replicas) - if got != updatedReplicas { - t.Errorf("expected %v, got: %v", updatedReplicas, got) - } -} - -func TestReplicas(t *testing.T) { - initialReplicas := int32(1) - updatedReplicas := int32(5) - - testConfig := createMachineSetTestConfig(testNamespace, int(initialReplicas), nil) - controller, stop := mustCreateTestController(t, testConfig) - defer stop() - - sr, err := newMachineSetScalableResource(controller, testConfig.machineSet) - if err != nil { - t.Fatal(err) - } - - i, err := sr.Replicas() - if err != nil { - t.Fatal(err) - } - - if i != initialReplicas { - t.Errorf("expected %v, got: %v", initialReplicas, i) - } - - // fetch and update machineSet - u, err := sr.controller.dynamicclient.Resource(*sr.controller.machineSetResource).Namespace(testConfig.machineSet.Namespace). 
- Get(context.TODO(), testConfig.machineSet.Name, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - - if err := unstructured.SetNestedField(u.Object, int64(updatedReplicas), "spec", "replicas"); err != nil { - t.Fatal(err) - } - - _, err = sr.controller.dynamicclient.Resource(*sr.controller.machineSetResource).Namespace(u.GetNamespace()). - Update(context.TODO(), u, metav1.UpdateOptions{}) - if err != nil { - t.Fatal(err) - } - - i, err = sr.Replicas() - if err != nil { - t.Fatal(err) - } - - if i != updatedReplicas { - t.Errorf("expected %v, got: %v", updatedReplicas, i) - } -} - -func TestSetSizeAndReplicas(t *testing.T) { - initialReplicas := int32(1) - updatedReplicas := int32(5) - - testConfig := createMachineSetTestConfig(testNamespace, int(initialReplicas), nil) - controller, stop := mustCreateTestController(t, testConfig) - defer stop() - - sr, err := newMachineSetScalableResource(controller, testConfig.machineSet) - if err != nil { - t.Fatal(err) - } - - i, err := sr.Replicas() - if err != nil { - t.Fatal(err) - } - - if i != initialReplicas { - t.Errorf("expected %v, got: %v", initialReplicas, i) - } - - err = sr.SetSize(updatedReplicas) - if err != nil { - t.Fatal(err) - } - - i, err = sr.Replicas() - if err != nil { - t.Fatal(err) - } - - if i != updatedReplicas { - t.Errorf("expected %v, got: %v", updatedReplicas, i) - } -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go index 49cf372040e7..2632aba5212f 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go @@ -20,8 +20,10 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + + 
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ) const ( @@ -32,19 +34,11 @@ const ( type nodegroup struct { machineController *machineController - scalableResource scalableResource + scalableResource *unstructuredScalableResource } var _ cloudprovider.NodeGroup = (*nodegroup)(nil) -func (ng *nodegroup) Name() string { - return ng.scalableResource.Name() -} - -func (ng *nodegroup) Namespace() string { - return ng.scalableResource.Namespace() -} - func (ng *nodegroup) MinSize() int { return ng.scalableResource.MinSize() } @@ -147,9 +141,6 @@ func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error { continue } - if machine.Annotations == nil { - machine.Annotations = map[string]string{} - } nodeGroup, err := ng.machineController.nodeGroupForNode(node) if err != nil { return err @@ -160,7 +151,7 @@ func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error { } if err := ng.scalableResource.SetSize(replicas - 1); err != nil { - nodeGroup.scalableResource.UnmarkMachineForDeletion(machine) + _ = nodeGroup.scalableResource.UnmarkMachineForDeletion(machine) return err } @@ -216,7 +207,7 @@ func (ng *nodegroup) Debug() string { // Nodes returns a list of all nodes that belong to this node group. // This includes instances that might have not become a kubernetes node yet. func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) { - nodes, err := ng.scalableResource.Nodes() + providerIDs, err := ng.scalableResource.ProviderIDs() if err != nil { return nil, err } @@ -225,10 +216,10 @@ func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) { // The IDs returned here are used to check if a node is registered or not and // must match the ID on the Node object itself. 
// https://github.com/kubernetes/autoscaler/blob/a973259f1852303ba38a3a61eeee8489cf4e1b13/cluster-autoscaler/clusterstate/clusterstate.go#L967-L985 - instances := make([]cloudprovider.Instance, len(nodes)) - for i := range nodes { + instances := make([]cloudprovider.Instance, len(providerIDs)) + for i := range providerIDs { instances[i] = cloudprovider.Instance{ - Id: nodes[i], + Id: providerIDs[i], } } @@ -257,7 +248,10 @@ func (ng *nodegroup) Exist() bool { // Create creates the node group on the cloud nodegroup side. // Implementation optional. func (ng *nodegroup) Create() (cloudprovider.NodeGroup, error) { - return nil, cloudprovider.ErrAlreadyExist + if ng.Exist() { + return nil, cloudprovider.ErrAlreadyExist + } + return nil, cloudprovider.ErrNotImplemented } // Delete deletes the node group on the cloud nodegroup side. This will @@ -274,22 +268,12 @@ func (ng *nodegroup) Autoprovisioned() bool { return false } -func newNodegroupFromMachineSet(controller *machineController, machineSet *MachineSet) (*nodegroup, error) { - scalableResource, err := newMachineSetScalableResource(controller, machineSet) +func newNodegroupFromScalableResource(controller *machineController, unstructuredScalableResource *unstructured.Unstructured) (*nodegroup, error) { + scalableResource, err := newUnstructuredScalableResource(controller, unstructuredScalableResource) if err != nil { return nil, err } - return &nodegroup{ - machineController: controller, - scalableResource: scalableResource, - }, nil -} -func newNodegroupFromMachineDeployment(controller *machineController, machineDeployment *MachineDeployment) (*nodegroup, error) { - scalableResource, err := newMachineDeploymentScalableResource(controller, machineDeployment) - if err != nil { - return nil, err - } return &nodegroup{ machineController: controller, scalableResource: scalableResource, diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go 
b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go index 29a5212fa235..8deb746b7506 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go @@ -27,10 +27,10 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/utils/pointer" ) const ( @@ -104,18 +104,18 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { errors: false, }} - newNodeGroup := func(t *testing.T, controller *machineController, testConfig *testConfig) (*nodegroup, error) { + newNodeGroup := func(controller *machineController, testConfig *testConfig) (*nodegroup, error) { if testConfig.machineDeployment != nil { - return newNodegroupFromMachineDeployment(controller, testConfig.machineDeployment) + return newNodegroupFromScalableResource(controller, testConfig.machineDeployment) } - return newNodegroupFromMachineSet(controller, testConfig.machineSet) + return newNodegroupFromScalableResource(controller, testConfig.machineSet) } test := func(t *testing.T, tc testCase, testConfig *testConfig) { controller, stop := mustCreateTestController(t, testConfig) defer stop() - ng, err := newNodeGroup(t, controller, testConfig) + ng, err := newNodeGroup(controller, testConfig) if tc.errors && err == nil { t.Fatal("expected an error") } @@ -134,26 +134,25 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { t.Fatal("expected nodegroup to be non-nil") } - var expectedName string + var expectedName, expectedKind string - switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - expectedName = testConfig.spec.machineSetName - case *machineDeploymentScalableResource: + if testConfig.machineDeployment != nil { + expectedKind = 
machineDeploymentKind expectedName = testConfig.spec.machineDeploymentName - default: - t.Fatalf("unexpected type: %T", v) + } else { + expectedKind = machineSetKind + expectedName = testConfig.spec.machineSetName } - expectedID := path.Join(testConfig.spec.namespace, expectedName) + expectedID := path.Join(expectedKind, testConfig.spec.namespace, expectedName) expectedDebug := fmt.Sprintf(debugFormat, expectedID, tc.minSize, tc.maxSize, tc.replicas) - if ng.Name() != expectedName { - t.Errorf("expected %q, got %q", expectedName, ng.Name()) + if ng.scalableResource.Name() != expectedName { + t.Errorf("expected %q, got %q", expectedName, ng.scalableResource.Name()) } - if ng.Namespace() != testConfig.spec.namespace { - t.Errorf("expected %q, got %q", testConfig.spec.namespace, ng.Namespace()) + if ng.scalableResource.Namespace() != testConfig.spec.namespace { + t.Errorf("expected %q, got %q", testConfig.spec.namespace, ng.scalableResource.Namespace()) } if ng.MinSize() != tc.minSize { @@ -198,7 +197,7 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { t.Run("MachineSet", func(t *testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { - test(t, tc, createMachineSetTestConfig(testNamespace, tc.nodeCount, tc.annotations)) + test(t, tc, createMachineSetTestConfig(RandomString(6), RandomString(6), tc.nodeCount, tc.annotations)) }) } }) @@ -206,7 +205,7 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { t.Run("MachineDeployment", func(t *testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { - test(t, tc, createMachineDeploymentTestConfig(testNamespace, tc.nodeCount, tc.annotations)) + test(t, tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), tc.nodeCount, tc.annotations)) }) } }) @@ -270,27 +269,16 @@ func TestNodeGroupIncreaseSizeErrors(t *testing.T) { t.Errorf("expected error message to contain %q, got %q", tc.errorMsg, err.Error()) } - switch v := 
(ng.scalableResource).(type) { - case *machineSetScalableResource: - // A nodegroup is immutable; get a fresh copy. - ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.initial { - t.Errorf("expected %v, got %v", tc.initial, actual) - } - case *machineDeploymentScalableResource: - // A nodegroup is immutable; get a fresh copy. - md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.initial { - t.Errorf("expected %v, got %v", tc.initial, actual) - } - default: - t.Errorf("unexpected type: %T", v) + gvr, err := ng.scalableResource.GroupVersionResource() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + scalableResource, err := ng.machineController.managementScaleClient.Scales(testConfig.spec.namespace). 
+ Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + + if scalableResource.Spec.Replicas != tc.initial { + t.Errorf("expected %v, got %v", tc.initial, scalableResource.Spec.Replicas) } } @@ -301,7 +289,7 @@ func TestNodeGroupIncreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -313,7 +301,7 @@ func TestNodeGroupIncreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -354,27 +342,16 @@ func TestNodeGroupIncreaseSize(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - // A nodegroup is immutable; get a fresh copy. - ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.expected { - t.Errorf("expected %v, got %v", tc.expected, actual) - } - case *machineDeploymentScalableResource: - // A nodegroup is immutable; get a fresh copy. 
- md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.expected { - t.Errorf("expected %v, got %v", tc.expected, actual) - } - default: - t.Errorf("unexpected type: %T", v) + gvr, err := ng.scalableResource.GroupVersionResource() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + scalableResource, err := ng.machineController.managementScaleClient.Scales(ng.scalableResource.Namespace()). + Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + + if scalableResource.Spec.Replicas != tc.expected { + t.Errorf("expected %v, got %v", tc.expected, scalableResource.Spec.Replicas) } } @@ -390,7 +367,7 @@ func TestNodeGroupIncreaseSize(t *testing.T) { expected: 4, delta: 1, } - test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) t.Run("MachineDeployment", func(t *testing.T) { @@ -400,7 +377,7 @@ func TestNodeGroupIncreaseSize(t *testing.T) { expected: 4, delta: 1, } - test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } @@ -428,31 +405,28 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { } ng := nodegroups[0] + + gvr, err := ng.scalableResource.GroupVersionResource() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // DecreaseTargetSize should only decrease the size when the current target size of the nodeGroup // is bigger than the number existing instances for that group. We force such a scenario with targetSizeIncrement. 
- switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - testConfig.machineSet.Spec.Replicas = int32ptr(*testConfig.machineSet.Spec.Replicas + tc.targetSizeIncrement) - u := newUnstructuredFromMachineSet(testConfig.machineSet) - if err := controller.machineSetInformer.Informer().GetStore().Add(u); err != nil { - t.Fatalf("failed to add new machine: %v", err) - } - _, err := controller.dynamicclient.Resource(*controller.machineSetResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - if err != nil { - t.Fatalf("failed to updating machine: %v", err) - } - case *machineDeploymentScalableResource: - testConfig.machineDeployment.Spec.Replicas = int32ptr(*testConfig.machineDeployment.Spec.Replicas + tc.targetSizeIncrement) - u := newUnstructuredFromMachineDeployment(testConfig.machineDeployment) - if err := controller.machineDeploymentInformer.Informer().GetStore().Add(u); err != nil { - } - _, err := controller.dynamicclient.Resource(*controller.machineDeploymentResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - if err != nil { - t.Fatalf("failed to updating machine: %v", err) - } - default: - t.Errorf("unexpected type: %T", v) + scalableResource, err := controller.managementScaleClient.Scales(testConfig.spec.namespace). + Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) } + + scalableResource.Spec.Replicas += tc.targetSizeIncrement + + _, err = ng.machineController.managementScaleClient.Scales(ng.scalableResource.Namespace()). + Update(context.TODO(), gvr.GroupResource(), scalableResource, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // A nodegroup is immutable; get a fresh copy after adding targetSizeIncrement. 
nodegroups, err = controller.nodeGroups() if err != nil { @@ -473,27 +447,14 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { t.Fatalf("expected error: %v, got: %v", tc.expectedError, err) } - switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - // A nodegroup is immutable; get a fresh copy. - ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.expected { - t.Errorf("expected %v, got %v", tc.expected, actual) - } - case *machineDeploymentScalableResource: - // A nodegroup is immutable; get a fresh copy. - md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.expected { - t.Errorf("expected %v, got %v", tc.expected, actual) - } - default: - t.Errorf("unexpected type: %T", v) + scalableResource, err = controller.managementScaleClient.Scales(testConfig.spec.namespace). 
+ Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if scalableResource.Spec.Replicas != tc.expected { + t.Errorf("expected %v, got %v", tc.expected, scalableResource.Spec.Replicas) } } @@ -511,18 +472,18 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { delta: -1, expectedError: true, } - test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) t.Run("MachineSet", func(t *testing.T) { tc := testCase{ - description: "A node group with targe size 4 but only 3 existing instances should decrease by 1", + description: "A node group with target size 4 but only 3 existing instances should decrease by 1", initial: 3, targetSizeIncrement: 1, expected: 3, delta: -1, } - test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) t.Run("MachineDeployment", func(t *testing.T) { @@ -534,7 +495,7 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { delta: -1, expectedError: true, } - test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } @@ -596,27 +557,16 @@ func TestNodeGroupDecreaseSizeErrors(t *testing.T) { t.Errorf("expected error message to contain %q, got %q", tc.errorMsg, err.Error()) } - switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - // A nodegroup is immutable; get a fresh copy. 
- ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.initial { - t.Errorf("expected %v, got %v", tc.initial, actual) - } - case *machineDeploymentScalableResource: - // A nodegroup is immutable; get a fresh copy. - md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.initial { - t.Errorf("expected %v, got %v", tc.initial, actual) - } - default: - t.Errorf("unexpected type: %T", v) + gvr, err := ng.scalableResource.GroupVersionResource() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + scalableResource, err := ng.machineController.managementScaleClient.Scales(testConfig.spec.namespace). + Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + + if scalableResource.Spec.Replicas != tc.initial { + t.Errorf("expected %v, got %v", tc.initial, scalableResource.Spec.Replicas) } } @@ -627,7 +577,7 @@ func TestNodeGroupDecreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -639,7 +589,7 @@ func TestNodeGroupDecreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -684,34 +634,30 @@ func TestNodeGroupDeleteNodes(t *testing.T) { } for i := 5; i < 
len(testConfig.machines); i++ { - machine, err := controller.getMachine(testConfig.machines[i].Namespace, testConfig.machines[i].Name, v1.GetOptions{}) + machine, err := controller.managementClient.Resource(controller.machineResource). + Namespace(testConfig.spec.namespace). + Get(context.TODO(), testConfig.machines[i].GetName(), metav1.GetOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, found := machine.Annotations[machineDeleteAnnotationKey]; !found { - t.Errorf("expected annotation %q on machine %s", machineDeleteAnnotationKey, machine.Name) + if _, found := machine.GetAnnotations()[machineDeleteAnnotationKey]; !found { + t.Errorf("expected annotation %q on machine %s", machineDeleteAnnotationKey, machine.GetName()) } } - switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - updatedMachineSet, err := controller.getMachineSet(testConfig.machineSet.Namespace, testConfig.machineSet.Name, v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(updatedMachineSet.Spec.Replicas, 0); actual != 5 { - t.Fatalf("expected 5 nodes, got %v", actual) - } - case *machineDeploymentScalableResource: - updatedMachineDeployment, err := controller.getMachineDeployment(testConfig.machineDeployment.Namespace, testConfig.machineDeployment.Name, v1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(updatedMachineDeployment.Spec.Replicas, 0); actual != 5 { - t.Fatalf("expected 5 nodes, got %v", actual) - } - default: - t.Errorf("unexpected type: %T", v) + gvr, err := ng.scalableResource.GroupVersionResource() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + scalableResource, err := ng.machineController.managementScaleClient.Scales(testConfig.spec.namespace). 
+ Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if scalableResource.Spec.Replicas != 5 { + t.Errorf("expected 5, got %v", scalableResource.Spec.Replicas) } } @@ -721,14 +667,14 @@ func TestNodeGroupDeleteNodes(t *testing.T) { // sorting and the expected semantics in test() will fail. t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(testNamespace, 10, map[string]string{ + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(testNamespace, 10, map[string]string{ + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) @@ -765,13 +711,10 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { t.Error("expected an error") } - expectedErr0 := `node "test-namespace1-machineset-0-nodeid-0" doesn't belong to node group "test-namespace0/machineset-0"` - if testConfig0.machineDeployment != nil { - expectedErr0 = `node "test-namespace1-machineset-0-nodeid-0" doesn't belong to node group "test-namespace0/machinedeployment-0"` - } + expectedErrSubstring := "doesn't belong to node group" - if !strings.Contains(err0.Error(), string(normalizedProviderString(expectedErr0))) { - t.Errorf("expected: %q, got: %q", expectedErr0, err0.Error()) + if !strings.Contains(err0.Error(), expectedErrSubstring) { + t.Errorf("expected error: %q to contain: %q", err0.Error(), expectedErrSubstring) } // Deleting nodes that are not in ng1 should fail. 
@@ -780,13 +723,8 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { t.Error("expected an error") } - expectedErr1 := `node "test-namespace0-machineset-0-nodeid-0" doesn't belong to node group "test-namespace1/machineset-0"` - if testConfig1.machineDeployment != nil { - expectedErr1 = `node "test-namespace0-machineset-0-nodeid-0" doesn't belong to node group "test-namespace1/machinedeployment-0"` - } - - if !strings.Contains(err1.Error(), string(normalizedProviderString(expectedErr1))) { - t.Errorf("expected: %q, got: %q", expectedErr1, err1.Error()) + if !strings.Contains(err1.Error(), expectedErrSubstring) { + t.Errorf("expected error: %q to contain: %q", err0.Error(), expectedErrSubstring) } // Deleting from correct node group should fail because @@ -808,20 +746,22 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - testConfig0 := createMachineSetTestConfigs(testNamespace+"0", 1, 2, annotations) - testConfig1 := createMachineSetTestConfigs(testNamespace+"1", 1, 2, annotations) + namespace := RandomString(6) + testConfig0 := createMachineSetTestConfigs(namespace, RandomString(6), 1, 2, annotations) + testConfig1 := createMachineSetTestConfigs(namespace, RandomString(6), 1, 2, annotations) test(t, 2, append(testConfig0, testConfig1...)) }) t.Run("MachineDeployment", func(t *testing.T) { - testConfig0 := createMachineDeploymentTestConfigs(testNamespace+"0", 1, 2, annotations) - testConfig1 := createMachineDeploymentTestConfigs(testNamespace+"1", 1, 2, annotations) + namespace := RandomString(6) + testConfig0 := createMachineDeploymentTestConfigs(namespace, RandomString(6), 1, 2, annotations) + testConfig1 := createMachineDeploymentTestConfigs(namespace, RandomString(6), 1, 2, annotations) test(t, 2, append(testConfig0, testConfig1...)) }) } func TestNodeGroupDeleteNodesTwice(t *testing.T) { - addDeletionTimestampToMachine := func(t *testing.T, controller 
*machineController, node *corev1.Node) error { + addDeletionTimestampToMachine := func(controller *machineController, node *corev1.Node) error { m, err := controller.findMachineByProviderID(normalizedProviderString(node.Spec.ProviderID)) if err != nil { return err @@ -831,9 +771,12 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { // Machine API controllers were running Don't actually // delete since the fake client does not support // finalizers. - now := v1.Now() - m.DeletionTimestamp = &now - if _, err := controller.dynamicclient.Resource(*controller.machineResource).Namespace(m.GetNamespace()).Update(context.Background(), newUnstructuredFromMachine(m), v1.UpdateOptions{}); err != nil { + now := metav1.Now() + + m.SetDeletionTimestamp(&now) + + if _, err := controller.managementClient.Resource(controller.machineResource). + Namespace(m.GetNamespace()).Update(context.TODO(), m, metav1.UpdateOptions{}); err != nil { return err } @@ -893,7 +836,7 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { // Assert that we have no DeletionTimestamp for i := expectedSize; i < len(testConfig.machines); i++ { - if !testConfig.machines[i].ObjectMeta.DeletionTimestamp.IsZero() { + if !testConfig.machines[i].GetDeletionTimestamp().IsZero() { t.Fatalf("unexpected DeletionTimestamp") } } @@ -904,7 +847,7 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { } for _, node := range nodesToBeDeleted { - if err := addDeletionTimestampToMachine(t, controller, node); err != nil { + if err := addDeletionTimestampToMachine(controller, node); err != nil { t.Fatalf("unexpected err: %v", err) } } @@ -963,25 +906,16 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - switch v := (ng.scalableResource).(type) { - case *machineSetScalableResource: - updatedMachineSet, err := controller.getMachineSet(testConfig.machineSet.Namespace, testConfig.machineSet.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if 
actual := pointer.Int32PtrDerefOr(updatedMachineSet.Spec.Replicas, 0); int(actual) != expectedSize { - t.Fatalf("expected %v nodes, got %v", expectedSize, actual) - } - case *machineDeploymentScalableResource: - updatedMachineDeployment, err := controller.getMachineDeployment(testConfig.machineDeployment.Namespace, testConfig.machineDeployment.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if actual := pointer.Int32PtrDerefOr(updatedMachineDeployment.Spec.Replicas, 0); int(actual) != expectedSize { - t.Fatalf("expected %v nodes, got %v", expectedSize, actual) - } - default: - t.Errorf("unexpected type: %T", v) + gvr, err := ng.scalableResource.GroupVersionResource() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + scalableResource, err := ng.machineController.managementScaleClient.Scales(testConfig.spec.namespace). + Get(context.TODO(), gvr.GroupResource(), ng.scalableResource.Name(), metav1.GetOptions{}) + + if scalableResource.Spec.Replicas != int32(expectedSize) { + t.Errorf("expected %v, got %v", expectedSize, scalableResource.Spec.Replicas) } } @@ -991,14 +925,14 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { // sorting and the expected semantics in test() will fail. 
t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(testNamespace, 10, map[string]string{ + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(testNamespace, 10, map[string]string{ + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) @@ -1012,10 +946,11 @@ func TestNodeGroupWithFailedMachine(t *testing.T) { // Simulate a failed machine machine := testConfig.machines[3].DeepCopy() - machine.Spec.ProviderID = nil - failureMessage := "FailureMessage" - machine.Status.FailureMessage = &failureMessage - if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + + unstructured.RemoveNestedField(machine.Object, "spec", "providerID") + unstructured.SetNestedField(machine.Object, "FailureMessage", "status", "failureMessage") + + if err := updateResource(controller.managementClient, controller.machineInformer, controller.machineResource, machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } @@ -1043,7 +978,7 @@ func TestNodeGroupWithFailedMachine(t *testing.T) { }) // The failed machine key is sorted to the first index - failedMachineID := fmt.Sprintf("%s%s_%s", failedMachinePrefix, machine.Namespace, machine.Name) + failedMachineID := fmt.Sprintf("%s%s_%s", failedMachinePrefix, machine.GetNamespace(), machine.GetName()) if nodeNames[0].Id != failedMachineID { t.Fatalf("expected %q, got %q", failedMachineID, nodeNames[0].Id) } @@ -1071,14 +1006,14 @@ func TestNodeGroupWithFailedMachine(t *testing.T) { // sorting and the expected semantics in test() will fail. 
t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(testNamespace, 10, map[string]string{ + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(testNamespace, 10, map[string]string{ + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go index 63b08a6e2cbd..a0a017cad67a 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go @@ -21,14 +21,18 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/autoscaler/cluster-autoscaler/config" - "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" klog "k8s.io/klog/v2" + + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" ) const ( @@ -126,12 +130,12 @@ func newProvider( name string, rl *cloudprovider.ResourceLimiter, controller *machineController, -) (cloudprovider.CloudProvider, error) { +) cloudprovider.CloudProvider { return &provider{ providerName: name, resourceLimiter: rl, controller: controller, - }, nil + } } // BuildClusterAPI builds CloudProvider implementation for machine api. 
@@ -142,22 +146,32 @@ func BuildClusterAPI(opts config.AutoscalingOptions, do cloudprovider.NodeGroupD } // Grab a dynamic interface that we can create informers from - dc, err := dynamic.NewForConfig(externalConfig) + managementClient, err := dynamic.NewForConfig(externalConfig) if err != nil { klog.Fatalf("could not generate dynamic client for config") } - kubeClient, err := kubernetes.NewForConfig(externalConfig) + workloadClient, err := kubernetes.NewForConfig(externalConfig) if err != nil { klog.Fatalf("create kube clientset failed: %v", err) } - discoveryClient, err := discovery.NewDiscoveryClientForConfig(externalConfig) + managementDiscoveryClient, err := discovery.NewDiscoveryClientForConfig(externalConfig) if err != nil { klog.Fatalf("create discovery client failed: %v", err) } - controller, err := newMachineController(dc, kubeClient, discoveryClient) + cachedDiscovery := memory.NewMemCacheClient(managementDiscoveryClient) + managementScaleClient, err := scale.NewForConfig( + externalConfig, + restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery), + dynamic.LegacyAPIPathResolverFunc, + scale.NewDiscoveryScaleKindResolver(managementDiscoveryClient)) + if err != nil { + klog.Fatalf("create scale client failed: %v", err) + } + + controller, err := newMachineController(managementClient, workloadClient, managementDiscoveryClient, managementScaleClient) if err != nil { klog.Fatal(err) } @@ -170,10 +184,5 @@ func BuildClusterAPI(opts config.AutoscalingOptions, do cloudprovider.NodeGroupD klog.Fatal(err) } - provider, err := newProvider(ProviderName, rl, controller) - if err != nil { - klog.Fatal(err) - } - - return provider + return newProvider(ProviderName, rl, controller) } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go index 6ba2774c2cb7..301ee32776a9 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go +++ 
b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go @@ -22,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ) @@ -31,11 +32,7 @@ func TestProviderConstructorProperties(t *testing.T) { controller, stop := mustCreateTestController(t) defer stop() - provider, err := newProvider(ProviderName, &resourceLimits, controller) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - + provider := newProvider(ProviderName, &resourceLimits, controller) if actual := provider.Name(); actual != ProviderName { t.Errorf("expected %q, got %q", ProviderName, actual) } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go deleted file mode 100644 index aa1228fb4420..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterapi - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// scalableResource is a resource that can be scaled up and down by -// adjusting its replica count field. 
-type scalableResource interface { - // Id returns an unique identifier of the resource - ID() string - - // MaxSize returns maximum size of the resource - MaxSize() int - - // MinSize returns minimum size of the resource - MinSize() int - - // Name returns the name of the resource - Name() string - - // Namespace returns the namespace the resource is in - Namespace() string - - // Nodes returns a list of all machines that already have or should become nodes that belong to this - // resource - Nodes() ([]string, error) - - // SetSize() sets the replica count of the resource - SetSize(nreplicas int32) error - - // Replicas returns the current replica count of the resource - Replicas() (int32, error) - - // MarkMachineForDeletion marks machine for deletion - MarkMachineForDeletion(machine *Machine) error - - // UnmarkMachineForDeletion unmarks machine for deletion - UnmarkMachineForDeletion(machine *Machine) error -} - -func unmarkMachineForDeletion(controller *machineController, machine *Machine) error { - u, err := controller.dynamicclient.Resource(*controller.machineResource).Namespace(machine.Namespace).Get(context.TODO(), machine.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - annotations := u.GetAnnotations() - if _, ok := annotations[machineDeleteAnnotationKey]; ok { - delete(annotations, machineDeleteAnnotationKey) - u.SetAnnotations(annotations) - _, updateErr := controller.dynamicclient.Resource(*controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - return updateErr - } - return nil -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go new file mode 100644 index 000000000000..3fd5bd9849ce --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go @@ -0,0 +1,169 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "context" + "fmt" + "path" + "time" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type unstructuredScalableResource struct { + controller *machineController + unstructured *unstructured.Unstructured + maxSize int + minSize int +} + +func (r unstructuredScalableResource) ID() string { + return path.Join(r.Kind(), r.Namespace(), r.Name()) +} + +func (r unstructuredScalableResource) MaxSize() int { + return r.maxSize +} + +func (r unstructuredScalableResource) MinSize() int { + return r.minSize +} + +func (r unstructuredScalableResource) Kind() string { + return r.unstructured.GetKind() +} + +func (r unstructuredScalableResource) GroupVersionResource() (schema.GroupVersionResource, error) { + switch r.Kind() { + case machineDeploymentKind: + return r.controller.machineDeploymentResource, nil + case machineSetKind: + return r.controller.machineSetResource, nil + default: + return schema.GroupVersionResource{}, fmt.Errorf("unknown scalable resource kind %s", r.Kind()) + } +} + +func (r unstructuredScalableResource) Name() string { + return r.unstructured.GetName() +} + +func (r unstructuredScalableResource) Namespace() string { + return r.unstructured.GetNamespace() +} + +func (r unstructuredScalableResource) ProviderIDs() ([]string, error) { + providerIds, err := 
r.controller.scalableResourceProviderIDs(r.unstructured) + if err != nil { + return nil, err + } + + return providerIds, nil +} + +func (r unstructuredScalableResource) Replicas() (int32, error) { + gvr, err := r.GroupVersionResource() + if err != nil { + return 0, err + } + + s, err := r.controller.managementScaleClient.Scales(r.Namespace()).Get(context.TODO(), gvr.GroupResource(), r.Name(), metav1.GetOptions{}) + if err != nil { + return 0, err + } + if s == nil { + return 0, fmt.Errorf("unknown %s %s/%s", r.Kind(), r.Namespace(), r.Name()) + } + return s.Spec.Replicas, nil +} + +func (r unstructuredScalableResource) SetSize(nreplicas int32) error { + gvr, err := r.GroupVersionResource() + if err != nil { + return err + } + + s, err := r.controller.managementScaleClient.Scales(r.Namespace()).Get(context.TODO(), gvr.GroupResource(), r.Name(), metav1.GetOptions{}) + if err != nil { + return err + } + + if s == nil { + return fmt.Errorf("unknown %s %s/%s", r.Kind(), r.Namespace(), r.Name()) + } + + s.Spec.Replicas = nreplicas + _, updateErr := r.controller.managementScaleClient.Scales(r.Namespace()).Update(context.TODO(), gvr.GroupResource(), s, metav1.UpdateOptions{}) + return updateErr +} + +func (r unstructuredScalableResource) UnmarkMachineForDeletion(machine *unstructured.Unstructured) error { + u, err := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(machine.GetNamespace()).Get(context.TODO(), machine.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + annotations := u.GetAnnotations() + if _, ok := annotations[machineDeleteAnnotationKey]; ok { + delete(annotations, machineDeleteAnnotationKey) + u.SetAnnotations(annotations) + _, updateErr := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) + + return updateErr + } + + return nil +} + +func (r unstructuredScalableResource) MarkMachineForDeletion(machine 
*unstructured.Unstructured) error { + u, err := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(machine.GetNamespace()).Get(context.TODO(), machine.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + u = u.DeepCopy() + + annotations := u.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + + annotations[machineDeleteAnnotationKey] = time.Now().String() + u.SetAnnotations(annotations) + + _, updateErr := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) + + return updateErr +} + +func newUnstructuredScalableResource(controller *machineController, u *unstructured.Unstructured) (*unstructuredScalableResource, error) { + minSize, maxSize, err := parseScalingBounds(u.GetAnnotations()) + if err != nil { + return nil, errors.Wrap(err, "error validating min/max annotations") + } + + return &unstructuredScalableResource{ + controller: controller, + unstructured: u, + maxSize: maxSize, + minSize: minSize, + }, nil +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go new file mode 100644 index 000000000000..2c51dcfbf79d --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go @@ -0,0 +1,186 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "context" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSetSize(t *testing.T) { + initialReplicas := 1 + updatedReplicas := int32(5) + + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + testResource := testConfig.machineSet + if testConfig.machineDeployment != nil { + testResource = testConfig.machineDeployment + } + + sr, err := newUnstructuredScalableResource(controller, testResource) + if err != nil { + t.Fatal(err) + } + + gvr, err := sr.GroupVersionResource() + if err != nil { + t.Fatal(err) + } + + err = sr.SetSize(updatedReplicas) + if err != nil { + t.Fatal(err) + } + + s, err := sr.controller.managementScaleClient.Scales(testResource.GetNamespace()). + Get(context.TODO(), gvr.GroupResource(), testResource.GetName(), metav1.GetOptions{}) + + if s.Spec.Replicas != updatedReplicas { + t.Errorf("expected %v, got: %v", updatedReplicas, s.Spec.Replicas) + } + } + + t.Run("MachineSet", func(t *testing.T) { + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + }) +} + +func TestReplicas(t *testing.T) { + initialReplicas := 1 + updatedReplicas := int32(5) + + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + testResource := testConfig.machineSet + if testConfig.machineDeployment != nil { + testResource = testConfig.machineDeployment + } + + sr, err := newUnstructuredScalableResource(controller, testResource) + if err != nil { + t.Fatal(err) + } + + gvr, err := sr.GroupVersionResource() + if err != nil { + t.Fatal(err) + } + + i, err 
:= sr.Replicas() + if err != nil { + t.Fatal(err) + } + + if i != int32(initialReplicas) { + t.Errorf("expected %v, got: %v", initialReplicas, i) + } + + // fetch and update machineSet + s, err := sr.controller.managementScaleClient.Scales(testResource.GetNamespace()). + Get(context.TODO(), gvr.GroupResource(), testResource.GetName(), metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + + s.Spec.Replicas = updatedReplicas + + _, err = sr.controller.managementScaleClient.Scales(testResource.GetNamespace()). + Update(context.TODO(), gvr.GroupResource(), s, metav1.UpdateOptions{}) + if err != nil { + t.Fatal(err) + } + + i, err = sr.Replicas() + if err != nil { + t.Fatal(err) + } + + if i != updatedReplicas { + t.Errorf("expected %v, got: %v", updatedReplicas, i) + } + } + + t.Run("MachineSet", func(t *testing.T) { + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + }) +} + +func TestSetSizeAndReplicas(t *testing.T) { + initialReplicas := 1 + updatedReplicas := int32(5) + + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + testResource := testConfig.machineSet + if testConfig.machineDeployment != nil { + testResource = testConfig.machineDeployment + } + + sr, err := newUnstructuredScalableResource(controller, testResource) + if err != nil { + t.Fatal(err) + } + + i, err := sr.Replicas() + if err != nil { + t.Fatal(err) + } + + if i != int32(initialReplicas) { + t.Errorf("expected %v, got: %v", initialReplicas, i) + } + + err = sr.SetSize(updatedReplicas) + if err != nil { + t.Fatal(err) + } + + i, err = sr.Replicas() + if err != nil { + t.Fatal(err) + } + + if i != updatedReplicas { + t.Errorf("expected %v, got: %v", updatedReplicas, i) + } + } + + t.Run("MachineSet", func(t 
*testing.T) { + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + }) +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go index 0e0f00151b0c..ed7bacf49c72 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) const ( @@ -109,9 +110,9 @@ func parseScalingBounds(annotations map[string]string) (int, int, error) { return minSize, maxSize, nil } -func machineOwnerRef(machine *Machine) *metav1.OwnerReference { - for _, ref := range machine.OwnerReferences { - if ref.Kind == "MachineSet" && ref.Name != "" { +func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference { + for _, ref := range u.GetOwnerReferences() { + if ref.Kind == kind && ref.Name != "" { return ref.DeepCopy() } } @@ -119,32 +120,16 @@ func machineOwnerRef(machine *Machine) *metav1.OwnerReference { return nil } -func machineIsOwnedByMachineSet(machine *Machine, machineSet *MachineSet) bool { - if ref := machineOwnerRef(machine); ref != nil { - return ref.UID == machineSet.UID - } - return false +func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference { + return getOwnerForKind(machine, machineSetKind) } -func machineSetMachineDeploymentRef(machineSet *MachineSet) *metav1.OwnerReference { - for _, ref := range machineSet.OwnerReferences { - if ref.Kind == "MachineDeployment" { - return ref.DeepCopy() - } - } - - return nil -} - -func machineSetHasMachineDeploymentOwnerRef(machineSet *MachineSet) bool { - return 
machineSetMachineDeploymentRef(machineSet) != nil +func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference { + return getOwnerForKind(machineSet, machineDeploymentKind) } -func machineSetIsOwnedByMachineDeployment(machineSet *MachineSet, machineDeployment *MachineDeployment) bool { - if ref := machineSetMachineDeploymentRef(machineSet); ref != nil { - return ref.UID == machineDeployment.UID - } - return false +func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool { + return machineSetOwnerRef(machineSet) != nil } // normalizedProviderString splits s on '/' returning everything after diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go index 41e852ab75fb..421fb830e405 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go @@ -17,10 +17,12 @@ limitations under the License. 
package clusterapi import ( + "reflect" "strings" "testing" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) const ( @@ -112,13 +114,21 @@ func TestUtilParseScalingBounds(t *testing.T) { max: 1, }} { t.Run(tc.description, func(t *testing.T) { - machineSet := MachineSet{ - ObjectMeta: v1.ObjectMeta{ - Annotations: tc.annotations, + machineSet := unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, } + machineSet.SetAnnotations(tc.annotations) - min, max, err := parseScalingBounds(machineSet.Annotations) + min, max, err := parseScalingBounds(machineSet.GetAnnotations()) if tc.error != nil && err == nil { t.Fatalf("test #%d: expected an error", i) } @@ -141,228 +151,244 @@ func TestUtilParseScalingBounds(t *testing.T) { } } -func TestUtilMachineSetIsOwnedByMachineDeployment(t *testing.T) { +func TestUtilGetOwnerByKindMachineSet(t *testing.T) { for _, tc := range []struct { - description string - machineSet MachineSet - machineDeployment MachineDeployment - owned bool + description string + machineSet *unstructured.Unstructured + machineSetOwnerRefs []metav1.OwnerReference + expectedOwnerRef *metav1.OwnerReference }{{ - description: "not owned as no owner references", - machineSet: MachineSet{}, - machineDeployment: MachineDeployment{}, - owned: false, + description: "not owned as no owner references", + machineSet: &unstructured.Unstructured{}, + machineSetOwnerRefs: []metav1.OwnerReference{}, + expectedOwnerRef: nil, }, { description: "not owned as not the same Kind", - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "Other", - }}, - }, - }, - machineDeployment: 
MachineDeployment{}, - owned: false, - }, { - description: "not owned because no OwnerReference.Name", - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineSet", - UID: uuid1, - }}, + machineSet: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineDeployment: MachineDeployment{ - ObjectMeta: v1.ObjectMeta{ - UID: uuid1, + machineSetOwnerRefs: []metav1.OwnerReference{ + { + Kind: "Other", }, }, - owned: false, + expectedOwnerRef: nil, }, { - description: "not owned as UID values don't match", - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineSet", - Name: "foo", - UID: uuid2, - }}, + description: "not owned because no OwnerReference.Name", + machineSet: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineDeployment: MachineDeployment{ - TypeMeta: v1.TypeMeta{ - Kind: "MachineDeployment", - }, - ObjectMeta: v1.ObjectMeta{ - UID: uuid1, + machineSetOwnerRefs: []metav1.OwnerReference{ + { + Kind: machineDeploymentKind, + UID: uuid1, }, }, - owned: false, + expectedOwnerRef: nil, }, { description: "owned as UID values match and same Kind and Name not empty", - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineDeployment", - Name: "foo", - UID: uuid1, - }}, + machineSet: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": 
"cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineDeployment: MachineDeployment{ - TypeMeta: v1.TypeMeta{ - Kind: "MachineDeployment", - }, - ObjectMeta: v1.ObjectMeta{ + machineSetOwnerRefs: []metav1.OwnerReference{ + { + Kind: machineDeploymentKind, Name: "foo", UID: uuid1, }, }, - owned: true, + expectedOwnerRef: &metav1.OwnerReference{ + Kind: machineDeploymentKind, + Name: "foo", + UID: uuid1, + }, }} { t.Run(tc.description, func(t *testing.T) { - owned := machineSetIsOwnedByMachineDeployment(&tc.machineSet, &tc.machineDeployment) - if tc.owned != owned { - t.Errorf("expected %t, got %t", tc.owned, owned) + tc.machineSet.SetOwnerReferences(tc.machineSetOwnerRefs) + + ownerRef := getOwnerForKind(tc.machineSet, machineDeploymentKind) + if !reflect.DeepEqual(tc.expectedOwnerRef, ownerRef) { + t.Errorf("expected %v, got %v", tc.expectedOwnerRef, ownerRef) } }) } } -func TestUtilMachineIsOwnedByMachineSet(t *testing.T) { +func TestUtilGetOwnerByKindMachine(t *testing.T) { for _, tc := range []struct { - description string - machine Machine - machineSet MachineSet - owned bool + description string + machine *unstructured.Unstructured + machineOwnerRefs []metav1.OwnerReference + expectedOwnerRef *metav1.OwnerReference }{{ - description: "not owned as no owner references", - machine: Machine{}, - machineSet: MachineSet{}, - owned: false, + description: "not owned as no owner references", + machine: &unstructured.Unstructured{}, + machineOwnerRefs: []metav1.OwnerReference{}, + expectedOwnerRef: nil, }, { description: "not owned as not the same Kind", - machine: Machine{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "Other", - }}, - }, - }, - machineSet: MachineSet{}, - owned: false, - }, { - description: "not owned because no OwnerReference.Name", - machine: Machine{ - ObjectMeta: 
v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineSet", - UID: uuid1, - }}, + machine: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - UID: uuid1, + machineOwnerRefs: []metav1.OwnerReference{ + { + Kind: "Other", + Name: "foo", + UID: uuid1, }, }, - owned: false, + expectedOwnerRef: nil, }, { - description: "not owned as UID values don't match", - machine: Machine{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineSet", - Name: "foo", - UID: uuid2, - }}, + description: "not owned because no OwnerReference.Name", + machine: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineSet: MachineSet{ - TypeMeta: v1.TypeMeta{ - Kind: "MachineSet", - }, - ObjectMeta: v1.ObjectMeta{ - UID: uuid1, + machineOwnerRefs: []metav1.OwnerReference{ + { + Kind: machineSetKind, + UID: uuid1, }, }, - owned: false, + expectedOwnerRef: nil, }, { description: "owned as UID values match and same Kind and Name not empty", - machine: Machine{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineSet", - Name: "foo", - UID: uuid1, - }}, + machine: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineSet: 
MachineSet{ - TypeMeta: v1.TypeMeta{ - Kind: "MachineSet", - }, - ObjectMeta: v1.ObjectMeta{ + machineOwnerRefs: []metav1.OwnerReference{ + { + Kind: machineSetKind, Name: "foo", - UID: uuid1, + UID: uuid2, }, }, - owned: true, + expectedOwnerRef: &metav1.OwnerReference{ + Kind: machineSetKind, + Name: "foo", + UID: uuid2, + }, }} { t.Run(tc.description, func(t *testing.T) { - owned := machineIsOwnedByMachineSet(&tc.machine, &tc.machineSet) - if tc.owned != owned { - t.Errorf("expected %t, got %t", tc.owned, owned) + tc.machine.SetOwnerReferences(tc.machineOwnerRefs) + + ownerRef := getOwnerForKind(tc.machine, machineSetKind) + if !reflect.DeepEqual(tc.expectedOwnerRef, ownerRef) { + t.Errorf("expected %v, got %v", tc.expectedOwnerRef, ownerRef) } }) } } -func TestUtilMachineSetMachineDeploymentOwnerRef(t *testing.T) { +func TestUtilMachineSetHasMachineDeploymentOwnerRef(t *testing.T) { for _, tc := range []struct { - description string - machineSet MachineSet - machineDeployment MachineDeployment - owned bool + description string + machineSet *unstructured.Unstructured + machineSetOwnerRefs []metav1.OwnerReference + owned bool }{{ - description: "machineset not owned as no owner references", - machineSet: MachineSet{}, - machineDeployment: MachineDeployment{}, - owned: false, + description: "machineset not owned as no owner references", + machineSet: &unstructured.Unstructured{}, + machineSetOwnerRefs: []metav1.OwnerReference{}, + owned: false, }, { description: "machineset not owned as ownerref not a MachineDeployment", - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "Other", - }}, + machineSet: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - 
machineDeployment: MachineDeployment{}, - owned: false, + machineSetOwnerRefs: []metav1.OwnerReference{ + { + Kind: "Other", + }, + }, + owned: false, }, { description: "machineset owned as Kind matches and Name not empty", - machineSet: MachineSet{ - ObjectMeta: v1.ObjectMeta{ - OwnerReferences: []v1.OwnerReference{{ - Kind: "MachineDeployment", - Name: "foo", - }}, + machineSet: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, }, }, - machineDeployment: MachineDeployment{ - TypeMeta: v1.TypeMeta{ - Kind: "MachineDeployment", - }, - ObjectMeta: v1.ObjectMeta{ + machineSetOwnerRefs: []metav1.OwnerReference{ + { + Kind: machineDeploymentKind, Name: "foo", }, }, owned: true, }} { t.Run(tc.description, func(t *testing.T) { - owned := machineSetHasMachineDeploymentOwnerRef(&tc.machineSet) + tc.machineSet.SetOwnerReferences(tc.machineSetOwnerRefs) + owned := machineSetHasMachineDeploymentOwnerRef(tc.machineSet) if tc.owned != owned { t.Errorf("expected %t, got %t", tc.owned, owned) } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/machine_types.go b/cluster-autoscaler/cloudprovider/clusterapi/machine_types.go deleted file mode 100644 index db167b982c9f..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/machine_types.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterapi - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Machine is the Schema for the machines API -type Machine struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineSpec `json:"spec,omitempty"` - Status MachineStatus `json:"status,omitempty"` -} - -// MachineSpec defines the desired state of Machine -type MachineSpec struct { - // ObjectMeta will autopopulate the Node created. Use this to - // indicate what labels, annotations, name prefix, etc., should be used - // when creating the Node. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Taints is the full, authoritative list of taints to apply to the corresponding - // Node. This list will overwrite any modifications made to the Node on - // an ongoing basis. - // +optional - Taints []corev1.Taint `json:"taints,omitempty"` - - // ProviderID is the identification ID of the machine provided by the provider. - // This field must match the provider ID as seen on the node object corresponding to this machine. - // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler - // with cluster-api as provider. Clean-up login in the autoscaler compares machines v/s nodes to find out - // machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a - // generic out-of-tree provider for autoscaler, this field is required by autoscaler to be - // able to have a provider view of the list of machines. 
Another list of nodes is queries from the k8s apiserver - // and then comparison is done to find out unregistered machines and are marked for delete. - // This field will be set by the actuators and consumed by higher level entities like autoscaler who will - // be interfacing with cluster-api as generic provider. - // +optional - ProviderID *string `json:"providerID,omitempty"` -} - -// MachineStatus defines the observed state of Machine -type MachineStatus struct { - // NodeRef will point to the corresponding Node if it exists. - // +optional - NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` - - // FailureMessage will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a more verbose string suitable - // for logging and human consumption. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. 
- // +optional - FailureMessage *string `json:"failureMessage,omitempty"` -} - -// MachineList contains a list of Machine -type MachineList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Machine `json:"items"` -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go b/cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go deleted file mode 100644 index d1525bf9d5a1..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterapi - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// MachineDeploymentSpec is the internal autoscaler Schema for MachineDeploymentSpec -type MachineDeploymentSpec struct { - // Number of desired machines. Defaults to 1. - // This is a pointer to distinguish between explicit zero and not specified. - Replicas *int32 `json:"replicas,omitempty"` - - // Label selector for machines. Existing MachineSets whose machines are - // selected by this will be the ones affected by this deployment. - // It must match the machine template's labels. - Selector metav1.LabelSelector `json:"selector"` - - // Template describes the machines that will be created. 
- Template MachineTemplateSpec `json:"template"` -} - -// MachineDeploymentStatus is the internal autoscaler Schema for MachineDeploymentStatus -type MachineDeploymentStatus struct { - // Number of desired machines. Defaults to 1. - // This is a pointer to distinguish between explicit zero and not specified. - Replicas int32 `json:"replicas,omitempty"` -} - -// MachineDeployment is the internal autoscaler Schema for MachineDeployment -type MachineDeployment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineDeploymentSpec `json:"spec,omitempty"` - Status MachineDeploymentStatus `json:"status,omitempty"` -} - -// MachineDeploymentList is the internal autoscaler Schema for MachineDeploymentList -type MachineDeploymentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineDeployment `json:"items"` -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go b/cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go deleted file mode 100644 index 2f2d7ddb895c..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterapi - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// MachineSet is the internal autoscaler Schema for machineSets -type MachineSet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineSetSpec `json:"spec,omitempty"` - Status MachineSetStatus `json:"status,omitempty"` -} - -// MachineSetSpec is the internal autoscaler Schema for MachineSetSpec -type MachineSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // +optional - Replicas *int32 `json:"replicas,omitempty"` - - // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. - // Defaults to 0 (machine will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` - - // Selector is a label query over machines that should match the replica count. - // Label keys and values that must match in order to be controlled by this MachineSet. - // It must match the machine template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector metav1.LabelSelector `json:"selector"` - - // Template is the object that describes the machine that will be created if - // insufficient replicas are detected. - // +optional - Template MachineTemplateSpec `json:"template,omitempty"` -} - -// MachineTemplateSpec is the internal autoscaler Schema for MachineTemplateSpec -type MachineTemplateSpec struct { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Specification of the desired behavior of the machine. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec MachineSpec `json:"spec,omitempty"` -} - -// MachineSetStatus is the internal autoscaler Schema for MachineSetStatus -type MachineSetStatus struct { - // Replicas is the most recently observed number of replicas. - Replicas int32 `json:"replicas"` -} - -// MachineSetList is the internal autoscaler Schema for MachineSetList -type MachineSetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineSet `json:"items"` -} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go b/cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go deleted file mode 100644 index 9b702a86260a..000000000000 --- a/cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go +++ /dev/null @@ -1,360 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by main. DO NOT EDIT. - -package clusterapi - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Machine) DeepCopyInto(out *Machine) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. -func (in *Machine) DeepCopy() *Machine { - if in == nil { - return nil - } - out := new(Machine) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Machine) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. -func (in *MachineDeployment) DeepCopy() *MachineDeployment { - if in == nil { - return nil - } - out := new(MachineDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineDeployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. -func (in *MachineDeploymentList) DeepCopy() *MachineDeploymentList { - if in == nil { - return nil - } - out := new(MachineDeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineDeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. -func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { - if in == nil { - return nil - } - out := new(MachineDeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. 
-func (in *MachineDeploymentStatus) DeepCopy() *MachineDeploymentStatus { - if in == nil { - return nil - } - out := new(MachineDeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineList) DeepCopyInto(out *MachineList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Machine, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. -func (in *MachineList) DeepCopy() *MachineList { - if in == nil { - return nil - } - out := new(MachineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSet) DeepCopyInto(out *MachineSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. -func (in *MachineSet) DeepCopy() *MachineSet { - if in == nil { - return nil - } - out := new(MachineSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *MachineSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. -func (in *MachineSetList) DeepCopy() *MachineSetList { - if in == nil { - return nil - } - out := new(MachineSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. -func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { - if in == nil { - return nil - } - out := new(MachineSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. -func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { - if in == nil { - return nil - } - out := new(MachineSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ProviderID != nil { - in, out := &in.ProviderID, &out.ProviderID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. -func (in *MachineSpec) DeepCopy() *MachineSpec { - if in == nil { - return nil - } - out := new(MachineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { - *out = *in - if in.FailureMessage != nil { - in, out := &in.FailureMessage, &out.FailureMessage - *out = new(string) - **out = **in - } - if in.NodeRef != nil { - in, out := &in.NodeRef, &out.NodeRef - *out = new(v1.ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. -func (in *MachineStatus) DeepCopy() *MachineStatus { - if in == nil { - return nil - } - out := new(MachineStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. -func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { - if in == nil { - return nil - } - out := new(MachineTemplateSpec) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/BUILD new file mode 100644 index 000000000000..f72793758337 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["memcache_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/client-go/discovery/fake:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["memcache.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/discovery/cached/memory", + importpath = "k8s.io/client-go/discovery/cached/memory", + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", + "//staging/src/k8s.io/client-go/discovery:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//vendor/github.com/googleapis/gnostic/openapiv2:go_default_library", + ], +) + 
+filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go new file mode 100644 index 000000000000..6e01066b0b08 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go @@ -0,0 +1,243 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memory + +import ( + "errors" + "fmt" + "net" + "net/url" + "sync" + "syscall" + + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + + errorsutil "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + restclient "k8s.io/client-go/rest" +) + +type cacheEntry struct { + resourceList *metav1.APIResourceList + err error +} + +// memCacheClient can Invalidate() to stay up-to-date with discovery +// information. +// +// TODO: Switch to a watch interface. Right now it will poll after each +// Invalidate() call. 
+type memCacheClient struct { + delegate discovery.DiscoveryInterface + + lock sync.RWMutex + groupToServerResources map[string]*cacheEntry + groupList *metav1.APIGroupList + cacheValid bool +} + +// Error Constants +var ( + ErrCacheNotFound = errors.New("not found") +) + +var _ discovery.CachedDiscoveryInterface = &memCacheClient{} + +// isTransientConnectionError checks whether given error is "Connection refused" or +// "Connection reset" error which usually means that apiserver is temporarily +// unavailable. +func isTransientConnectionError(err error) bool { + urlError, ok := err.(*url.Error) + if !ok { + return false + } + opError, ok := urlError.Err.(*net.OpError) + if !ok { + return false + } + errno, ok := opError.Err.(syscall.Errno) + if !ok { + return false + } + return errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET +} + +func isTransientError(err error) bool { + if isTransientConnectionError(err) { + return true + } + + if t, ok := err.(errorsutil.APIStatus); ok && t.Status().Code >= 500 { + return true + } + + return errorsutil.IsTooManyRequests(err) +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. 
+func (d *memCacheClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + cachedVal, ok := d.groupToServerResources[groupVersion] + if !ok { + return nil, ErrCacheNotFound + } + + if cachedVal.err != nil && isTransientError(cachedVal.err) { + r, err := d.serverResourcesForGroupVersion(groupVersion) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", groupVersion, err)) + } + cachedVal = &cacheEntry{r, err} + d.groupToServerResources[groupVersion] = cachedVal + } + + return cachedVal.resourceList, cachedVal.err +} + +// ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. +func (d *memCacheClient) ServerResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerResources(d) +} + +// ServerGroupsAndResources returns the groups and supported resources for all groups and versions. 
+func (d *memCacheClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return discovery.ServerGroupsAndResources(d) +} + +func (d *memCacheClient) ServerGroups() (*metav1.APIGroupList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + return d.groupList, nil +} + +func (d *memCacheClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *memCacheClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredResources(d) +} + +func (d *memCacheClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredNamespacedResources(d) +} + +func (d *memCacheClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *memCacheClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *memCacheClient) Fresh() bool { + d.lock.RLock() + defer d.lock.RUnlock() + // Return whether the cache is populated at all. It is still possible that + // a single entry is missing due to transient errors and the attempt to read + // that entry will trigger retry. + return d.cacheValid +} + +// Invalidate enforces that no cached data that is older than the current time +// is used. +func (d *memCacheClient) Invalidate() { + d.lock.Lock() + defer d.lock.Unlock() + d.cacheValid = false + d.groupToServerResources = nil + d.groupList = nil +} + +// refreshLocked refreshes the state of cache. The caller must hold d.lock for +// writing. +func (d *memCacheClient) refreshLocked() error { + // TODO: Could this multiplicative set of calls be replaced by a single call + // to ServerResources? If it's possible for more than one resulting + // APIResourceList to have the same GroupVersion, the lists would need merged. 
+ gl, err := d.delegate.ServerGroups() + if err != nil || len(gl.Groups) == 0 { + utilruntime.HandleError(fmt.Errorf("couldn't get current server API group list: %v", err)) + return err + } + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + rl := map[string]*cacheEntry{} + for _, g := range gl.Groups { + for _, v := range g.Versions { + gv := v.GroupVersion + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + r, err := d.serverResourcesForGroupVersion(gv) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", gv, err)) + } + + resultLock.Lock() + defer resultLock.Unlock() + rl[gv] = &cacheEntry{r, err} + }() + } + } + wg.Wait() + + d.groupToServerResources, d.groupList = rl, gl + d.cacheValid = true + return nil +} + +func (d *memCacheClient) serverResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + r, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return r, err + } + if len(r.APIResources) == 0 { + return r, fmt.Errorf("Got empty response for: %v", groupVersion) + } + return r, nil +} + +// NewMemCacheClient creates a new CachedDiscoveryInterface which caches +// discovery information in memory and will stay up-to-date if Invalidate is +// called with regularity. +// +// NOTE: The client will NOT resort to live lookups on cache misses. 
+func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface { + return &memCacheClient{ + delegate: delegate, + groupToServerResources: map[string]*cacheEntry{}, + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD new file mode 100644 index 000000000000..a2301eede96c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD @@ -0,0 +1,57 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "category_expansion.go", + "discovery.go", + "shortcut.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/restmapper", + importpath = "k8s.io/client-go/restmapper", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/klog/v2:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "category_expansion_test.go", + "discovery_test.go", + "shortcut_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", + "//staging/src/k8s.io/client-go/discovery:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/rest/fake:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + 
"//vendor/github.com/googleapis/gnostic/openapiv2:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/category_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 000000000000..2537a2b4e25c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResources. +// Categories are classification or 'tag' of a group of resources. +type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. 
+type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. +func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. 
+ apiResourceLists, _ := e.discoveryClient.ServerResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. + for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 000000000000..19ae95e1b5ba --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,338 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + + "k8s.io/klog/v2" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. 
It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. 
+ if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. + if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err + // TODO track the errors and update callers to handle partial errors. 
+ } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + + var result []*APIGroupResources + for _, group := range gs { + groupResources := &APIGroupResources{ + Group: *group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, ok := rsm[version.GroupVersion] + if !ok { + continue + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. 
+func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. 
+func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). +func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go new file mode 100644 index 000000000000..6903ec8088d1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -0,0 +1,172 @@ +/* +Copyright 
2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "strings" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped +type shortcutExpander struct { + RESTMapper meta.RESTMapper + + discoveryClient discovery.DiscoveryInterface +} + +var _ meta.RESTMapper = &shortcutExpander{} + +// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +} + +// KindFor fulfills meta.RESTMapper +func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) +} + +// KindsFor fulfills meta.RESTMapper +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +// ResourcesFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return 
e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +// ResourceFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +// ResourceSingularizer fulfills meta.RESTMapper +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +// RESTMapping fulfills meta.RESTMapper +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +// RESTMappings fulfills meta.RESTMapper +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { + res := []resourceShortcuts{} + // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
+ apiResList, err := e.discoveryClient.ServerResources() + if err != nil { + klog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + rs := resourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return apiResList, res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. +func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. 
+ if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { + // avoid expanding if there's an exact match to a full resource name + for _, apiResources := range allResources { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + continue + } + if len(resource.Group) != 0 && resource.Group != gv.Group { + continue + } + for _, apiRes := range apiResources.APIResources { + if resource.Resource == apiRes.Name { + return resource + } + if resource.Resource == apiRes.SingularName { + return resource + } + } + } + + for _, item := range shortcutResources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range shortcutResources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + } + + return resource +} + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. 
+type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/BUILD new file mode 100644 index 000000000000..10f707f89ad8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/scale/fake", + importpath = "k8s.io/client-go/scale/fake", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/scale:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/client.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/client.go new file mode 100644 index 000000000000..4b5392cae90a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/fake/client.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fake provides a fake client interface to arbitrary Kubernetes +// APIs that exposes common high level operations and exposes common +// metadata. +package fake + +import ( + "context" + + autoscalingapi "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/scale" + "k8s.io/client-go/testing" +) + +// FakeScaleClient provides a fake implementation of scale.ScalesGetter. +type FakeScaleClient struct { + testing.Fake +} + +func (f *FakeScaleClient) Scales(namespace string) scale.ScaleInterface { + return &fakeNamespacedScaleClient{ + namespace: namespace, + fake: &f.Fake, + } +} + +type fakeNamespacedScaleClient struct { + namespace string + fake *testing.Fake +} + +func (f *fakeNamespacedScaleClient) Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscalingapi.Scale, error) { + obj, err := f.fake. + Invokes(testing.NewGetSubresourceAction(resource.WithVersion(""), f.namespace, "scale", name), &autoscalingapi.Scale{}) + + if err != nil { + return nil, err + } + + return obj.(*autoscalingapi.Scale), err +} + +func (f *fakeNamespacedScaleClient) Update(ctx context.Context, resource schema.GroupResource, scale *autoscalingapi.Scale, opts metav1.UpdateOptions) (*autoscalingapi.Scale, error) { + obj, err := f.fake. 
+ Invokes(testing.NewUpdateSubresourceAction(resource.WithVersion(""), "scale", f.namespace, scale), &autoscalingapi.Scale{}) + + if err != nil { + return nil, err + } + + return obj.(*autoscalingapi.Scale), err +} + +func (f *fakeNamespacedScaleClient) Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) (*autoscalingapi.Scale, error) { + obj, err := f.fake. + Invokes(testing.NewPatchSubresourceAction(gvr, f.namespace, name, pt, patch, "scale"), &autoscalingapi.Scale{}) + + if err != nil { + return nil, err + } + + return obj.(*autoscalingapi.Scale), err +} diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index bd6640e40658..9a1c4bd13263 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -958,6 +958,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook # k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.LoV7/kubernetes/staging/src/k8s.io/client-go k8s.io/client-go/discovery +k8s.io/client-go/discovery/cached/memory k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic k8s.io/client-go/dynamic/dynamicinformer @@ -1153,7 +1154,9 @@ k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/fake k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper k8s.io/client-go/scale +k8s.io/client-go/scale/fake k8s.io/client-go/scale/scheme k8s.io/client-go/scale/scheme/appsint k8s.io/client-go/scale/scheme/appsv1beta1 From d28f1185bede05f3b1b0c69f54f90c1309d34560 Mon Sep 17 00:00:00 2001 From: Kubernetes Prow Robot Date: Thu, 3 Sep 2020 08:27:41 -0700 Subject: [PATCH 2/4] Merge pull request #3314 from detiber/autoDiscovery [cluster-autoscaler][clusterapi] Add support for node autodiscovery to clusterapi provider --- .../cloudprovider/clusterapi/README.md | 31 + .../clusterapi/clusterapi_autodiscovery.go | 99 +++ 
.../clusterapi_autodiscovery_test.go | 324 ++++++++++ .../clusterapi/clusterapi_controller.go | 109 +++- .../clusterapi/clusterapi_controller_test.go | 575 +++++++++++++++--- .../clusterapi/clusterapi_nodegroup.go | 39 +- .../clusterapi/clusterapi_nodegroup_test.go | 70 ++- .../clusterapi/clusterapi_provider.go | 2 +- .../clusterapi/clusterapi_unstructured.go | 15 +- .../clusterapi_unstructured_test.go | 58 +- .../clusterapi/clusterapi_utils.go | 34 ++ .../clusterapi/clusterapi_utils_test.go | 246 ++++++++ 12 files changed, 1438 insertions(+), 164 deletions(-) create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery_test.go diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md index 6b2b1ec611fa..3f12c397fef4 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/README.md +++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md @@ -31,6 +31,37 @@ Please note, this example only shows the cloud provider options, you will most likely need other command line flags. For more information you can invoke `cluster-autoscaler --help` to see a full list of options. +## Configuring node group auto discovery + +If you do not configure node group auto discovery, cluster autoscaler will attempt +to match nodes against any scalable resources found in any namespace and belonging +to any Cluster. 
+ +Limiting cluster autoscaler to only match against resources in the blue namespace + +``` +--node-group-auto-discovery=clusterapi:namespace=blue +``` + +Limiting cluster autoscaler to only match against resources belonging to Cluster test1 + +``` +--node-group-auto-discovery=clusterapi:clusterName=test1 +``` + +Limiting cluster autoscaler to only match against resources matching the provided labels + +``` +--node-group-auto-discovery=clusterapi:color=green,shape=square +``` + +These can be mixed and matched in any combination, for example to only match resources +in the staging namespace, belonging to the purple cluster, with the label owner=jim: + +``` +--node-group-auto-discovery=clusterapi:namespace=staging,clusterName=purple,owner=jim +``` + ## Enabling Autoscaling To enable the automatic scaling of components in your cluster-api managed diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery.go new file mode 100644 index 000000000000..b54f845a2f47 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterapi + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" +) + +type clusterAPIAutoDiscoveryConfig struct { + clusterName string + namespace string + labelSelector labels.Selector +} + +func parseAutoDiscoverySpec(spec string) (*clusterAPIAutoDiscoveryConfig, error) { + cfg := &clusterAPIAutoDiscoveryConfig{ + labelSelector: labels.NewSelector(), + } + + tokens := strings.Split(spec, ":") + if len(tokens) != 2 { + return cfg, errors.NewAutoscalerError(errors.ConfigurationError, fmt.Sprintf("spec \"%s\" should be discoverer:key=value,key=value", spec)) + } + discoverer := tokens[0] + if discoverer != autoDiscovererTypeClusterAPI { + return cfg, errors.NewAutoscalerError(errors.ConfigurationError, fmt.Sprintf("unsupported discoverer specified: %s", discoverer)) + } + + for _, arg := range strings.Split(tokens[1], ",") { + if len(arg) == 0 { + continue + } + kv := strings.Split(arg, "=") + if len(kv) != 2 { + return cfg, errors.NewAutoscalerError(errors.ConfigurationError, fmt.Sprintf("invalid key=value pair %s", kv)) + } + k, v := kv[0], kv[1] + + switch k { + case autoDiscovererClusterNameKey: + cfg.clusterName = v + case autoDiscovererNamespaceKey: + cfg.namespace = v + default: + req, err := labels.NewRequirement(k, selection.Equals, []string{v}) + if err != nil { + return cfg, errors.NewAutoscalerError(errors.ConfigurationError, fmt.Sprintf("failed to create label selector; %v", err)) + } + cfg.labelSelector = cfg.labelSelector.Add(*req) + } + } + return cfg, nil +} + +func parseAutoDiscovery(specs []string) ([]*clusterAPIAutoDiscoveryConfig, error) { + result := make([]*clusterAPIAutoDiscoveryConfig, 0, len(specs)) + for _, spec := range specs { + autoDiscoverySpec, err := parseAutoDiscoverySpec(spec) + if err != nil { + return result, err + } + result = append(result, 
autoDiscoverySpec) + } + return result, nil +} + +func allowedByAutoDiscoverySpec(spec *clusterAPIAutoDiscoveryConfig, r *unstructured.Unstructured) bool { + switch { + case spec.namespace != "" && spec.namespace != r.GetNamespace(): + return false + case spec.clusterName != "" && spec.clusterName != clusterNameFromResource(r): + return false + case !spec.labelSelector.Matches(labels.Set(r.GetLabels())): + return false + default: + return true + } +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery_test.go new file mode 100644 index 000000000000..98f4426f3130 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_autodiscovery_test.go @@ -0,0 +1,324 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterapi + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/labels" +) + +func Test_parseAutoDiscoverySpec(t *testing.T) { + for _, tc := range []struct { + name string + spec string + want *clusterAPIAutoDiscoveryConfig + wantErr bool + }{{ + name: "missing ':'", + spec: "foo", + wantErr: true, + }, { + name: "wrong provider given", + spec: "asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/clustername", + wantErr: true, + }, { + name: "invalid key/value pair given", + spec: "clusterapi:invalid", + wantErr: true, + }, { + name: "no attributes specified", + spec: "clusterapi:", + want: &clusterAPIAutoDiscoveryConfig{ + labelSelector: labels.NewSelector(), + }, + wantErr: false, + }, { + name: "only clusterName given", + spec: "clusterapi:clusterName=foo", + want: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + labelSelector: labels.NewSelector(), + }, + wantErr: false, + }, { + name: "only namespace given", + spec: "clusterapi:namespace=default", + want: &clusterAPIAutoDiscoveryConfig{ + namespace: "default", + labelSelector: labels.NewSelector(), + }, + wantErr: false, + }, { + name: "no clustername or namespace given, key provided without value", + spec: "clusterapi:mylabel=", + want: &clusterAPIAutoDiscoveryConfig{ + labelSelector: labels.SelectorFromSet(labels.Set{"mylabel": ""}), + }, + wantErr: false, + }, { + name: "no clustername or namespace given, single key/value pair for labels", + spec: "clusterapi:mylabel=myval", + want: &clusterAPIAutoDiscoveryConfig{ + labelSelector: labels.SelectorFromSet(labels.Set{"mylabel": "myval"}), + }, + wantErr: false, + }, { + name: "no clustername or namespace given, multiple key/value pair for labels", + spec: "clusterapi:color=blue,shape=square", + want: &clusterAPIAutoDiscoveryConfig{ + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue", "shape": "square"}), + }, + wantErr: false, + }, { + name: "no clustername given, multiple key/value pair for 
labels", + spec: "clusterapi:namespace=test,color=blue,shape=square", + want: &clusterAPIAutoDiscoveryConfig{ + namespace: "test", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue", "shape": "square"}), + }, + wantErr: false, + }, { + name: "no clustername given, single key/value pair for labels", + spec: "clusterapi:namespace=test,color=blue", + want: &clusterAPIAutoDiscoveryConfig{ + namespace: "test", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue"}), + }, + wantErr: false, + }, { + name: "no namespace given, multiple key/value pair for labels", + spec: "clusterapi:clusterName=foo,color=blue,shape=square", + want: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue", "shape": "square"}), + }, + wantErr: false, + }, { + name: "no namespace given, single key/value pair for labels", + spec: "clusterapi:clusterName=foo,shape=square", + want: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + labelSelector: labels.SelectorFromSet(labels.Set{"shape": "square"}), + }, + wantErr: false, + }, { + name: "clustername, namespace, multiple key/value pair for labels provided", + spec: "clusterapi:namespace=test,color=blue,shape=square,clusterName=foo", + want: &clusterAPIAutoDiscoveryConfig{ + namespace: "test", + clusterName: "foo", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue", "shape": "square"}), + }, + wantErr: false, + }, { + name: "clustername, namespace, single key/value pair for labels provided", + spec: "clusterapi:namespace=test,color=blue,clusterName=foo", + want: &clusterAPIAutoDiscoveryConfig{ + namespace: "test", + clusterName: "foo", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue"}), + }, + wantErr: false, + }} { + t.Run(tc.name, func(t *testing.T) { + got, err := parseAutoDiscoverySpec(tc.spec) + if (err != nil) != tc.wantErr { + t.Errorf("parseAutoDiscoverySpec() error = %v, wantErr %v", err, tc.wantErr) + 
return + } + if err == nil && !reflect.DeepEqual(got, tc.want) { + t.Errorf("parseAutoDiscoverySpec() got = %v, want %v", got, tc.want) + } + }) + } +} + +func Test_parseAutoDiscovery(t *testing.T) { + for _, tc := range []struct { + name string + spec []string + want []*clusterAPIAutoDiscoveryConfig + wantErr bool + }{{ + name: "contains invalid spec", + spec: []string{"foo", "clusterapi:color=green"}, + wantErr: true, + }, { + name: "clustername, namespace, single key/value pair for labels provided", + spec: []string{ + "clusterapi:namespace=test,color=blue,clusterName=foo", + "clusterapi:namespace=default,color=green,clusterName=bar", + }, + want: []*clusterAPIAutoDiscoveryConfig{ + { + namespace: "test", + clusterName: "foo", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue"}), + }, + { + namespace: "default", + clusterName: "bar", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"}), + }, + }, + wantErr: false, + }} { + t.Run(tc.name, func(t *testing.T) { + got, err := parseAutoDiscovery(tc.spec) + if (err != nil) != tc.wantErr { + t.Errorf("parseAutoDiscoverySpec() error = %v, wantErr %v", err, tc.wantErr) + return + } + if len(got) != len(tc.want) { + t.Errorf("parseAutoDiscoverySpec() expected length of got to be = %v, got %v", len(tc.want), len(got)) + } + if err == nil && !reflect.DeepEqual(got, tc.want) { + t.Errorf("parseAutoDiscoverySpec() got = %v, want %v", got, tc.want) + } + }) + } +} + +func Test_allowedByAutoDiscoverySpec(t *testing.T) { + for _, tc := range []struct { + name string + testSpec testSpec + autoDiscoveryConfig *clusterAPIAutoDiscoveryConfig + additionalLabels map[string]string + shouldMatch bool + }{{ + name: "no clustername, namespace, or label selector specified should match any MachineSet", + testSpec: createTestSpec(RandomString(6), RandomString(6), RandomString(6), 1, false, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{labelSelector: labels.NewSelector()}, + shouldMatch: true, + 
}, { + name: "no clustername, namespace, or label selector specified should match any MachineDeployment", + testSpec: createTestSpec(RandomString(6), RandomString(6), RandomString(6), 1, true, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{labelSelector: labels.NewSelector()}, + shouldMatch: true, + }, { + name: "clustername specified does not match MachineSet, namespace matches, no labels specified", + testSpec: createTestSpec("default", RandomString(6), RandomString(6), 1, false, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.NewSelector(), + }, + shouldMatch: false, + }, { + name: "clustername specified does not match MachineDeployment, namespace matches, no labels specified", + testSpec: createTestSpec("default", RandomString(6), RandomString(6), 1, true, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.NewSelector(), + }, + shouldMatch: false, + }, { + name: "namespace specified does not match MachineSet, clusterName matches, no labels specified", + testSpec: createTestSpec(RandomString(6), "foo", RandomString(6), 1, false, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.NewSelector(), + }, + shouldMatch: false, + }, { + name: "clustername specified does not match MachineDeployment, namespace matches, no labels specified", + testSpec: createTestSpec(RandomString(6), "foo", RandomString(6), 1, true, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.NewSelector(), + }, + shouldMatch: false, + }, { + name: "namespace and clusterName matches MachineSet, no labels specified", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, false, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", 
+ namespace: "default", + labelSelector: labels.NewSelector(), + }, + shouldMatch: true, + }, { + name: "namespace and clusterName matches MachineDeployment, no labels specified", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, true, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.NewSelector(), + }, + shouldMatch: true, + }, { + name: "namespace and clusterName matches MachineSet, does not match label selector", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, false, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"}), + }, + shouldMatch: false, + }, { + name: "namespace and clusterName matches MachineDeployment, does not match label selector", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, true, nil), + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"}), + }, + shouldMatch: false, + }, { + name: "namespace, clusterName, and label selector matches MachineSet", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, false, nil), + additionalLabels: map[string]string{"color": "green"}, + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"}), + }, + shouldMatch: true, + }, { + name: "namespace, clusterName, and label selector matches MachineDeployment", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, true, nil), + additionalLabels: map[string]string{"color": "green"}, + autoDiscoveryConfig: &clusterAPIAutoDiscoveryConfig{ + clusterName: "foo", + namespace: "default", + labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"}), + }, + shouldMatch: 
true, + }} { + t.Run(tc.name, func(t *testing.T) { + testConfigs := createTestConfigs(tc.testSpec) + resource := testConfigs[0].machineSet + if tc.testSpec.rootIsMachineDeployment { + resource = testConfigs[0].machineDeployment + } + if tc.additionalLabels != nil { + resource.SetLabels(labels.Merge(resource.GetLabels(), tc.additionalLabels)) + } + got := allowedByAutoDiscoverySpec(tc.autoDiscoveryConfig, resource) + + if got != tc.shouldMatch { + t.Errorf("allowedByAutoDiscoverySpec got = %v, want %v", got, tc.shouldMatch) + } + }) + } +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go index f495826e7072..459217a58a0e 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go @@ -37,6 +37,8 @@ import ( "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" klog "k8s.io/klog/v2" + + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ) const ( @@ -52,6 +54,9 @@ const ( machineDeploymentKind = "MachineDeployment" machineSetKind = "MachineSet" machineKind = "Machine" + autoDiscovererTypeClusterAPI = "clusterapi" + autoDiscovererClusterNameKey = "clusterName" + autoDiscovererNamespaceKey = "namespace" ) // machineController watches for Nodes, Machines, MachineSets and @@ -72,6 +77,7 @@ type machineController struct { machineDeploymentResource schema.GroupVersionResource machineDeploymentsAvailable bool accessLock sync.Mutex + autoDiscoverySpecs []*clusterAPIAutoDiscoveryConfig } func indexMachineByProviderID(obj interface{}) ([]string, error) { @@ -80,7 +86,7 @@ func indexMachineByProviderID(obj interface{}) ([]string, error) { return nil, nil } - providerID, found, err := unstructured.NestedString(u.Object, "spec", "providerID") + providerID, found, err := unstructured.NestedString(u.UnstructuredContent(), "spec", "providerID") if err != nil || !found { return nil, 
nil } @@ -102,18 +108,18 @@ func indexNodeByProviderID(obj interface{}) ([]string, error) { } func (c *machineController) findMachine(id string) (*unstructured.Unstructured, error) { - return findResourceByKey(c.machineInformer.Informer().GetStore(), id) + return c.findResourceByKey(c.machineInformer.Informer().GetStore(), id) } func (c *machineController) findMachineSet(id string) (*unstructured.Unstructured, error) { - return findResourceByKey(c.machineSetInformer.Informer().GetStore(), id) + return c.findResourceByKey(c.machineSetInformer.Informer().GetStore(), id) } func (c *machineController) findMachineDeployment(id string) (*unstructured.Unstructured, error) { - return findResourceByKey(c.machineDeploymentInformer.Informer().GetStore(), id) + return c.findResourceByKey(c.machineDeploymentInformer.Informer().GetStore(), id) } -func findResourceByKey(store cache.Store, key string) (*unstructured.Unstructured, error) { +func (c *machineController) findResourceByKey(store cache.Store, key string) (*unstructured.Unstructured, error) { item, exists, err := store.GetByKey(key) if err != nil { return nil, err @@ -128,6 +134,11 @@ func findResourceByKey(store cache.Store, key string) (*unstructured.Unstructure return nil, fmt.Errorf("internal error; unexpected type: %T", item) } + // Verify the resource is allowed by the autodiscovery configuration + if !c.allowedByAutoDiscoverySpecs(u) { + return nil, nil + } + return u.DeepCopy(), nil } @@ -300,10 +311,16 @@ func newMachineController( workloadClient kubeclient.Interface, managementDiscoveryClient discovery.DiscoveryInterface, managementScaleClient scale.ScalesGetter, + discoveryOpts cloudprovider.NodeGroupDiscoveryOptions, ) (*machineController, error) { workloadInformerFactory := kubeinformers.NewSharedInformerFactory(workloadClient, 0) managementInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(managementClient, 0, metav1.NamespaceAll, nil) + autoDiscoverySpecs, err := 
parseAutoDiscovery(discoveryOpts.NodeGroupAutoDiscoverySpecs) + if err != nil { + return nil, fmt.Errorf("failed to parse auto discovery configuration: %v", err) + } + CAPIGroup := getCAPIGroup() CAPIVersion, err := getAPIGroupPreferredVersion(managementDiscoveryClient, CAPIGroup) if err != nil { @@ -363,6 +380,7 @@ func newMachineController( } return &machineController{ + autoDiscoverySpecs: autoDiscoverySpecs, workloadInformerFactory: workloadInformerFactory, managementInformerFactory: managementInformerFactory, machineDeploymentInformer: machineDeploymentInformer, @@ -416,7 +434,7 @@ func (c *machineController) scalableResourceProviderIDs(scalableResource *unstru var providerIDs []string for _, machine := range machines { - providerID, found, err := unstructured.NestedString(machine.Object, "spec", "providerID") + providerID, found, err := unstructured.NestedString(machine.UnstructuredContent(), "spec", "providerID") if err != nil { return nil, err } @@ -430,7 +448,7 @@ func (c *machineController) scalableResourceProviderIDs(scalableResource *unstru klog.Warningf("Machine %q has no providerID", machine.GetName()) - failureMessage, found, err := unstructured.NestedString(machine.Object, "status", "failureMessage") + failureMessage, found, err := unstructured.NestedString(machine.UnstructuredContent(), "status", "failureMessage") if err != nil { return nil, err } @@ -446,7 +464,7 @@ func (c *machineController) scalableResourceProviderIDs(scalableResource *unstru continue } - _, found, err = unstructured.NestedFieldCopy(machine.Object, "status", "nodeRef") + _, found, err = unstructured.NestedFieldCopy(machine.UnstructuredContent(), "status", "nodeRef") if err != nil { return nil, err } @@ -456,7 +474,7 @@ func (c *machineController) scalableResourceProviderIDs(scalableResource *unstru continue } - nodeRefKind, found, err := unstructured.NestedString(machine.Object, "status", "nodeRef", "kind") + nodeRefKind, found, err := 
unstructured.NestedString(machine.UnstructuredContent(), "status", "nodeRef", "kind") if err != nil { return nil, err } @@ -466,7 +484,7 @@ func (c *machineController) scalableResourceProviderIDs(scalableResource *unstru continue } - nodeRefName, found, err := unstructured.NestedString(machine.Object, "status", "nodeRef", "name") + nodeRefName, found, err := unstructured.NestedString(machine.UnstructuredContent(), "status", "nodeRef", "name") if err != nil { return nil, err } @@ -497,21 +515,13 @@ func (c *machineController) nodeGroups() ([]*nodegroup, error) { nodegroups := make([]*nodegroup, 0, len(scalableResources)) for _, r := range scalableResources { - ng, err := newNodegroupFromScalableResource(c, r) + ng, err := newNodeGroupFromScalableResource(c, r) if err != nil { return nil, err } - // add nodegroup iff it has the capacity to scale - if ng.MaxSize()-ng.MinSize() > 0 { - replicas, found, err := unstructured.NestedInt64(r.Object, "spec", "replicas") - if err != nil { - return nil, err - } - - if found && replicas > 0 { - nodegroups = append(nodegroups, ng) - } + if ng != nil { + nodegroups = append(nodegroups, ng) } } return nodegroups, nil @@ -526,14 +536,14 @@ func (c *machineController) nodeGroupForNode(node *corev1.Node) (*nodegroup, err return nil, nil } - nodegroup, err := newNodegroupFromScalableResource(c, scalableResource) + nodegroup, err := newNodeGroupFromScalableResource(c, scalableResource) if err != nil { return nil, fmt.Errorf("failed to build nodegroup for node %q: %v", node.Name, err) } - // We don't scale from 0 so nodes must belong to a nodegroup - // that has a scale size of at least 1. 
- if nodegroup.MaxSize()-nodegroup.MinSize() < 1 { + // the nodegroup will be nil if it doesn't match the autodiscovery configuration + // or if it doesn't meet the scaling requirements + if nodegroup == nil { return nil, nil } @@ -568,7 +578,7 @@ func (c *machineController) findNodeByProviderID(providerID normalizedProviderID func (c *machineController) listMachinesForScalableResource(r *unstructured.Unstructured) ([]*unstructured.Unstructured, error) { switch r.GetKind() { case machineSetKind, machineDeploymentKind: - unstructuredSelector, found, err := unstructured.NestedMap(r.Object, "spec", "selector") + unstructuredSelector, found, err := unstructured.NestedMap(r.UnstructuredContent(), "spec", "selector") if err != nil { return nil, err } @@ -587,7 +597,7 @@ func (c *machineController) listMachinesForScalableResource(r *unstructured.Unst return nil, err } - return listResources(c.machineInformer.Lister().ByNamespace(r.GetNamespace()), selector) + return listResources(c.machineInformer.Lister().ByNamespace(r.GetNamespace()), clusterNameFromResource(r), selector) default: return nil, fmt.Errorf("unknown scalable resource kind %s", r.GetKind()) } @@ -611,10 +621,31 @@ func (c *machineController) listScalableResources() ([]*unstructured.Unstructure } func (c *machineController) listResources(lister cache.GenericLister) ([]*unstructured.Unstructured, error) { - return listResources(lister.ByNamespace(metav1.NamespaceAll), labels.Everything()) + if len(c.autoDiscoverySpecs) == 0 { + return listResources(lister.ByNamespace(metav1.NamespaceAll), "", labels.Everything()) + } + + var results []*unstructured.Unstructured + tracker := map[string]bool{} + for _, spec := range c.autoDiscoverySpecs { + resources, err := listResources(lister.ByNamespace(spec.namespace), spec.clusterName, spec.labelSelector) + if err != nil { + return nil, err + } + for i := range resources { + r := resources[i] + key := fmt.Sprintf("%s-%s-%s", r.GetKind(), r.GetNamespace(), r.GetName()) + if 
_, ok := tracker[key]; !ok { + results = append(results, r) + tracker[key] = true + } + } + } + + return results, nil } -func listResources(lister cache.GenericNamespaceLister, selector labels.Selector) ([]*unstructured.Unstructured, error) { +func listResources(lister cache.GenericNamespaceLister, clusterName string, selector labels.Selector) ([]*unstructured.Unstructured, error) { objs, err := lister.List(selector) if err != nil { return nil, err @@ -627,6 +658,11 @@ func listResources(lister cache.GenericNamespaceLister, selector labels.Selector return nil, fmt.Errorf("expected unstructured resource from lister, not %T", x) } + // if clusterName is not empty and the clusterName does not match the resource, do not return it as part of the results + if clusterName != "" && clusterNameFromResource(u) != clusterName { + continue + } + // if we are listing MachineSets, do not return MachineSets that are owned by a MachineDeployment if u.GetKind() == machineSetKind && machineSetHasMachineDeploymentOwnerRef(u) { continue @@ -637,3 +673,18 @@ func listResources(lister cache.GenericNamespaceLister, selector labels.Selector return results, nil } + +func (c *machineController) allowedByAutoDiscoverySpecs(r *unstructured.Unstructured) bool { + // If no autodiscovery configuration fall back to previous behavior of allowing all + if len(c.autoDiscoverySpecs) == 0 { + return true + } + + for _, spec := range c.autoDiscoverySpecs { + if allowedByAutoDiscoverySpec(spec, r) { + return true + } + } + + return false +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go index e6af9af0cb3c..bad77f565f0f 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go @@ -41,12 +41,16 @@ import ( fakekube "k8s.io/client-go/kubernetes/fake" fakescale 
"k8s.io/client-go/scale/fake" clientgotesting "k8s.io/client-go/testing" + + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ) type testControllerShutdownFunc func() type testConfig struct { spec *testSpec + clusterName string + namespace string machineDeployment *unstructured.Unstructured machineSet *unstructured.Unstructured machines []*unstructured.Unstructured @@ -57,6 +61,7 @@ type testSpec struct { annotations map[string]string machineDeploymentName string machineSetName string + clusterName string namespace string nodeCount int rootIsMachineDeployment bool @@ -160,7 +165,7 @@ func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin return true, nil, err } - replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") + replicas, found, err := unstructured.NestedInt64(u.UnstructuredContent(), "spec", "replicas") if err != nil { return true, nil, err } @@ -212,7 +217,7 @@ func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin } scaleClient.AddReactor("*", "*", scaleReactor) - controller, err := newMachineController(dynamicClientset, kubeclientSet, discoveryClient, scaleClient) + controller, err := newMachineController(dynamicClientset, kubeclientSet, discoveryClient, scaleClient, cloudprovider.NodeGroupDiscoveryOptions{}) if err != nil { t.Fatal("failed to create test controller") } @@ -227,50 +232,58 @@ func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machin } } -func createMachineSetTestConfig(namespace, namePrefix string, nodeCount int, annotations map[string]string) *testConfig { - return createTestConfigs(createTestSpecs(namespace, namePrefix, 1, nodeCount, false, annotations)...)[0] +func createMachineSetTestConfig(namespace, clusterName, namePrefix string, nodeCount int, annotations map[string]string) *testConfig { + return createTestConfigs(createTestSpecs(namespace, clusterName, namePrefix, 1, nodeCount, false, annotations)...)[0] } -func 
createMachineSetTestConfigs(namespace, namePrefix string, configCount, nodeCount int, annotations map[string]string) []*testConfig { - return createTestConfigs(createTestSpecs(namespace, namePrefix, configCount, nodeCount, false, annotations)...) +func createMachineSetTestConfigs(namespace, clusterName, namePrefix string, configCount, nodeCount int, annotations map[string]string) []*testConfig { + return createTestConfigs(createTestSpecs(namespace, clusterName, namePrefix, configCount, nodeCount, false, annotations)...) } -func createMachineDeploymentTestConfig(namespace, namePrefix string, nodeCount int, annotations map[string]string) *testConfig { - return createTestConfigs(createTestSpecs(namespace, namePrefix, 1, nodeCount, true, annotations)...)[0] +func createMachineDeploymentTestConfig(namespace, clusterName, namePrefix string, nodeCount int, annotations map[string]string) *testConfig { + return createTestConfigs(createTestSpecs(namespace, clusterName, namePrefix, 1, nodeCount, true, annotations)...)[0] } -func createMachineDeploymentTestConfigs(namespace, namePrefix string, configCount, nodeCount int, annotations map[string]string) []*testConfig { - return createTestConfigs(createTestSpecs(namespace, namePrefix, configCount, nodeCount, true, annotations)...) +func createMachineDeploymentTestConfigs(namespace, clusterName, namePrefix string, configCount, nodeCount int, annotations map[string]string) []*testConfig { + return createTestConfigs(createTestSpecs(namespace, clusterName, namePrefix, configCount, nodeCount, true, annotations)...) 
} -func createTestSpecs(namespace, namePrefix string, scalableResourceCount, nodeCount int, isMachineDeployment bool, annotations map[string]string) []testSpec { +func createTestSpecs(namespace, clusterName, namePrefix string, scalableResourceCount, nodeCount int, isMachineDeployment bool, annotations map[string]string) []testSpec { var specs []testSpec for i := 0; i < scalableResourceCount; i++ { - specs = append(specs, testSpec{ - annotations: annotations, - machineDeploymentName: fmt.Sprintf("%s-%d", namePrefix, i), - machineSetName: fmt.Sprintf("%s-%d", namePrefix, i), - namespace: namespace, - nodeCount: nodeCount, - rootIsMachineDeployment: isMachineDeployment, - }) + specs = append(specs, createTestSpec(namespace, clusterName, fmt.Sprintf("%s-%d", namePrefix, i), nodeCount, isMachineDeployment, annotations)) } return specs } +func createTestSpec(namespace, clusterName, name string, nodeCount int, isMachineDeployment bool, annotations map[string]string) testSpec { + return testSpec{ + annotations: annotations, + machineDeploymentName: name, + machineSetName: name, + clusterName: clusterName, + namespace: namespace, + nodeCount: nodeCount, + rootIsMachineDeployment: isMachineDeployment, + } +} + func createTestConfigs(specs ...testSpec) []*testConfig { result := make([]*testConfig, 0, len(specs)) for i, spec := range specs { config := &testConfig{ - spec: &specs[i], - nodes: make([]*corev1.Node, spec.nodeCount), - machines: make([]*unstructured.Unstructured, spec.nodeCount), + spec: &specs[i], + namespace: spec.namespace, + clusterName: spec.clusterName, + nodes: make([]*corev1.Node, spec.nodeCount), + machines: make([]*unstructured.Unstructured, spec.nodeCount), } machineSetLabels := map[string]string{ + "clusterName": spec.clusterName, "machineSetName": spec.machineSetName, } @@ -284,7 +297,8 @@ func createTestConfigs(specs ...testSpec) []*testConfig { "uid": spec.machineSetName, }, "spec": map[string]interface{}{ - "replicas": int64(spec.nodeCount), + 
"clusterName": spec.clusterName, + "replicas": int64(spec.nodeCount), }, "status": map[string]interface{}{}, }, @@ -298,6 +312,7 @@ func createTestConfigs(specs ...testSpec) []*testConfig { machineSetLabels["machineDeploymentName"] = spec.machineDeploymentName machineDeploymentLabels := map[string]string{ + "clusterName": spec.clusterName, "machineDeploymentName": spec.machineDeploymentName, } @@ -311,7 +326,8 @@ func createTestConfigs(specs ...testSpec) []*testConfig { "uid": spec.machineDeploymentName, }, "spec": map[string]interface{}{ - "replicas": int64(spec.nodeCount), + "clusterName": spec.clusterName, + "replicas": int64(spec.nodeCount), }, "status": map[string]interface{}{}, }, @@ -339,7 +355,7 @@ func createTestConfigs(specs ...testSpec) []*testConfig { } for j := 0; j < spec.nodeCount; j++ { - config.nodes[j], config.machines[j] = makeLinkedNodeAndMachine(j, spec.namespace, machineOwner, machineSetLabels) + config.nodes[j], config.machines[j] = makeLinkedNodeAndMachine(j, spec.namespace, spec.clusterName, machineOwner, machineSetLabels) } result = append(result, config) @@ -351,7 +367,7 @@ func createTestConfigs(specs ...testSpec) []*testConfig { // makeLinkedNodeAndMachine creates a node and machine. The machine // has its NodeRef set to the new node and the new machine's owner // reference is set to owner. 
-func makeLinkedNodeAndMachine(i int, namespace string, owner metav1.OwnerReference, machineLabels map[string]string) (*corev1.Node, *unstructured.Unstructured) { +func makeLinkedNodeAndMachine(i int, namespace, clusterName string, owner metav1.OwnerReference, machineLabels map[string]string) (*corev1.Node, *unstructured.Unstructured) { node := &corev1.Node{ TypeMeta: metav1.TypeMeta{ Kind: "Node", @@ -376,7 +392,8 @@ func makeLinkedNodeAndMachine(i int, namespace string, owner metav1.OwnerReferen "namespace": namespace, }, "spec": map[string]interface{}{ - "providerID": fmt.Sprintf("test:////%s-%s-nodeid-%d", namespace, owner.Name, i), + "clusterName": clusterName, + "providerID": fmt.Sprintf("test:////%s-%s-nodeid-%d", namespace, owner.Name, i), }, "status": map[string]interface{}{ "nodeRef": map[string]interface{}{ @@ -420,24 +437,6 @@ func addTestConfigs(t *testing.T, controller *machineController, testConfigs ... return nil } -func selectorFromScalableResource(u *unstructured.Unstructured) (labels.Selector, error) { - unstructuredSelector, found, err := unstructured.NestedMap(u.Object, "spec", "selector") - if err != nil { - return nil, err - } - - if !found { - return nil, fmt.Errorf("expected field spec.selector on scalable resource type") - } - - labelSelector := &metav1.LabelSelector{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredSelector, labelSelector); err != nil { - return nil, err - } - - return metav1.LabelSelectorAsSelector(labelSelector) -} - func createResource(client dynamic.Interface, informer informers.GenericInformer, gvr schema.GroupVersionResource, resource *unstructured.Unstructured) error { if _, err := client.Resource(gvr).Namespace(resource.GetNamespace()).Create(context.TODO(), resource.DeepCopy(), metav1.CreateOptions{}); err != nil { return err @@ -532,7 +531,7 @@ func TestControllerFindMachine(t *testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { - testConfig := 
createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -548,7 +547,7 @@ func TestControllerFindMachine(t *testing.T) { } func TestControllerFindMachineOwner(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -597,7 +596,7 @@ func TestControllerFindMachineOwner(t *testing.T) { } func TestControllerFindMachineByProviderID(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -657,7 +656,7 @@ func TestControllerFindMachineByProviderID(t *testing.T) { } func TestControllerFindNodeByNodeName(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -756,7 +755,8 @@ func TestControllerListMachinesForScalableResource(t *testing.T) { t.Run("MachineSet", func(t *testing.T) { namespace := RandomString(6) - testConfig1 := createMachineSetTestConfig(namespace, RandomString(6), 5, map[string]string{ + clusterName := RandomString(6) + testConfig1 := createMachineSetTestConfig(namespace, clusterName, RandomString(6), 5, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: 
"10", }) @@ -765,7 +765,7 @@ func TestControllerListMachinesForScalableResource(t *testing.T) { // nodes and the additional machineset to the existing set of // test objects in the controller. This gives us two // machinesets, each with their own machines and linked nodes. - testConfig2 := createMachineSetTestConfig(namespace, RandomString(6), 5, map[string]string{ + testConfig2 := createMachineSetTestConfig(namespace, clusterName, RandomString(6), 5, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -775,7 +775,8 @@ func TestControllerListMachinesForScalableResource(t *testing.T) { t.Run("MachineDeployment", func(t *testing.T) { namespace := RandomString(6) - testConfig1 := createMachineDeploymentTestConfig(namespace, RandomString(6), 5, map[string]string{ + clusterName := RandomString(6) + testConfig1 := createMachineDeploymentTestConfig(namespace, clusterName, RandomString(6), 5, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -784,7 +785,7 @@ func TestControllerListMachinesForScalableResource(t *testing.T) { // nodes, machineset, and the additional machineset to the existing set of // test objects in the controller. This gives us two // machinedeployments, each with their own machineSet, machines and linked nodes. 
- testConfig2 := createMachineDeploymentTestConfig(namespace, RandomString(6), 5, map[string]string{ + testConfig2 := createMachineDeploymentTestConfig(namespace, clusterName, RandomString(6), 5, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -815,7 +816,7 @@ func TestControllerLookupNodeGroupForNonExistentNode(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -823,7 +824,7 @@ func TestControllerLookupNodeGroupForNonExistentNode(t *testing.T) { }) t.Run("MachineDeployment", func(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -854,7 +855,7 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -862,7 +863,7 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { }) t.Run("MachineDeployment", func(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", 
nodeGroupMaxSizeAnnotationKey: "10", }) @@ -871,7 +872,7 @@ func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { } func TestControllerNodeGroupForNodeWithMissingSetMachineOwner(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -913,7 +914,7 @@ func TestControllerNodeGroupForNodeWithPositiveScalingBounds(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "1", }) @@ -921,7 +922,7 @@ func TestControllerNodeGroupForNodeWithPositiveScalingBounds(t *testing.T) { }) t.Run("MachineDeployment", func(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "1", }) @@ -950,19 +951,20 @@ func TestControllerNodeGroups(t *testing.T) { defer stop() namespace := RandomString(6) + clusterName := RandomString(6) // Test #1: zero nodegroups assertNodegroupLen(t, controller, 0) // Test #2: add 5 machineset-based nodegroups - machineSetConfigs := createMachineSetTestConfigs(namespace, RandomString(6), 5, 1, annotations) + machineSetConfigs := createMachineSetTestConfigs(namespace, clusterName, RandomString(6), 5, 1, annotations) if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } 
assertNodegroupLen(t, controller, 5) // Test #2: add 2 machinedeployment-based nodegroups - machineDeploymentConfigs := createMachineDeploymentTestConfigs(namespace, RandomString(6), 2, 1, annotations) + machineDeploymentConfigs := createMachineDeploymentTestConfigs(namespace, clusterName, RandomString(6), 2, 1, annotations) if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -986,14 +988,14 @@ func TestControllerNodeGroups(t *testing.T) { } // Test #5: machineset with no scaling bounds results in no nodegroups - machineSetConfigs = createMachineSetTestConfigs(namespace, RandomString(6), 5, 1, annotations) + machineSetConfigs = createMachineSetTestConfigs(namespace, clusterName, RandomString(6), 5, 1, annotations) if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } assertNodegroupLen(t, controller, 0) // Test #6: machinedeployment with no scaling bounds results in no nodegroups - machineDeploymentConfigs = createMachineDeploymentTestConfigs(namespace, RandomString(6), 2, 1, annotations) + machineDeploymentConfigs = createMachineDeploymentTestConfigs(namespace, clusterName, RandomString(6), 2, 1, annotations) if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -1005,7 +1007,7 @@ func TestControllerNodeGroups(t *testing.T) { } // Test #7: machineset with bad scaling bounds results in an error and no nodegroups - machineSetConfigs = createMachineSetTestConfigs(namespace, RandomString(6), 5, 1, annotations) + machineSetConfigs = createMachineSetTestConfigs(namespace, clusterName, RandomString(6), 5, 1, annotations) if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -1014,7 +1016,7 @@ func TestControllerNodeGroups(t *testing.T) { } // Test #8: machinedeployment with bad scaling bounds results in 
an error and no nodegroups - machineDeploymentConfigs = createMachineDeploymentTestConfigs(namespace, RandomString(6), 2, 1, annotations) + machineDeploymentConfigs = createMachineDeploymentTestConfigs(namespace, clusterName, RandomString(6), 2, 1, annotations) if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -1078,19 +1080,19 @@ func TestControllerNodeGroupsNodeCount(t *testing.T) { t.Run("MachineSet", func(t *testing.T) { for _, tc := range testCases { - test(t, tc, createMachineSetTestConfigs(RandomString(6), RandomString(6), tc.nodeGroups, tc.nodesPerGroup, annotations)) + test(t, tc, createMachineSetTestConfigs(RandomString(6), RandomString(6), RandomString(6), tc.nodeGroups, tc.nodesPerGroup, annotations)) } }) t.Run("MachineDeployment", func(t *testing.T) { for _, tc := range testCases { - test(t, tc, createMachineDeploymentTestConfigs(RandomString(6), RandomString(6), tc.nodeGroups, tc.nodesPerGroup, annotations)) + test(t, tc, createMachineDeploymentTestConfigs(RandomString(6), RandomString(6), RandomString(6), tc.nodeGroups, tc.nodesPerGroup, annotations)) } }) } func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -1138,7 +1140,7 @@ func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { } func TestControllerMachineSetNodeNamesWithoutLinkage(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 3, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 3, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -1180,7 +1182,7 @@ func 
TestControllerMachineSetNodeNamesWithoutLinkage(t *testing.T) { } func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 3, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 3, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -1232,7 +1234,7 @@ func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { } func TestControllerMachineSetNodeNamesUsingStatusNodeRefName(t *testing.T) { - testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), 3, map[string]string{ + testConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 3, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", }) @@ -1303,7 +1305,7 @@ func TestControllerGetAPIVersionGroup(t *testing.T) { } func TestControllerGetAPIVersionGroupWithMachineDeployments(t *testing.T) { - testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 1, map[string]string{ + testConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "1", }) @@ -1538,3 +1540,430 @@ func RandomString(n int) string { } return string(result) } + +func Test_machineController_allowedByAutoDiscoverySpecs(t *testing.T) { + for _, tc := range []struct { + name string + testSpec testSpec + autoDiscoverySpecs []*clusterAPIAutoDiscoveryConfig + additionalLabels map[string]string + shouldMatch bool + }{{ + name: "autodiscovery specs includes permissive spec that should match any MachineSet", + testSpec: createTestSpec(RandomString(6), RandomString(6), RandomString(6), 1, false, nil), + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {labelSelector: labels.NewSelector()}, + {clusterName: "foo", 
namespace: "bar", labelSelector: labels.Nothing()}, + }, + shouldMatch: true, + }, { + name: "autodiscovery specs includes permissive spec that should match any MachineDeployment", + testSpec: createTestSpec(RandomString(6), RandomString(6), RandomString(6), 1, true, nil), + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {labelSelector: labels.NewSelector()}, + {clusterName: "foo", namespace: "bar", labelSelector: labels.Nothing()}, + }, + shouldMatch: true, + }, { + name: "autodiscovery specs includes a restrictive spec that should match specific MachineSet", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, false, nil), + additionalLabels: map[string]string{"color": "green"}, + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {clusterName: "foo", namespace: "default", labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"})}, + {clusterName: "wombat", namespace: "bar", labelSelector: labels.Nothing()}, + }, + shouldMatch: true, + }, { + name: "autodiscovery specs includes a restrictive spec that should match specific MachineDeployment", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, true, nil), + additionalLabels: map[string]string{"color": "green"}, + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {clusterName: "foo", namespace: "default", labelSelector: labels.SelectorFromSet(labels.Set{"color": "green"})}, + {clusterName: "wombat", namespace: "bar", labelSelector: labels.Nothing()}, + }, + shouldMatch: true, + }, { + name: "autodiscovery specs does not include any specs that should match specific MachineSet", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, false, nil), + additionalLabels: map[string]string{"color": "green"}, + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {clusterName: "test", namespace: "default", labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue"})}, + {clusterName: "wombat", namespace: "bar", labelSelector: labels.Nothing()}, + }, + 
shouldMatch: false, + }, { + name: "autodiscovery specs does not include any specs that should match specific MachineDeployment", + testSpec: createTestSpec("default", "foo", RandomString(6), 1, true, nil), + additionalLabels: map[string]string{"color": "green"}, + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {clusterName: "test", namespace: "default", labelSelector: labels.SelectorFromSet(labels.Set{"color": "blue"})}, + {clusterName: "wombat", namespace: "bar", labelSelector: labels.Nothing()}, + }, + shouldMatch: false, + }} { + t.Run(tc.name, func(t *testing.T) { + testConfigs := createTestConfigs(tc.testSpec) + resource := testConfigs[0].machineSet + if tc.testSpec.rootIsMachineDeployment { + resource = testConfigs[0].machineDeployment + } + if tc.additionalLabels != nil { + resource.SetLabels(labels.Merge(resource.GetLabels(), tc.additionalLabels)) + } + c := &machineController{ + autoDiscoverySpecs: tc.autoDiscoverySpecs, + } + + got := c.allowedByAutoDiscoverySpecs(resource) + if got != tc.shouldMatch { + t.Errorf("allowedByAutoDiscoverySpecs got = %v, want %v", got, tc.shouldMatch) + } + }) + } +} + +func Test_machineController_listScalableResources(t *testing.T) { + uniqueMDConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, nil) + + mdTestConfigs := createMachineDeploymentTestConfigs(RandomString(6), RandomString(6), RandomString(6), 5, 1, nil) + mdTestConfigs = append(mdTestConfigs, uniqueMDConfig) + + allMachineDeployments := make([]*unstructured.Unstructured, 0, len(mdTestConfigs)) + for i := range mdTestConfigs { + allMachineDeployments = append(allMachineDeployments, mdTestConfigs[i].machineDeployment) + } + + uniqueMSConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, nil) + + msTestConfigs := createMachineSetTestConfigs(RandomString(6), RandomString(6), RandomString(6), 5, 1, nil) + msTestConfigs = append(msTestConfigs, uniqueMSConfig) + + 
allMachineSets := make([]*unstructured.Unstructured, 0, len(msTestConfigs)) + for i := range msTestConfigs { + allMachineSets = append(allMachineSets, msTestConfigs[i].machineSet) + } + + allTestConfigs := append(mdTestConfigs, msTestConfigs...) + allScalableResources := append(allMachineDeployments, allMachineSets...) + + for _, tc := range []struct { + name string + autoDiscoverySpecs []*clusterAPIAutoDiscoveryConfig + want []*unstructured.Unstructured + wantErr bool + }{{ + name: "undefined autodiscovery results in returning all scalable resources", + autoDiscoverySpecs: nil, + want: allScalableResources, + wantErr: false, + }, { + name: "autodiscovery configuration to match against unique MachineSet only returns that MachineSet", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + }, + want: []*unstructured.Unstructured{uniqueMSConfig.machineSet}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against unique MachineDeployment only returns that MachineDeployment", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + }, + want: []*unstructured.Unstructured{uniqueMDConfig.machineDeployment}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against both unique MachineDeployment and unique MachineSet only returns those scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: 
labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + }, + want: []*unstructured.Unstructured{uniqueMDConfig.machineDeployment, uniqueMSConfig.machineSet}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against both unique MachineDeployment, unique MachineSet, and a permissive config returns all scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + {labelSelector: labels.NewSelector()}, + }, + want: allScalableResources, + wantErr: false, + }, { + name: "autodiscovery configuration to match against both unique MachineDeployment, unique MachineSet, and a restrictive returns unique scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + {namespace: RandomString(6), clusterName: RandomString(6), labelSelector: labels.Nothing()}, + }, + want: []*unstructured.Unstructured{uniqueMDConfig.machineDeployment, uniqueMSConfig.machineSet}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against a restrictive config returns no scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: RandomString(6), clusterName: RandomString(6), labelSelector: labels.Nothing()}, + }, + want: nil, + wantErr: false, + }} { + t.Run(tc.name, func(t *testing.T) { + c, stop := mustCreateTestController(t, 
allTestConfigs...) + defer stop() + c.autoDiscoverySpecs = tc.autoDiscoverySpecs + + got, err := c.listScalableResources() + if (err != nil) != tc.wantErr { + t.Errorf("listScalableResources() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if len(got) != len(tc.want) { + t.Errorf("listScalableResources() expected length of got to be = %v, got %v", len(tc.want), len(got)) + } + + // Sort results as order is not guaranteed. + sort.Slice(got, func(i, j int) bool { + return got[i].GetName() < got[j].GetName() + }) + sort.Slice(tc.want, func(i, j int) bool { + return tc.want[i].GetName() < tc.want[j].GetName() + }) + + if err == nil && !reflect.DeepEqual(got, tc.want) { + t.Errorf("listScalableResources() got = %v, want %v", got, tc.want) + } + }) + } +} + +func Test_machineController_nodeGroupForNode(t *testing.T) { + uniqueMDConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + mdTestConfigs := createMachineDeploymentTestConfigs(RandomString(6), RandomString(6), RandomString(6), 5, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + mdTestConfigs = append(mdTestConfigs, uniqueMDConfig) + + allMachineDeployments := make([]*unstructured.Unstructured, 0, len(mdTestConfigs)) + for i := range mdTestConfigs { + allMachineDeployments = append(allMachineDeployments, mdTestConfigs[i].machineDeployment) + } + + uniqueMSConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + msTestConfigs := createMachineSetTestConfigs(RandomString(6), RandomString(6), RandomString(6), 5, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + msTestConfigs = append(msTestConfigs, uniqueMSConfig) + + 
allMachineSets := make([]*unstructured.Unstructured, 0, len(msTestConfigs)) + for i := range msTestConfigs { + allMachineSets = append(allMachineSets, msTestConfigs[i].machineSet) + } + + allTestConfigs := append(mdTestConfigs, msTestConfigs...) + + for _, tc := range []struct { + name string + autoDiscoverySpecs []*clusterAPIAutoDiscoveryConfig + node *corev1.Node + scalableResource *unstructured.Unstructured + wantErr bool + }{{ + name: "undefined autodiscovery results in returning MachineSet resource for given node", + autoDiscoverySpecs: nil, + node: msTestConfigs[0].nodes[0], + scalableResource: msTestConfigs[0].machineSet, + wantErr: false, + }, { + name: "undefined autodiscovery results in returning MachineDeployment resource for given node", + autoDiscoverySpecs: nil, + node: mdTestConfigs[0].nodes[0], + scalableResource: mdTestConfigs[0].machineDeployment, + wantErr: false, + }, { + name: "autodiscovery configuration to match against a restrictive config does not return a nodegroup", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: RandomString(6), clusterName: RandomString(6), labelSelector: labels.Nothing()}, + }, + node: msTestConfigs[0].nodes[0], + scalableResource: nil, + wantErr: false, + }, { + name: "autodiscovery configuration to match against unique MachineSet returns nodegroup for that MachineSet", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + }, + node: uniqueMSConfig.nodes[0], + scalableResource: uniqueMSConfig.machineSet, + wantErr: false, + }, { + name: "autodiscovery configuration to match against unique MachineDeployment returns nodegroup for that MachineDeployment", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: 
labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + }, + node: uniqueMDConfig.nodes[0], + scalableResource: uniqueMDConfig.machineDeployment, + wantErr: false, + }} { + t.Run(tc.name, func(t *testing.T) { + c, stop := mustCreateTestController(t, allTestConfigs...) + defer stop() + c.autoDiscoverySpecs = tc.autoDiscoverySpecs + + got, err := c.nodeGroupForNode(tc.node) + if (err != nil) != tc.wantErr { + t.Errorf("nodeGroupForNode() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if got == nil && tc.scalableResource != nil { + t.Error("expected a node group to be returned, got nil") + return + } + + if tc.scalableResource == nil && got != nil { + t.Errorf("expected nil node group, got: %v", got) + return + } + + if tc.scalableResource != nil && !reflect.DeepEqual(got.scalableResource.unstructured, tc.scalableResource) { + t.Errorf("nodeGroupForNode() got = %v, want node group for scalable resource %v", got, tc.scalableResource) + } + }) + } +} + +func Test_machineController_nodeGroups(t *testing.T) { + uniqueMDConfig := createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + mdTestConfigs := createMachineDeploymentTestConfigs(RandomString(6), RandomString(6), RandomString(6), 5, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + mdTestConfigs = append(mdTestConfigs, uniqueMDConfig) + + allMachineDeployments := make([]*unstructured.Unstructured, 0, len(mdTestConfigs)) + for i := range mdTestConfigs { + allMachineDeployments = append(allMachineDeployments, mdTestConfigs[i].machineDeployment) + } + + uniqueMSConfig := createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + msTestConfigs := 
createMachineSetTestConfigs(RandomString(6), RandomString(6), RandomString(6), 5, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + msTestConfigs = append(msTestConfigs, uniqueMSConfig) + + allMachineSets := make([]*unstructured.Unstructured, 0, len(msTestConfigs)) + for i := range msTestConfigs { + allMachineSets = append(allMachineSets, msTestConfigs[i].machineSet) + } + + allTestConfigs := append(mdTestConfigs, msTestConfigs...) + allScalableResources := append(allMachineDeployments, allMachineSets...) + + for _, tc := range []struct { + name string + autoDiscoverySpecs []*clusterAPIAutoDiscoveryConfig + expectedScalableResources []*unstructured.Unstructured + wantErr bool + }{{ + name: "undefined autodiscovery results in returning nodegroups for all scalable resources", + autoDiscoverySpecs: nil, + expectedScalableResources: allScalableResources, + wantErr: false, + }, { + name: "autodiscovery configuration to match against unique MachineSet only returns nodegroup for that MachineSet", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + }, + expectedScalableResources: []*unstructured.Unstructured{uniqueMSConfig.machineSet}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against unique MachineDeployment only returns nodegroup for that MachineDeployment", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + }, + expectedScalableResources: []*unstructured.Unstructured{uniqueMDConfig.machineDeployment}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against both unique MachineDeployment and unique MachineSet only returns nodegroups 
for those scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + }, + expectedScalableResources: []*unstructured.Unstructured{uniqueMDConfig.machineDeployment, uniqueMSConfig.machineSet}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against both unique MachineDeployment, unique MachineSet, and a permissive config returns nodegroups for all scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + {labelSelector: labels.NewSelector()}, + }, + expectedScalableResources: allScalableResources, + wantErr: false, + }, { + name: "autodiscovery configuration to match against both unique MachineDeployment, unique MachineSet, and a restrictive returns nodegroups for unique scalable resources", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: uniqueMDConfig.namespace, clusterName: uniqueMDConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMDConfig.machineDeployment.GetLabels())}, + {namespace: uniqueMSConfig.namespace, clusterName: uniqueMSConfig.clusterName, labelSelector: labels.SelectorFromSet(uniqueMSConfig.machineSet.GetLabels())}, + {namespace: RandomString(6), clusterName: RandomString(6), labelSelector: labels.Nothing()}, + }, + expectedScalableResources: []*unstructured.Unstructured{uniqueMDConfig.machineDeployment, 
uniqueMSConfig.machineSet}, + wantErr: false, + }, { + name: "autodiscovery configuration to match against a restrictive config returns no nodegroups", + autoDiscoverySpecs: []*clusterAPIAutoDiscoveryConfig{ + {namespace: RandomString(6), clusterName: RandomString(6), labelSelector: labels.Nothing()}, + }, + expectedScalableResources: nil, + wantErr: false, + }} { + t.Run(tc.name, func(t *testing.T) { + c, stop := mustCreateTestController(t, allTestConfigs...) + defer stop() + c.autoDiscoverySpecs = tc.autoDiscoverySpecs + + got, err := c.nodeGroups() + if (err != nil) != tc.wantErr { + t.Errorf("nodeGroups() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if len(got) != len(tc.expectedScalableResources) { + t.Errorf("nodeGroups() expected length of got to be = %v, got %v", len(tc.expectedScalableResources), len(got)) + } + + // Sort results as order is not guaranteed. + sort.Slice(got, func(i, j int) bool { + return got[i].scalableResource.Name() < got[j].scalableResource.Name() + }) + sort.Slice(tc.expectedScalableResources, func(i, j int) bool { + return tc.expectedScalableResources[i].GetName() < tc.expectedScalableResources[j].GetName() + }) + + if err == nil { + for i := range got { + if !reflect.DeepEqual(got[i].scalableResource.unstructured, tc.expectedScalableResources[i]) { + t.Errorf("nodeGroups() got = %v, expected to consist of nodegroups for scalable resources: %v", got, tc.expectedScalableResources) + } + } + } + }) + } +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go index 2632aba5212f..c0ba8e77d605 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go @@ -53,11 +53,7 @@ func (ng *nodegroup) MaxSize() int { // (new nodes finish startup and registration or removed nodes are // deleted completely). Implementation required. 
func (ng *nodegroup) TargetSize() (int, error) { - size, err := ng.scalableResource.Replicas() - if err != nil { - return 0, err - } - return int(size), nil + return ng.scalableResource.Replicas() } // IncreaseSize increases the size of the node group. To delete a node @@ -73,12 +69,8 @@ func (ng *nodegroup) IncreaseSize(delta int) error { if err != nil { return err } - intSize := int(size) - if intSize+delta > ng.MaxSize() { - return fmt.Errorf("size increase too large - desired:%d max:%d", intSize+delta, ng.MaxSize()) - } - return ng.scalableResource.SetSize(int32(intSize + delta)) + return ng.scalableResource.SetSize(size + delta) } // DeleteNodes deletes nodes from this node group. Error is returned @@ -118,7 +110,7 @@ func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error { // Step 2: if deleting len(nodes) would make the replica count // < minSize, then the request to delete that many nodes is bogus // and we fail fast. - if replicas-int32(len(nodes)) < int32(ng.MinSize()) { + if replicas-len(nodes) < ng.MinSize() { return fmt.Errorf("unable to delete %d machines in %q, machine replicas are %q, minSize is %q ", len(nodes), ng.Id(), replicas, ng.MinSize()) } @@ -187,7 +179,7 @@ func (ng *nodegroup) DecreaseTargetSize(delta int) error { size, delta, len(nodes)) } - return ng.scalableResource.SetSize(int32(size + delta)) + return ng.scalableResource.SetSize(size + delta) } // Id returns an unique identifier of the node group. 
@@ -268,12 +260,33 @@ func (ng *nodegroup) Autoprovisioned() bool { return false } -func newNodegroupFromScalableResource(controller *machineController, unstructuredScalableResource *unstructured.Unstructured) (*nodegroup, error) { +func newNodeGroupFromScalableResource(controller *machineController, unstructuredScalableResource *unstructured.Unstructured) (*nodegroup, error) { + // Ensure that the resulting node group would be allowed based on the autodiscovery specs if defined + if !controller.allowedByAutoDiscoverySpecs(unstructuredScalableResource) { + return nil, nil + } + scalableResource, err := newUnstructuredScalableResource(controller, unstructuredScalableResource) if err != nil { return nil, err } + replicas, found, err := unstructured.NestedInt64(unstructuredScalableResource.UnstructuredContent(), "spec", "replicas") + if err != nil { + return nil, err + } + + // We don't scale from 0 so nodes must belong to a nodegroup + // that has a scale size of at least 1. + if found && replicas == 0 { + return nil, nil + } + + // Ensure the node group would have the capacity to scale + if scalableResource.MaxSize()-scalableResource.MinSize() < 1 { + return nil, nil + } + return &nodegroup{ machineController: controller, scalableResource: scalableResource, diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go index 8deb746b7506..11bf4d48096d 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go @@ -46,6 +46,7 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { minSize int maxSize int nodeCount int + expectNil bool } var testCases = []testCase{{ @@ -82,15 +83,17 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { maxSize: 0, replicas: 0, errors: false, + expectNil: true, }, { description: "no error: min=0, max=1", annotations: 
map[string]string{ nodeGroupMaxSizeAnnotationKey: "1", }, - minSize: 0, - maxSize: 1, - replicas: 0, - errors: false, + minSize: 0, + maxSize: 1, + replicas: 0, + errors: false, + expectNil: true, }, { description: "no error: min=1, max=10, replicas=5", annotations: map[string]string{ @@ -102,13 +105,14 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { replicas: 5, nodeCount: 5, errors: false, + expectNil: true, }} newNodeGroup := func(controller *machineController, testConfig *testConfig) (*nodegroup, error) { if testConfig.machineDeployment != nil { - return newNodegroupFromScalableResource(controller, testConfig.machineDeployment) + return newNodeGroupFromScalableResource(controller, testConfig.machineDeployment) } - return newNodegroupFromScalableResource(controller, testConfig.machineSet) + return newNodeGroupFromScalableResource(controller, testConfig.machineSet) } test := func(t *testing.T, tc testCase, testConfig *testConfig) { @@ -120,16 +124,18 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { t.Fatal("expected an error") } - if !tc.errors && ng == nil { - t.Fatalf("test case logic error: %v", err) - } - if tc.errors { // if the test case is expected to error then // don't assert the remainder return } + if tc.expectNil && ng == nil { + // if the test case is expected to return nil then + // don't assert the remainder + return + } + if ng == nil { t.Fatal("expected nodegroup to be non-nil") } @@ -197,7 +203,7 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { t.Run("MachineSet", func(t *testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { - test(t, tc, createMachineSetTestConfig(RandomString(6), RandomString(6), tc.nodeCount, tc.annotations)) + test(t, tc, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), tc.nodeCount, tc.annotations)) }) } }) @@ -205,7 +211,7 @@ func TestNodeGroupNewNodeGroupConstructor(t *testing.T) { t.Run("MachineDeployment", func(t 
*testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { - test(t, tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), tc.nodeCount, tc.annotations)) + test(t, tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), tc.nodeCount, tc.annotations)) }) } }) @@ -289,7 +295,7 @@ func TestNodeGroupIncreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -301,7 +307,7 @@ func TestNodeGroupIncreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -367,7 +373,7 @@ func TestNodeGroupIncreaseSize(t *testing.T) { expected: 4, delta: 1, } - test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) t.Run("MachineDeployment", func(t *testing.T) { @@ -377,7 +383,7 @@ func TestNodeGroupIncreaseSize(t *testing.T) { expected: 4, delta: 1, } - test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } @@ -472,7 +478,7 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { delta: -1, expectedError: true, } - test(t, &tc, 
createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) t.Run("MachineSet", func(t *testing.T) { @@ -483,7 +489,7 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { expected: 3, delta: -1, } - test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) t.Run("MachineDeployment", func(t *testing.T) { @@ -495,7 +501,7 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) { delta: -1, expectedError: true, } - test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } @@ -577,7 +583,7 @@ func TestNodeGroupDecreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -589,7 +595,7 @@ func TestNodeGroupDecreaseSizeErrors(t *testing.T) { nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", } - test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), int(tc.initial), annotations)) + test(t, &tc, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), int(tc.initial), annotations)) }) } }) @@ -667,14 +673,14 @@ func TestNodeGroupDeleteNodes(t *testing.T) { // sorting and the expected semantics in test() will fail. 
t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) @@ -747,15 +753,17 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { t.Run("MachineSet", func(t *testing.T) { namespace := RandomString(6) - testConfig0 := createMachineSetTestConfigs(namespace, RandomString(6), 1, 2, annotations) - testConfig1 := createMachineSetTestConfigs(namespace, RandomString(6), 1, 2, annotations) + clusterName := RandomString(6) + testConfig0 := createMachineSetTestConfigs(namespace, clusterName, RandomString(6), 1, 2, annotations) + testConfig1 := createMachineSetTestConfigs(namespace, clusterName, RandomString(6), 1, 2, annotations) test(t, 2, append(testConfig0, testConfig1...)) }) t.Run("MachineDeployment", func(t *testing.T) { namespace := RandomString(6) - testConfig0 := createMachineDeploymentTestConfigs(namespace, RandomString(6), 1, 2, annotations) - testConfig1 := createMachineDeploymentTestConfigs(namespace, RandomString(6), 1, 2, annotations) + clusterName := RandomString(6) + testConfig0 := createMachineDeploymentTestConfigs(namespace, clusterName, RandomString(6), 1, 2, annotations) + testConfig1 := createMachineDeploymentTestConfigs(namespace, clusterName, RandomString(6), 1, 2, annotations) test(t, 2, append(testConfig0, testConfig1...)) }) } @@ -925,14 +933,14 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { // sorting and the 
expected semantics in test() will fail. t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) @@ -1006,14 +1014,14 @@ func TestNodeGroupWithFailedMachine(t *testing.T) { // sorting and the expected semantics in test() will fail. t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), 10, map[string]string{ + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), 10, map[string]string{ nodeGroupMinSizeAnnotationKey: "1", nodeGroupMaxSizeAnnotationKey: "10", })) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go index a0a017cad67a..fbac1fd89f32 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go @@ -171,7 +171,7 @@ func BuildClusterAPI(opts config.AutoscalingOptions, do cloudprovider.NodeGroupD klog.Fatalf("create scale client failed: %v", 
err) } - controller, err := newMachineController(managementClient, workloadClient, managementDiscoveryClient, managementScaleClient) + controller, err := newMachineController(managementClient, workloadClient, managementDiscoveryClient, managementScaleClient, do) if err != nil { klog.Fatal(err) } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go index 3fd5bd9849ce..d5bf3e387339 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go @@ -79,7 +79,7 @@ func (r unstructuredScalableResource) ProviderIDs() ([]string, error) { return providerIds, nil } -func (r unstructuredScalableResource) Replicas() (int32, error) { +func (r unstructuredScalableResource) Replicas() (int, error) { gvr, err := r.GroupVersionResource() if err != nil { return 0, err @@ -92,10 +92,17 @@ func (r unstructuredScalableResource) Replicas() (int32, error) { if s == nil { return 0, fmt.Errorf("unknown %s %s/%s", r.Kind(), r.Namespace(), r.Name()) } - return s.Spec.Replicas, nil + return int(s.Spec.Replicas), nil } -func (r unstructuredScalableResource) SetSize(nreplicas int32) error { +func (r unstructuredScalableResource) SetSize(nreplicas int) error { + switch { + case nreplicas > r.maxSize: + return fmt.Errorf("size increase too large - desired:%d max:%d", nreplicas, r.maxSize) + case nreplicas < r.minSize: + return fmt.Errorf("size decrease too large - desired:%d min:%d", nreplicas, r.minSize) + } + gvr, err := r.GroupVersionResource() if err != nil { return err @@ -110,7 +117,7 @@ func (r unstructuredScalableResource) SetSize(nreplicas int32) error { return fmt.Errorf("unknown %s %s/%s", r.Kind(), r.Namespace(), r.Name()) } - s.Spec.Replicas = nreplicas + s.Spec.Replicas = int32(nreplicas) _, updateErr := r.controller.managementScaleClient.Scales(r.Namespace()).Update(context.TODO(), 
gvr.GroupResource(), s, metav1.UpdateOptions{}) return updateErr } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go index 2c51dcfbf79d..cc0bb23f84fd 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured_test.go @@ -25,7 +25,7 @@ import ( func TestSetSize(t *testing.T) { initialReplicas := 1 - updatedReplicas := int32(5) + updatedReplicas := 5 test := func(t *testing.T, testConfig *testConfig) { controller, stop := mustCreateTestController(t, testConfig) @@ -54,23 +54,39 @@ func TestSetSize(t *testing.T) { s, err := sr.controller.managementScaleClient.Scales(testResource.GetNamespace()). Get(context.TODO(), gvr.GroupResource(), testResource.GetName(), metav1.GetOptions{}) - if s.Spec.Replicas != updatedReplicas { + if s.Spec.Replicas != int32(updatedReplicas) { t.Errorf("expected %v, got: %v", updatedReplicas, s.Spec.Replicas) } } t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + test(t, createMachineSetTestConfig( + RandomString(6), + RandomString(6), + RandomString(6), + initialReplicas, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }, + )) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + test(t, createMachineDeploymentTestConfig( + RandomString(6), + RandomString(6), + RandomString(6), + initialReplicas, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }, + )) }) } func TestReplicas(t *testing.T) { initialReplicas := 1 - updatedReplicas := int32(5) + updatedReplicas := 5 test := func(t *testing.T, testConfig *testConfig) { controller, stop := 
mustCreateTestController(t, testConfig) @@ -96,7 +112,7 @@ func TestReplicas(t *testing.T) { t.Fatal(err) } - if i != int32(initialReplicas) { + if i != initialReplicas { t.Errorf("expected %v, got: %v", initialReplicas, i) } @@ -107,7 +123,7 @@ func TestReplicas(t *testing.T) { t.Fatal(err) } - s.Spec.Replicas = updatedReplicas + s.Spec.Replicas = int32(updatedReplicas) _, err = sr.controller.managementScaleClient.Scales(testResource.GetNamespace()). Update(context.TODO(), gvr.GroupResource(), s, metav1.UpdateOptions{}) @@ -126,17 +142,17 @@ func TestReplicas(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), RandomString(6), initialReplicas, nil)) }) t.Run("MachineDeployment", func(t *testing.T) { - test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), RandomString(6), initialReplicas, nil)) }) } func TestSetSizeAndReplicas(t *testing.T) { initialReplicas := 1 - updatedReplicas := int32(5) + updatedReplicas := 5 test := func(t *testing.T, testConfig *testConfig) { controller, stop := mustCreateTestController(t, testConfig) @@ -157,7 +173,7 @@ func TestSetSizeAndReplicas(t *testing.T) { t.Fatal(err) } - if i != int32(initialReplicas) { + if i != initialReplicas { t.Errorf("expected %v, got: %v", initialReplicas, i) } @@ -177,10 +193,26 @@ func TestSetSizeAndReplicas(t *testing.T) { } t.Run("MachineSet", func(t *testing.T) { - test(t, createMachineSetTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + test(t, createMachineSetTestConfig( + RandomString(6), + RandomString(6), + RandomString(6), + initialReplicas, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }, + )) }) t.Run("MachineDeployment", func(t 
*testing.T) { - test(t, createMachineDeploymentTestConfig(RandomString(6), RandomString(6), initialReplicas, nil)) + test(t, createMachineDeploymentTestConfig( + RandomString(6), + RandomString(6), + RandomString(6), + initialReplicas, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }, + )) }) } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go index ed7bacf49c72..104bffecbe69 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go @@ -28,6 +28,8 @@ import ( const ( nodeGroupMinSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-min-size" nodeGroupMaxSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-max-size" + clusterNameLabel = "cluster.x-k8s.io/cluster-name" + deprecatedClusterNameLabel = "cluster.k8s.io/cluster-name" ) var ( @@ -138,3 +140,35 @@ func normalizedProviderString(s string) normalizedProviderID { split := strings.Split(s, "/") return normalizedProviderID(split[len(split)-1]) } + +func clusterNameFromResource(r *unstructured.Unstructured) string { + // Use Spec.ClusterName if defined (only available on v1alpha3+ types) + clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName") + if err != nil { + return "" + } + + if found { + return clusterName + } + + // Fallback to value of clusterNameLabel + if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok { + return clusterName + } + + // fallback for backward compatibility for deprecatedClusterNameLabel + if clusterName, ok := r.GetLabels()[deprecatedClusterNameLabel]; ok { + return clusterName + } + + // fallback for cluster-api v1alpha1 cluster linking + templateLabels, found, err := unstructured.NestedStringMap(r.UnstructuredContent(), "spec", "template", "metadata", "labels") + if found { + if 
clusterName, ok := templateLabels[deprecatedClusterNameLabel]; ok { + return clusterName + } + } + + return "" +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go index 421fb830e405..9a9cada7734c 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go @@ -426,3 +426,249 @@ func TestUtilNormalizedProviderID(t *testing.T) { }) } } + +func Test_clusterNameFromResource(t *testing.T) { + for _, tc := range []struct { + name string + resource *unstructured.Unstructured + want string + }{{ + name: "cluster name not set, v1alpha1 MachineSet", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.k8s.io/v1alpha1", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "", + }, { + name: "cluster name not set, v1alpha1 MachineDeployment", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.k8s.io/v1alpha1", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "", + }, { + name: "cluster name set in MachineSet labels, v1alpha1 MachineSet", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.k8s.io/v1alpha1", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + "labels": map[string]interface{}{ + deprecatedClusterNameLabel: "bar", + }, + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": 
map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name set in MachineDeployment, v1alpha1 MachineDeployment", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.k8s.io/v1alpha1", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + "labels": map[string]interface{}{ + deprecatedClusterNameLabel: "bar", + }, + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name set in Machine template labels, v1alpha1 MachineSet", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.k8s.io/v1alpha1", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + "template": map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + deprecatedClusterNameLabel: "bar", + }, + }, + }, + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name set in Machine template, v1alpha1 MachineDeployment", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.k8s.io/v1alpha1", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + "template": map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + deprecatedClusterNameLabel: "bar", + }, + }, + }, + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name not set, v1alpha2 MachineSet", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha2", + 
"metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "", + }, { + name: "cluster name not set, v1alpha2 MachineDeployment", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "", + }, { + name: "cluster name set in MachineSet labels, v1alpha2 MachineSet", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + "labels": map[string]interface{}{ + clusterNameLabel: "bar", + }, + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name set in MachineDeployment, v1alpha2 MachineDeployment", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + "labels": map[string]interface{}{ + clusterNameLabel: "bar", + }, + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name set in spec, v1alpha3 MachineSet", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineSetKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "clusterName": 
"bar", + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }, { + name: "cluster name set in spec, v1alpha3 MachineDeployment", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": machineDeploymentKind, + "apiVersion": "cluster.x-k8s.io/v1alpha3", + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "clusterName": "bar", + "replicas": int64(1), + }, + "status": map[string]interface{}{}, + }, + }, + want: "bar", + }} { + t.Run(tc.name, func(t *testing.T) { + if got := clusterNameFromResource(tc.resource); got != tc.want { + t.Errorf("clusterNameFromResource() = %v, want %v", got, tc.want) + } + }) + } +} From 60376f970e2b7508f24b8e4cf95ee491e5724ef1 Mon Sep 17 00:00:00 2001 From: Kubernetes Prow Robot Date: Mon, 21 Sep 2020 08:22:31 -0700 Subject: [PATCH 3/4] Merge pull request #3203 from detiber/configSplit2 [cluster-autoscaler] Support using --cloud-config for clusterapi provider --- .../cloudprovider/clusterapi/README.md | 45 ++++++-- .../clusterapi/clusterapi_provider.go | 24 +++- .../config/autoscaling_options.go | 3 + cluster-autoscaler/main.go | 106 +++++++++--------- 4 files changed, 112 insertions(+), 66 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md index 3f12c397fef4..ef0277f00228 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/README.md +++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md @@ -10,14 +10,6 @@ cluster. The cluster-api provider requires Kubernetes v1.16 or greater to run the v1alpha3 version of the API. -## Cluster API Prerequisites - -Please be aware that currently the cluster autoscaler only supports CAPI -clusters that have joined their management and workload clusters into a single -cluster. 
For more information about this please see the -[Cluster API Concepts documentations](https://cluster-api.sigs.k8s.io/user/concepts.html) -and the [`clusterctl move` command documentation](https://cluster-api.sigs.k8s.io/user/concepts.html). - ## Starting the Autoscaler To enable the Cluster API provider, you must first specify it in the command @@ -62,6 +54,43 @@ in the staging namespace, belonging to the purple cluster, with the label owner= --node-group-auto-discovery=clusterapi:namespace=staging,clusterName=purple,owner=jim ``` +## Connecting cluster-autoscaler to Cluster API management and workload Clusters + +You will also need to provide the path to the kubeconfig(s) for the management +and workload cluster you wish cluster-autoscaler to run against. To specify the +kubeconfig path for the workload cluster to monitor, use the `--kubeconfig` +option and supply the path to the kubeconfig. If the `--kubeconfig` option is +not specified, cluster-autoscaler will attempt to use an in-cluster configuration. +To specify the kubeconfig path for the management cluster to monitor, use the +`--cloud-config` option and supply the path to the kubeconfig. If the +`--cloud-config` option is not specified it will fall back to using the kubeconfig +that was provided with the `--kubeconfig` option. 
+ +Use in-cluster config for both management and workload cluster: +``` +cluster-autoscaler --cloud-provider=clusterapi +``` + +Use in-cluster config for workload cluster, specify kubeconfig for management cluster: +``` +cluster-autoscaler --cloud-provider=clusterapi --cloud-config=/mnt/kubeconfig +``` + +Use in-cluster config for management cluster, specify kubeconfig for workload cluster: +``` +cluster-autoscaler --cloud-provider=clusterapi --kubeconfig=/mnt/kubeconfig --clusterapi-cloud-config-authoritative +``` + +Use separate kubeconfigs for both management and workload cluster: +``` +cluster-autoscaler --cloud-provider=clusterapi --kubeconfig=/mnt/workload.kubeconfig --cloud-config=/mnt/management.kubeconfig +``` + +Use a single provided kubeconfig for both management and workload cluster: +``` +cluster-autoscaler --cloud-provider=clusterapi --kubeconfig=/mnt/workload.kubeconfig +``` + ## Enabling Autoscaling To enable the automatic scaling of components in your cluster-api managed diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go index fbac1fd89f32..35a046e64e47 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go @@ -140,30 +140,42 @@ func newProvider( // BuildClusterAPI builds CloudProvider implementation for machine api. 
func BuildClusterAPI(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { - externalConfig, err := clientcmd.BuildConfigFromFlags("", opts.KubeConfigPath) + managementKubeconfig := opts.CloudConfig + if managementKubeconfig == "" && !opts.ClusterAPICloudConfigAuthoritative { + managementKubeconfig = opts.KubeConfigPath + } + + managementConfig, err := clientcmd.BuildConfigFromFlags("", managementKubeconfig) + if err != nil { + klog.Fatalf("cannot build management cluster config: %v", err) + } + + workloadKubeconfig := opts.KubeConfigPath + + workloadConfig, err := clientcmd.BuildConfigFromFlags("", workloadKubeconfig) if err != nil { - klog.Fatalf("cannot build config: %v", err) + klog.Fatalf("cannot build workload cluster config: %v", err) } // Grab a dynamic interface that we can create informers from - managementClient, err := dynamic.NewForConfig(externalConfig) + managementClient, err := dynamic.NewForConfig(managementConfig) if err != nil { klog.Fatalf("could not generate dynamic client for config") } - workloadClient, err := kubernetes.NewForConfig(externalConfig) + workloadClient, err := kubernetes.NewForConfig(workloadConfig) if err != nil { klog.Fatalf("create kube clientset failed: %v", err) } - managementDiscoveryClient, err := discovery.NewDiscoveryClientForConfig(externalConfig) + managementDiscoveryClient, err := discovery.NewDiscoveryClientForConfig(managementConfig) if err != nil { klog.Fatalf("create discovery client failed: %v", err) } cachedDiscovery := memory.NewMemCacheClient(managementDiscoveryClient) managementScaleClient, err := scale.NewForConfig( - externalConfig, + managementConfig, restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery), dynamic.LegacyAPIPathResolverFunc, scale.NewDiscoveryScaleKindResolver(managementDiscoveryClient)) diff --git a/cluster-autoscaler/config/autoscaling_options.go b/cluster-autoscaler/config/autoscaling_options.go 
index b7c5c4c6e939..7e0e85c7dffc 100644 --- a/cluster-autoscaler/config/autoscaling_options.go +++ b/cluster-autoscaler/config/autoscaling_options.go @@ -142,4 +142,7 @@ type AutoscalingOptions struct { AWSUseStaticInstanceList bool // Path to kube configuration if available KubeConfigPath string + // ClusterAPICloudConfigAuthoritative tells the Cluster API provider to treat the CloudConfig option as authoritative and + // not use KubeConfigPath as a fallback when it is not provided. + ClusterAPICloudConfigAuthoritative bool } diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index f40501e22ab3..723b73b7463a 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -169,10 +169,11 @@ var ( regional = flag.Bool("regional", false, "Cluster is regional.") newPodScaleUpDelay = flag.Duration("new-pod-scale-up-delay", 0*time.Second, "Pods less than this old will not be considered for scale-up.") - ignoreTaintsFlag = multiStringFlag("ignore-taint", "Specifies a taint to ignore in node templates when considering to scale a node group") - balancingIgnoreLabelsFlag = multiStringFlag("balancing-ignore-label", "Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar") - awsUseStaticInstanceList = flag.Bool("aws-use-static-instance-list", false, "Should CA fetch instance types in runtime or use a static list. 
AWS only") - enableProfiling = flag.Bool("profiling", false, "Is debug/pprof endpoint enabled") + ignoreTaintsFlag = multiStringFlag("ignore-taint", "Specifies a taint to ignore in node templates when considering to scale a node group") + balancingIgnoreLabelsFlag = multiStringFlag("balancing-ignore-label", "Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar") + awsUseStaticInstanceList = flag.Bool("aws-use-static-instance-list", false, "Should CA fetch instance types in runtime or use a static list. AWS only") + enableProfiling = flag.Bool("profiling", false, "Is debug/pprof endpoint enabled") + clusterAPICloudConfigAuthoritative = flag.Bool("clusterapi-cloud-config-authoritative", false, "Treat the cloud-config flag authoritatively (do not fallback to using kubeconfig flag). ClusterAPI only") ) func createAutoscalingOptions() config.AutoscalingOptions { @@ -193,54 +194,55 @@ func createAutoscalingOptions() config.AutoscalingOptions { klog.Fatalf("Failed to parse flags: %v", err) } return config.AutoscalingOptions{ - CloudConfig: *cloudConfig, - CloudProviderName: *cloudProviderFlag, - NodeGroupAutoDiscovery: *nodeGroupAutoDiscoveryFlag, - MaxTotalUnreadyPercentage: *maxTotalUnreadyPercentage, - OkTotalUnreadyCount: *okTotalUnreadyCount, - ScaleUpFromZero: *scaleUpFromZero, - EstimatorName: *estimatorFlag, - ExpanderName: *expanderFlag, - IgnoreDaemonSetsUtilization: *ignoreDaemonSetsUtilization, - IgnoreMirrorPodsUtilization: *ignoreMirrorPodsUtilization, - MaxBulkSoftTaintCount: *maxBulkSoftTaintCount, - MaxBulkSoftTaintTime: *maxBulkSoftTaintTime, - MaxEmptyBulkDelete: *maxEmptyBulkDeleteFlag, - MaxGracefulTerminationSec: *maxGracefulTerminationFlag, - MaxNodeProvisionTime: *maxNodeProvisionTime, - MaxNodesTotal: *maxNodesTotal, - MaxCoresTotal: maxCoresTotal, - MinCoresTotal: minCoresTotal, - MaxMemoryTotal: maxMemoryTotal, - MinMemoryTotal: minMemoryTotal, - GpuTotal: 
parsedGpuTotal, - NodeGroups: *nodeGroupsFlag, - ScaleDownDelayAfterAdd: *scaleDownDelayAfterAdd, - ScaleDownDelayAfterDelete: *scaleDownDelayAfterDelete, - ScaleDownDelayAfterFailure: *scaleDownDelayAfterFailure, - ScaleDownEnabled: *scaleDownEnabled, - ScaleDownUnneededTime: *scaleDownUnneededTime, - ScaleDownUnreadyTime: *scaleDownUnreadyTime, - ScaleDownUtilizationThreshold: *scaleDownUtilizationThreshold, - ScaleDownGpuUtilizationThreshold: *scaleDownGpuUtilizationThreshold, - ScaleDownNonEmptyCandidatesCount: *scaleDownNonEmptyCandidatesCount, - ScaleDownCandidatesPoolRatio: *scaleDownCandidatesPoolRatio, - ScaleDownCandidatesPoolMinCount: *scaleDownCandidatesPoolMinCount, - WriteStatusConfigMap: *writeStatusConfigMapFlag, - BalanceSimilarNodeGroups: *balanceSimilarNodeGroupsFlag, - ConfigNamespace: *namespace, - ClusterName: *clusterName, - NodeAutoprovisioningEnabled: *nodeAutoprovisioningEnabled, - MaxAutoprovisionedNodeGroupCount: *maxAutoprovisionedNodeGroupCount, - UnremovableNodeRecheckTimeout: *unremovableNodeRecheckTimeout, - ExpendablePodsPriorityCutoff: *expendablePodsPriorityCutoff, - Regional: *regional, - NewPodScaleUpDelay: *newPodScaleUpDelay, - IgnoredTaints: *ignoreTaintsFlag, - BalancingExtraIgnoredLabels: *balancingIgnoreLabelsFlag, - KubeConfigPath: *kubeConfigFile, - NodeDeletionDelayTimeout: *nodeDeletionDelayTimeout, - AWSUseStaticInstanceList: *awsUseStaticInstanceList, + CloudConfig: *cloudConfig, + CloudProviderName: *cloudProviderFlag, + NodeGroupAutoDiscovery: *nodeGroupAutoDiscoveryFlag, + MaxTotalUnreadyPercentage: *maxTotalUnreadyPercentage, + OkTotalUnreadyCount: *okTotalUnreadyCount, + ScaleUpFromZero: *scaleUpFromZero, + EstimatorName: *estimatorFlag, + ExpanderName: *expanderFlag, + IgnoreDaemonSetsUtilization: *ignoreDaemonSetsUtilization, + IgnoreMirrorPodsUtilization: *ignoreMirrorPodsUtilization, + MaxBulkSoftTaintCount: *maxBulkSoftTaintCount, + MaxBulkSoftTaintTime: *maxBulkSoftTaintTime, + MaxEmptyBulkDelete: 
*maxEmptyBulkDeleteFlag, + MaxGracefulTerminationSec: *maxGracefulTerminationFlag, + MaxNodeProvisionTime: *maxNodeProvisionTime, + MaxNodesTotal: *maxNodesTotal, + MaxCoresTotal: maxCoresTotal, + MinCoresTotal: minCoresTotal, + MaxMemoryTotal: maxMemoryTotal, + MinMemoryTotal: minMemoryTotal, + GpuTotal: parsedGpuTotal, + NodeGroups: *nodeGroupsFlag, + ScaleDownDelayAfterAdd: *scaleDownDelayAfterAdd, + ScaleDownDelayAfterDelete: *scaleDownDelayAfterDelete, + ScaleDownDelayAfterFailure: *scaleDownDelayAfterFailure, + ScaleDownEnabled: *scaleDownEnabled, + ScaleDownUnneededTime: *scaleDownUnneededTime, + ScaleDownUnreadyTime: *scaleDownUnreadyTime, + ScaleDownUtilizationThreshold: *scaleDownUtilizationThreshold, + ScaleDownGpuUtilizationThreshold: *scaleDownGpuUtilizationThreshold, + ScaleDownNonEmptyCandidatesCount: *scaleDownNonEmptyCandidatesCount, + ScaleDownCandidatesPoolRatio: *scaleDownCandidatesPoolRatio, + ScaleDownCandidatesPoolMinCount: *scaleDownCandidatesPoolMinCount, + WriteStatusConfigMap: *writeStatusConfigMapFlag, + BalanceSimilarNodeGroups: *balanceSimilarNodeGroupsFlag, + ConfigNamespace: *namespace, + ClusterName: *clusterName, + NodeAutoprovisioningEnabled: *nodeAutoprovisioningEnabled, + MaxAutoprovisionedNodeGroupCount: *maxAutoprovisionedNodeGroupCount, + UnremovableNodeRecheckTimeout: *unremovableNodeRecheckTimeout, + ExpendablePodsPriorityCutoff: *expendablePodsPriorityCutoff, + Regional: *regional, + NewPodScaleUpDelay: *newPodScaleUpDelay, + IgnoredTaints: *ignoreTaintsFlag, + BalancingExtraIgnoredLabels: *balancingIgnoreLabelsFlag, + KubeConfigPath: *kubeConfigFile, + NodeDeletionDelayTimeout: *nodeDeletionDelayTimeout, + AWSUseStaticInstanceList: *awsUseStaticInstanceList, + ClusterAPICloudConfigAuthoritative: *clusterAPICloudConfigAuthoritative, } } From 520b1174636eb1bf40ee08fc75c12dd260eeec56 Mon Sep 17 00:00:00 2001 From: Kubernetes Prow Robot Date: Thu, 24 Sep 2020 21:37:11 -0700 Subject: [PATCH 4/4] Merge pull request #3161 from 
detiber/fixCAPIAnnotations Update group identifier to use for Cluster API annotations --- .../cloudprovider/clusterapi/README.md | 4 +- .../clusterapi/clusterapi_controller.go | 7 ++- .../clusterapi/clusterapi_controller_test.go | 47 ++++++++++++++----- .../clusterapi/clusterapi_nodegroup.go | 10 ++-- .../clusterapi/clusterapi_nodegroup_test.go | 3 ++ .../clusterapi/clusterapi_unstructured.go | 14 +++--- .../clusterapi/clusterapi_utils.go | 16 +++++-- .../clusterapi/clusterapi_utils_test.go | 18 +++++++ 8 files changed, 88 insertions(+), 31 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md index ef0277f00228..50d686aa681b 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/README.md +++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md @@ -101,12 +101,12 @@ resources depending on the type of cluster-api mechanism that you are using. There are two annotations that control how a cluster resource should be scaled: -* `cluster.k8s.io/cluster-api-autoscaler-node-group-min-size` - This specifies +* `cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size` - This specifies the minimum number of nodes for the associated resource group. The autoscaler will not scale the group below this number. Please note that currently the cluster-api provider will not scale down to zero nodes. -* `cluster.k8s.io/cluster-api-autoscaler-node-group-max-size` - This specifies +* `cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size` - This specifies the maximum number of nodes for the associated resource group. The autoscaler will not scale the group above this number. 
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go index 459217a58a0e..fb56d7f80d56 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go @@ -258,7 +258,12 @@ func (c *machineController) findMachineByProviderID(providerID normalizedProvide if node == nil { return nil, nil } - return c.findMachine(node.Annotations[machineAnnotationKey]) + + machineID, ok := node.Annotations[machineAnnotationKey] + if !ok { + machineID = node.Annotations[deprecatedMachineAnnotationKey] + } + return c.findMachine(machineID) } func isFailedMachineProviderID(providerID normalizedProviderID) bool { diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go index bad77f565f0f..d6c414c63b22 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go @@ -486,24 +486,32 @@ func deleteTestConfigs(t *testing.T, controller *machineController, testConfigs func TestControllerFindMachine(t *testing.T) { type testCase struct { - description string - name string - namespace string - lookupSucceeds bool + description string + name string + namespace string + useDeprecatedAnnotation bool + lookupSucceeds bool } var testCases = []testCase{{ - description: "lookup fails", - lookupSucceeds: false, - name: "machine-does-not-exist", - namespace: "namespace-does-not-exist", + description: "lookup fails", + lookupSucceeds: false, + useDeprecatedAnnotation: false, + name: "machine-does-not-exist", + namespace: "namespace-does-not-exist", }, { - description: "lookup fails in valid namespace", - lookupSucceeds: false, - name: "machine-does-not-exist-in-existing-namespace", + description: "lookup fails 
in valid namespace", + lookupSucceeds: false, + useDeprecatedAnnotation: false, + name: "machine-does-not-exist-in-existing-namespace", }, { - description: "lookup succeeds", - lookupSucceeds: true, + description: "lookup succeeds", + lookupSucceeds: true, + useDeprecatedAnnotation: false, + }, { + description: "lookup succeeds with deprecated annotation", + lookupSucceeds: true, + useDeprecatedAnnotation: true, }} test := func(t *testing.T, tc testCase, testConfig *testConfig) { @@ -541,6 +549,19 @@ func TestControllerFindMachine(t *testing.T) { if tc.namespace == "" { tc.namespace = testConfig.machines[0].GetNamespace() } + if tc.useDeprecatedAnnotation { + for i := range testConfig.machines { + n := testConfig.nodes[i] + annotations := n.GetAnnotations() + val, ok := annotations[machineAnnotationKey] + if !ok { + t.Fatal("node did not contain machineAnnotationKey") + } + delete(annotations, machineAnnotationKey) + annotations[deprecatedMachineAnnotationKey] = val + n.SetAnnotations(annotations) + } + } test(t, tc, testConfig) }) } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go index c0ba8e77d605..8d04f62e33cc 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go @@ -27,9 +27,13 @@ import ( ) const ( - machineDeleteAnnotationKey = "cluster.k8s.io/delete-machine" - machineAnnotationKey = "cluster.k8s.io/machine" - debugFormat = "%s (min: %d, max: %d, replicas: %d)" + // deprecatedMachineDeleteAnnotationKey should not be removed until minimum cluster-api support is v1alpha3 + deprecatedMachineDeleteAnnotationKey = "cluster.k8s.io/delete-machine" + // TODO: determine what currently relies on deprecatedMachineAnnotationKey to determine when it can be removed + deprecatedMachineAnnotationKey = "cluster.k8s.io/machine" + machineDeleteAnnotationKey = 
"cluster.x-k8s.io/delete-machine" + machineAnnotationKey = "cluster.x-k8s.io/machine" + debugFormat = "%s (min: %d, max: %d, replicas: %d)" ) type nodegroup struct { diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go index 11bf4d48096d..9c7e22453af1 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go @@ -649,6 +649,9 @@ func TestNodeGroupDeleteNodes(t *testing.T) { if _, found := machine.GetAnnotations()[machineDeleteAnnotationKey]; !found { t.Errorf("expected annotation %q on machine %s", machineDeleteAnnotationKey, machine.GetName()) } + if _, found := machine.GetAnnotations()[deprecatedMachineDeleteAnnotationKey]; !found { + t.Errorf("expected annotation %q on machine %s", deprecatedMachineDeleteAnnotationKey, machine.GetName()) + } } gvr, err := ng.scalableResource.GroupVersionResource() diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go index d5bf3e387339..ac6991bebbe9 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go @@ -129,15 +129,12 @@ func (r unstructuredScalableResource) UnmarkMachineForDeletion(machine *unstruct } annotations := u.GetAnnotations() - if _, ok := annotations[machineDeleteAnnotationKey]; ok { - delete(annotations, machineDeleteAnnotationKey) - u.SetAnnotations(annotations) - _, updateErr := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - - return updateErr - } + delete(annotations, machineDeleteAnnotationKey) + delete(annotations, deprecatedMachineDeleteAnnotationKey) + u.SetAnnotations(annotations) + _, 
updateErr := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) - return nil + return updateErr } func (r unstructuredScalableResource) MarkMachineForDeletion(machine *unstructured.Unstructured) error { @@ -154,6 +151,7 @@ func (r unstructuredScalableResource) MarkMachineForDeletion(machine *unstructur } annotations[machineDeleteAnnotationKey] = time.Now().String() + annotations[deprecatedMachineDeleteAnnotationKey] = time.Now().String() u.SetAnnotations(annotations) _, updateErr := r.controller.managementClient.Resource(r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go index 104bffecbe69..f564efc0c8f8 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go @@ -26,10 +26,12 @@ import ( ) const ( - nodeGroupMinSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-min-size" - nodeGroupMaxSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-max-size" - clusterNameLabel = "cluster.x-k8s.io/cluster-name" - deprecatedClusterNameLabel = "cluster.k8s.io/cluster-name" + deprecatedNodeGroupMinSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-min-size" + deprecatedNodeGroupMaxSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-max-size" + nodeGroupMinSizeAnnotationKey = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size" + nodeGroupMaxSizeAnnotationKey = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size" + clusterNameLabel = "cluster.x-k8s.io/cluster-name" + deprecatedClusterNameLabel = "cluster.k8s.io/cluster-name" ) var ( @@ -60,6 +62,9 @@ type normalizedProviderID string // value is not of type int. 
func minSize(annotations map[string]string) (int, error) { val, found := annotations[nodeGroupMinSizeAnnotationKey] + if !found { + val, found = annotations[deprecatedNodeGroupMinSizeAnnotationKey] + } if !found { return 0, errMissingMinAnnotation } @@ -76,6 +81,9 @@ func minSize(annotations map[string]string) (int, error) { // value is not of type int. func maxSize(annotations map[string]string) (int, error) { val, found := annotations[nodeGroupMaxSizeAnnotationKey] + if !found { + val, found = annotations[deprecatedNodeGroupMaxSizeAnnotationKey] + } if !found { return 0, errMissingMaxAnnotation } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go index 9a9cada7734c..65ee11fe9ccb 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go @@ -112,6 +112,24 @@ func TestUtilParseScalingBounds(t *testing.T) { }, min: 0, max: 1, + }, { + description: "deprecated min/max annotations still work, result is min 0, max 1", + annotations: map[string]string{ + deprecatedNodeGroupMinSizeAnnotationKey: "0", + deprecatedNodeGroupMaxSizeAnnotationKey: "1", + }, + min: 0, + max: 1, + }, { + description: "deprecated min/max annotations do not take precedence over non-deprecated annotations, result is min 1, max 2", + annotations: map[string]string{ + deprecatedNodeGroupMinSizeAnnotationKey: "0", + deprecatedNodeGroupMaxSizeAnnotationKey: "1", + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "2", + }, + min: 1, + max: 2, }} { t.Run(tc.description, func(t *testing.T) { machineSet := unstructured.Unstructured{